From 42785fa98b7784d86f0006f3e35e65b1426032e7 Mon Sep 17 00:00:00 2001 From: Maurice Makaay Date: Fri, 7 Feb 2020 02:13:46 +0100 Subject: [PATCH] Added a lot of roles 'n stuff. --- bin/add_ssh_known_host | 27 ++ environments/demo/credentials.yml | 14 + environments/demo/hosts | 9 + environments/demo/lxd.yml | 42 ++ environments/demo/networks.yml | 24 + environments/demo/node_types.yml | 89 ++++ environments/demo/nodes.yml | 43 ++ environments/demo/software.yml | 4 + lib/dynamic_inventory/__init__.py | 4 + .../__pycache__/__init__.cpython-36.pyc | Bin 0 -> 367 bytes .../__pycache__/config.cpython-36.pyc | Bin 0 -> 1582 bytes .../__pycache__/inventory.cpython-36.pyc | Bin 0 -> 14738 bytes .../__pycache__/lxd_status.cpython-36.pyc | Bin 0 -> 1238 bytes .../__pycache__/script.cpython-36.pyc | Bin 0 -> 1501 bytes .../__pycache__/serialize.cpython-36.pyc | Bin 0 -> 763 bytes lib/dynamic_inventory/config.py | 31 ++ lib/dynamic_inventory/inventory.py | 444 ++++++++++++++++++ lib/dynamic_inventory/lxd_status.py | 37 ++ lib/dynamic_inventory/script.py | 33 ++ lib/dynamic_inventory/serialize.py | 18 + playbook-apps.yml | 8 + playbook-hosts.yml | 11 + playbook.yml | 3 + roles/ansible/meta/main.yml | 6 + roles/ansible/playbook.yml | 6 + roles/ansible/tasks/main.yml | 33 ++ roles/ansible/templates/ansible_aliases.j2 | 5 + .../install_ansible_authorized_ssh_key.j2 | 24 + roles/app.galera_bootstrap/meta/main.yml | 6 + roles/app.galera_bootstrap/playbook.yml | 6 + roles/app.galera_bootstrap/tasks/main.yml | 8 + .../templates/galera_bootstrap_advisor.j2 | 178 +++++++ roles/app.galera_node/defaults/main.yml | 2 + .../files/galera_cluster_status.socket | 9 + .../files/galera_cluster_status@.service | 13 + .../files/userparameter_galera.conf | 70 +++ roles/app.galera_node/meta/main.yml | 6 + roles/app.galera_node/playbook.yml | 7 + roles/app.galera_node/tasks/install.yml | 66 +++ roles/app.galera_node/tasks/main.yml | 6 + roles/app.galera_node/tasks/scripts.yml | 22 + roles/app.galera_node/templates/galera.cnf.j2 | 60 +++ .../templates/galera_cluster_status.j2 | 184 ++++++++ .../galera_flag_as_safe_to_bootstrap.j2 | 7 + .../galera_wsrep_recovered_position.j2 | 33 ++ .../templates/xs4all-galera-utils.conf.j2 | 5 + .../templates/zabbix_my.cnf.j2 | 4 + roles/app.mariadb/defaults/main.yml | 3 + roles/app.mariadb/meta/main.yml | 6 + roles/app.mariadb/tasks/debian-sys-maint.yml | 16 + roles/app.mariadb/tasks/install.yml | 33 ++ roles/app.mariadb/tasks/main.yml | 4 + roles/app.mariadb/tasks/repo.yml | 29 ++ roles/app.mariadb/templates/mariadb_repo.j2 | 3 + roles/app.mariadb/templates/my.cnf.j2 | 4 + roles/auto_upgrades/meta/main.yml | 6 + roles/auto_upgrades/playbook.yml | 6 + roles/auto_upgrades/tasks/disable.yml | 11 + roles/auto_upgrades/tasks/enable.yml | 10 + roles/auto_upgrades/tasks/main.yml | 6 + roles/firewalling/meta/main.yml | 6 + roles/firewalling/playbook.yml | 6 + roles/firewalling/tasks/main.yml | 44 ++ .../firewalling/templates/etc_default_ferm.j2 | 5 + roles/firewalling/templates/ferm.conf.j2 | 53 +++ roles/hostsfile/meta/main.yml | 6 + roles/hostsfile/playbook.yml | 6 + roles/hostsfile/tasks/main.yml | 6 + roles/hostsfile/templates/hosts.j2 | 29 ++ roles/logging/handlers/main.yml | 10 + roles/logging/meta/main.yml | 6 + roles/logging/playbook.yml | 6 + roles/logging/tasks/main.yml | 41 ++ roles/logging/templates/logrotate.conf.j2 | 11 + roles/logging/templates/rsyslog.conf.j2 | 60 +++ roles/logging/templates/ulogd.conf.j2 | 28 ++ roles/lxd_common/meta/main.yml | 6 + roles/lxd_common/playbook.yml | 6 
+ roles/lxd_common/tasks/main.yml | 16 + .../templates/lxd_tune_network.cron | 2 + .../lxd_common/templates/lxd_tune_network.sh | 14 + roles/lxd_container/meta/main.yml | 6 + roles/lxd_container/playbook.yml | 6 + roles/lxd_container/tasks/ansible_ssh_key.yml | 30 ++ roles/lxd_container/tasks/bootstrap-other.yml | 17 + roles/lxd_container/tasks/bootstrap.yml | 66 +++ roles/lxd_container/tasks/hostname.yml | 35 ++ roles/lxd_container/tasks/main.yml | 12 + roles/lxd_container/tasks/pam_config.yml | 21 + roles/lxd_container/tasks/python.yml | 16 + roles/lxd_container/tasks/sshd_config.yml | 13 + roles/lxd_container/tasks/sshd_install.yml | 11 + .../templates/cloud-init-network-config.j2 | 19 + .../templates/network-interfaces.j2 | 15 + roles/lxd_container/templates/sshd_config.j2 | 27 ++ roles/lxd_host/meta/main.yml | 6 + roles/lxd_host/playbook.yml | 6 + roles/lxd_host/tasks/bash_aliases.yml | 34 ++ roles/lxd_host/tasks/main.yml | 5 + roles/lxd_host/tasks/profiles.yml | 8 + roles/lxd_host/tasks/python.yml | 10 + roles/lxd_host/tasks/tune_system.yml | 29 ++ .../generate_voice_platform_aliases.cron.j2 | 5 + .../generate_voice_platform_aliases.sh.j2 | 21 + roles/lxd_host/templates/limits.conf.j2 | 13 + roles/networksfile/meta/main.yml | 6 + roles/networksfile/playbook.yml | 6 + roles/networksfile/tasks/main.yml | 5 + roles/networksfile/templates/networks.j2 | 6 + roles/timezone/meta/main.yml | 6 + roles/timezone/playbook.yml | 6 + roles/timezone/tasks/main.yml | 4 + roles/users/meta/main.yml | 6 + roles/users/playbook.yml | 6 + roles/users/tasks/main.yml | 5 + 115 files changed, 2598 insertions(+) create mode 100755 bin/add_ssh_known_host create mode 100644 environments/demo/credentials.yml create mode 100755 environments/demo/hosts create mode 100644 environments/demo/lxd.yml create mode 100644 environments/demo/networks.yml create mode 100644 environments/demo/node_types.yml create mode 100644 environments/demo/nodes.yml create mode 100644 environments/demo/software.yml create mode 100644 lib/dynamic_inventory/__init__.py create mode 100644 lib/dynamic_inventory/__pycache__/__init__.cpython-36.pyc create mode 100644 lib/dynamic_inventory/__pycache__/config.cpython-36.pyc create mode 100644 lib/dynamic_inventory/__pycache__/inventory.cpython-36.pyc create mode 100644 lib/dynamic_inventory/__pycache__/lxd_status.cpython-36.pyc create mode 100644 lib/dynamic_inventory/__pycache__/script.cpython-36.pyc create mode 100644 lib/dynamic_inventory/__pycache__/serialize.cpython-36.pyc create mode 100644 lib/dynamic_inventory/config.py create mode 100644 lib/dynamic_inventory/inventory.py create mode 100644 lib/dynamic_inventory/lxd_status.py create mode 100644 lib/dynamic_inventory/script.py create mode 100644 lib/dynamic_inventory/serialize.py create mode 100644 playbook-apps.yml create mode 100644 playbook-hosts.yml create mode 100644 playbook.yml create mode 100644 roles/ansible/meta/main.yml create mode 100644 roles/ansible/playbook.yml create mode 100644 roles/ansible/tasks/main.yml create mode 100644 roles/ansible/templates/ansible_aliases.j2 create mode 100644 roles/ansible/templates/install_ansible_authorized_ssh_key.j2 create mode 100644 roles/app.galera_bootstrap/meta/main.yml create mode 100644 roles/app.galera_bootstrap/playbook.yml create mode 100644 roles/app.galera_bootstrap/tasks/main.yml create mode 100644 roles/app.galera_bootstrap/templates/galera_bootstrap_advisor.j2 create mode 100644 roles/app.galera_node/defaults/main.yml create mode 100644 
roles/app.galera_node/files/galera_cluster_status.socket create mode 100644 roles/app.galera_node/files/galera_cluster_status@.service create mode 100644 roles/app.galera_node/files/userparameter_galera.conf create mode 100644 roles/app.galera_node/meta/main.yml create mode 100644 roles/app.galera_node/playbook.yml create mode 100644 roles/app.galera_node/tasks/install.yml create mode 100644 roles/app.galera_node/tasks/main.yml create mode 100644 roles/app.galera_node/tasks/scripts.yml create mode 100644 roles/app.galera_node/templates/galera.cnf.j2 create mode 100755 roles/app.galera_node/templates/galera_cluster_status.j2 create mode 100644 roles/app.galera_node/templates/galera_flag_as_safe_to_bootstrap.j2 create mode 100644 roles/app.galera_node/templates/galera_wsrep_recovered_position.j2 create mode 100644 roles/app.galera_node/templates/xs4all-galera-utils.conf.j2 create mode 100644 roles/app.galera_node/templates/zabbix_my.cnf.j2 create mode 100644 roles/app.mariadb/defaults/main.yml create mode 100644 roles/app.mariadb/meta/main.yml create mode 100644 roles/app.mariadb/tasks/debian-sys-maint.yml create mode 100644 roles/app.mariadb/tasks/install.yml create mode 100644 roles/app.mariadb/tasks/main.yml create mode 100644 roles/app.mariadb/tasks/repo.yml create mode 100644 roles/app.mariadb/templates/mariadb_repo.j2 create mode 100644 roles/app.mariadb/templates/my.cnf.j2 create mode 100644 roles/auto_upgrades/meta/main.yml create mode 100644 roles/auto_upgrades/playbook.yml create mode 100644 roles/auto_upgrades/tasks/disable.yml create mode 100644 roles/auto_upgrades/tasks/enable.yml create mode 100644 roles/auto_upgrades/tasks/main.yml create mode 100644 roles/firewalling/meta/main.yml create mode 100644 roles/firewalling/playbook.yml create mode 100644 roles/firewalling/tasks/main.yml create mode 100644 roles/firewalling/templates/etc_default_ferm.j2 create mode 100644 roles/firewalling/templates/ferm.conf.j2 create mode 100644 roles/hostsfile/meta/main.yml create mode 100644 roles/hostsfile/playbook.yml create mode 100644 roles/hostsfile/tasks/main.yml create mode 100644 roles/hostsfile/templates/hosts.j2 create mode 100644 roles/logging/handlers/main.yml create mode 100644 roles/logging/meta/main.yml create mode 100644 roles/logging/playbook.yml create mode 100644 roles/logging/tasks/main.yml create mode 100644 roles/logging/templates/logrotate.conf.j2 create mode 100644 roles/logging/templates/rsyslog.conf.j2 create mode 100644 roles/logging/templates/ulogd.conf.j2 create mode 100644 roles/lxd_common/meta/main.yml create mode 100644 roles/lxd_common/playbook.yml create mode 100644 roles/lxd_common/tasks/main.yml create mode 100644 roles/lxd_common/templates/lxd_tune_network.cron create mode 100755 roles/lxd_common/templates/lxd_tune_network.sh create mode 100644 roles/lxd_container/meta/main.yml create mode 100644 roles/lxd_container/playbook.yml create mode 100644 roles/lxd_container/tasks/ansible_ssh_key.yml create mode 100644 roles/lxd_container/tasks/bootstrap-other.yml create mode 100644 roles/lxd_container/tasks/bootstrap.yml create mode 100644 roles/lxd_container/tasks/hostname.yml create mode 100644 roles/lxd_container/tasks/main.yml create mode 100644 roles/lxd_container/tasks/pam_config.yml create mode 100644 roles/lxd_container/tasks/python.yml create mode 100644 roles/lxd_container/tasks/sshd_config.yml create mode 100644 roles/lxd_container/tasks/sshd_install.yml create mode 100644 roles/lxd_container/templates/cloud-init-network-config.j2 create mode 100644 
roles/lxd_container/templates/network-interfaces.j2 create mode 100644 roles/lxd_container/templates/sshd_config.j2 create mode 100644 roles/lxd_host/meta/main.yml create mode 100644 roles/lxd_host/playbook.yml create mode 100644 roles/lxd_host/tasks/bash_aliases.yml create mode 100644 roles/lxd_host/tasks/main.yml create mode 100644 roles/lxd_host/tasks/profiles.yml create mode 100644 roles/lxd_host/tasks/python.yml create mode 100644 roles/lxd_host/tasks/tune_system.yml create mode 100644 roles/lxd_host/templates/generate_voice_platform_aliases.cron.j2 create mode 100644 roles/lxd_host/templates/generate_voice_platform_aliases.sh.j2 create mode 100644 roles/lxd_host/templates/limits.conf.j2 create mode 100644 roles/networksfile/meta/main.yml create mode 100644 roles/networksfile/playbook.yml create mode 100644 roles/networksfile/tasks/main.yml create mode 100644 roles/networksfile/templates/networks.j2 create mode 100644 roles/timezone/meta/main.yml create mode 100644 roles/timezone/playbook.yml create mode 100644 roles/timezone/tasks/main.yml create mode 100644 roles/users/meta/main.yml create mode 100644 roles/users/playbook.yml create mode 100644 roles/users/tasks/main.yml diff --git a/bin/add_ssh_known_host b/bin/add_ssh_known_host new file mode 100755 index 0000000..ce7e665 --- /dev/null +++ b/bin/add_ssh_known_host @@ -0,0 +1,27 @@ +#!/bin/bash + +SSH_DIR="${HOME}/.ssh" +KNOWN_HOSTS="${SSH_DIR}/known_hosts" +NEW_KNOWN_HOSTS="${KNOWN_HOSTS}..SWAP$$" +BACKUP_KNOWN_HOSTS="${KNOWN_HOSTS}.old" + +if [ "$1" = "" ]; then + echo "Usage: $0 [private key file for testing]" >&2 + exit 1 +fi +REMOTE_HOST=$1 +TEST_KEY=$2 + +# Here flock is used, because we might be adding multiple hosts at the +# same time, resulting in race conditions on writing the known_hosts file. +echo "Add the target host $REMOTE_HOST to $KNOWN_HOSTS" +( + flock -e 200 + touch $KNOWN_HOSTS + (cat $KNOWN_HOSTS; ssh-keyscan $REMOTE_HOST 2>/dev/null) | sort | uniq > $NEW_KNOWN_HOSTS + cp $KNOWN_HOSTS ${KNOWN_HOSTS}.bak + cp $KNOWN_HOSTS $BACKUP_KNOWN_HOSTS + mv $NEW_KNOWN_HOSTS $KNOWN_HOSTS +) 200>${KNOWN_HOSTS}..LCK + +exit 0 diff --git a/environments/demo/credentials.yml b/environments/demo/credentials.yml new file mode 100644 index 0000000..e558e3c --- /dev/null +++ b/environments/demo/credentials.yml @@ -0,0 +1,14 @@ +--- +# Credentials for users, services and databases. + +credentials: + mysql_root: + username: root + password: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 66333336646631366230336266633330393236643537366239393638383935316662353039366434 + 3764373836323436353465323634656138323331646139310a353433333432316437323635316438 + 36303738656663313361646362663663376638613962313933626162383233333364646332623235 + 6461613935666665340a383864313836353963336461343437356537313934646235663863393161 + 3962 + diff --git a/environments/demo/hosts b/environments/demo/hosts new file mode 100755 index 0000000..8d7fc84 --- /dev/null +++ b/environments/demo/hosts @@ -0,0 +1,9 @@ +#!/usr/bin/env python3 + +import sys +environment_path = sys.path[0] +sys.path.append("/etc/ansible/lib") +import dynamic_inventory + +dynamic_inventory.Script(environment_path).execute() + diff --git a/environments/demo/lxd.yml b/environments/demo/lxd.yml new file mode 100644 index 0000000..244a271 --- /dev/null +++ b/environments/demo/lxd.yml @@ -0,0 +1,42 @@ +--- +# We run all containers in privileged mode. This prevents a lot of +# potential issues with things like subuid/subgid mappings on +# mounted filesystems. 
+ +lxd: + profiles: + + default: + name: default + description: Default profile + config: + security.privileged: "true" + user.user-data: | + timezone: Europe/Amsterdam + devices: + root: + path: / + pool: default + type: disk + + demo: + name: demo + description: Demo profile + config: + security.privileged: "true" + user.user-data: | + # cloud-config + package_upgrade: true + packages: + - python3 + timezone: Europe/Amsterdam + devices: + if-demo: + name: if-demo + nictype: bridged + parent: br-demo + type: nic + root: + path: / + pool: default + type: disk diff --git a/environments/demo/networks.yml b/environments/demo/networks.yml new file mode 100644 index 0000000..09f87a1 --- /dev/null +++ b/environments/demo/networks.yml @@ -0,0 +1,24 @@ +--- +# By defining networks here, the named networks can be used to configure +# network interfaces on the nodes. The node network definitions are +# enriched with information from this configuration. +# +# Each network can have a property "segments", containing a list of segment +# names. These are logical segements, which are used to setup firewall rules +# on the correct interfaces / IP-addresses, without requiring that every +# logical segment always uses its own network/interface (e.g. on development, +# there is no difference between the mgmt and public segment, so these +# are attached to the same network/interface.) + +networks: + demo: + network: 192.168.56.0 + gateway: 192.168.56.1 + netmask: 255.255.255.0 + dns: + - 192.168.56.1 + interface: if-demo + segments: + - mgmt + - public + - hostname diff --git a/environments/demo/node_types.yml b/environments/demo/node_types.yml new file mode 100644 index 0000000..f9ca714 --- /dev/null +++ b/environments/demo/node_types.yml @@ -0,0 +1,89 @@ +--- +# The node types are a simple way to add node type-specific configuration. +# In the nodes.yml, add a "type: " property to reference one +# of the node types. The properties from the referenced node type will +# be copied into the node configuration (unless the node has its own +# configuration already for a given property.) 
+ +node_types: + ubuntu-19.10-lxd_host: + distribution: "ubuntu" + distribution_codename: "eoan" + python_package: "python3" + pip_executable: "pip3" + ansible_python_interpreter: "/usr/bin/python3" + ansible_connection: local + ansible_ssh_pipelining: True + ansible_service_mgr: "systemd" + auto_upgrades: True + + ubuntu-19.10-lxd_container: + distribution: "ubuntu" + distribution_codename: "eoan" + lxd_image_server: "https://cloud-images.ubuntu.com/releases" + lxd_image_name: "19.10" + lxd_profile: "demo" + python_package: "python3" + pip_executable: "pip3" + ansible_python_interpreter: "/usr/bin/python3" + ansible_connection: "ssh" + ansible_ssh_private_key_file: "~/.ssh/id_ansible@{{ software.environment }}" + ansible_ssh_pipelining: True + ansible_service_mgr: "systemd" + ansible_user: root + auto_upgrades: True + + ubuntu-18.04-lxd_host: + distribution: "ubuntu" + distribution_codename: "bionic" + php_version: "7.2" + php_apache2_mod_name: "php7_module" + php_libssh_package: "php-ssh2" + php_mcrypt_from: "pear" + python_package: "python3" + pip_executable: "pip3" + ansible_python_interpreter: "/usr/bin/python3" + ansible_connection: "ssh" + ansible_ssh_pipelining: True + ansible_service_mgr: "systemd" + auto_upgrades: True + + ubuntu-14.04-lxd_container: + distribution: "ubuntu" + distribution_codename: "trusty" + lxd_image_server: "https://cloud-images.ubuntu.com/releases" + lxd_image_name: "14.04" + lxd_profile: "demo" + php_version: "5" + php_apache2_mod_name: "php5_module" + php_libssh_package: "libssh2-php" + php_mcrypt_from: "module" + python_package: "python" + pip_executable: "pip" + ansible_python_interpreter: "/usr/bin/python2" + ansible_connection: "ssh" + ansible_ssh_private_key_file: "/root/.ssh/id_ansible@{{ software.environment }}" + ansible_ssh_pipelining: True + ansible_service_mgr: "upstart" + auto_upgrades: True + + ubuntu-18.04-lxd_container: + distribution: "ubuntu" + distribution_codename: "bionic" + lxd_image_server: "https://cloud-images.ubuntu.com/releases" + lxd_image_name: "18.04" + lxd_profile: "demo" + php_version: "7.2" + php_apache2_mod_name: "php7_module" + php_libssh_package: "php-ssh2" + php_mcrypt_from: "pear" + python_package: "python3" + pip_executable: "pip3" + ansible_python_interpreter: "/usr/bin/python3" + ansible_connection: "ssh" + ansible_ssh_private_key_file: "/root/.ssh/id_ansible@{{ software.environment }}" + ansible_ssh_pipelining: True + ansible_service_mgr: "systemd" + auto_upgrades: True + + diff --git a/environments/demo/nodes.yml b/environments/demo/nodes.yml new file mode 100644 index 0000000..86b885c --- /dev/null +++ b/environments/demo/nodes.yml @@ -0,0 +1,43 @@ +--- +nodes: + all-shared: + type: ubuntu-18.04-lxd_container + + lxd-host: + - name: sidn-demo-01 + type: ubuntu-18.04-lxd_host + network: + demo: {address: 192.168.56.150} + - name: sidn-demo-02 + type: ubuntu-18.04-lxd_host + network: + demo: {address: 192.168.56.151} + + ansible: + - name: ansible-01 + lxd_host: sidn-demo-01 + network: + demo: {address: 192.168.56.160} + ansible_connection: local + + galera-shared: + galera_role: service + # Sometimes we see issues with upgrading Galera, requiring some manual + # intervention and resyncing of cluster nodes to get things working. + # Therefore, we'll handle upgrading these nodes in a controlled way + # by hand on production. 
+ auto_upgrades: False + + galera: + - name: galera-01 + lxd_host: sidn-demo-01 + network: + demo: {address: 192.168.56.161, mac_address: "00:16:e3:00:00:a1"} + - name: galera-02 + lxd_host: sidn-demo-02 + network: + demo: {address: 192.168.56.162, mac_address: "00:16:e3:00:00:a2"} + - name: galera-03 + lxd_host: sidn-demo-02 + network: + demo: {address: 192.168.56.163, mac_address: "00:16:e3:00:00:a3"} diff --git a/environments/demo/software.yml b/environments/demo/software.yml new file mode 100644 index 0000000..6c57c7a --- /dev/null +++ b/environments/demo/software.yml @@ -0,0 +1,4 @@ +--- +software: + # The application environment to use. + environment: demo diff --git a/lib/dynamic_inventory/__init__.py b/lib/dynamic_inventory/__init__.py new file mode 100644 index 0000000..a0f6ea6 --- /dev/null +++ b/lib/dynamic_inventory/__init__.py @@ -0,0 +1,4 @@ +from dynamic_inventory.config import Config +from dynamic_inventory.inventory import Inventory +from dynamic_inventory.script import Script +from dynamic_inventory.serialize import convert_to_json diff --git a/lib/dynamic_inventory/__pycache__/__init__.cpython-36.pyc b/lib/dynamic_inventory/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f96198faba669b1e6fc5eca88c5c8b660bfb5130 GIT binary patch literal 367 zcmZ{eJ5B>J5Qgo2B|${AkvPFN>jKIsLZU)QH0fN#+Uy!M$d71jkd~ux1Fpd_(o%5+ zD#j}%O~BTSKaKzSXRhY+v-|hu=94k@MR7bC(G5korqr{Z8)muWJaJ|+o6CIU+!VHy z<;aDp>_kq8%QdU3Z|Y<<-&eQZJ;D>w?9RPv7d;NCyzUTok?5lHRL7{ISI@z_hk7a( z{oq;)oq|J4Ln^Ytw3)m-{XPmYPl`Vb!Z;9%e++^~Xbrq+EJ^Xfu%c&7KaJ3_6Riud iHChZ1E7`DgrmCbHaN>fO~0U;G>q;~hvOAi$)DiR0KO27d$2dv53n@*UWS?o#5 zjy7DjSHzuvz+bY&i4%W;6VL0}4M_wZ`L!RvW50gh4?i1?24DR4;`={B#{OhC8Zdi_ zZl6Oarg+5`Jm;L|K@}`SF8o|n;UdZ-&OT=`{S~@>1hM98CfS;u^9|!P1~e*$ z-F%t}cTd-srD^Jgu3Nb*+Sl$xR-ht_p4KfA)*biCnYVgjineTOPu6CE3$3o^CWh7D znHgaC`^iMNv&r_TNmZUs)T%BPz~cxw_E>nF0H+>@9`6AU zJdQlx2OfGHd!LP_T~hs|#;Zl;1SZ`f@>(H?Wl>qbx_@J#8`ipoH-G3==?-;6+sxZH zh0%UXxo%kA>uUz{Z_l13NG+y{O0E{O*ujz~Nt@g79ReJt+A zAccyySPBH=_pZP0!&_14GxJuKUl&@0ofJ@?dGwM{=D}~sxq0xAZTPPrTm@~g4yXXn z#YS9-wKxwq>?-^g<@Xi)ntjLCA<8hAcF|)yd2F-Dh0RjezJd$|*t`RgAJT(j%lZsY zioz3;MLnw#DTDcIbAX8pNp+X$`_P=ATcXVP1Af5o^KQ6fNgkZSmnb|+b}8LJ%0;6t zU`e{6ls{e+Rj)DRE1$iF&N6iJ4QrIm#wR#AhDz6)OnDlmNtzm3on~o%vMsssvM@wx zyebyexUVr!IX}Alhp#SQboryZI^w+D>4{z`PaBi(iVi=tG+ar{DLV zGqV>_j$INn=ggUNoB#aR@0@*ddb)JuuU2keyyiIn<_!G>$iILq{tj-+QEt5)TypFno;!*t5xHut|Qhgyy2oeM%y_*3Z6Dug$DQ6Z zb6GD*;a05~=~{2fq>rn1tF3!8_Gw!;wrYAWdyXT0r%{g*f9uv(Cn;?2t~DF=Bw)v_ z+E!HOXQzKIGB4nYXK{<1imRN8r`$?Tc_2|PnZMS$6SX>Rz4uDD9&L9T?N;x(FVz}x zq{2oktk>cw>}*6j3TwEwbsKcsio?5&&IZdvwTB@ab=0-plvkJiB&b$7lWH|7RjXTV zwF}Z#lc{QTXSde0Z;I8bYS*jPKXLR?%uQ}TyL>t7)Gu3lU2ZnkF53aB*#MWbTzPv> z&!FjPexHY1;L7s|0a*S5EBq9<0RfcvO$S8CDIck?@~VI|uZk)_S|G5Mw5P9Z&_POFEI&Z>vi8KiUS5fvhx zSD#R4ksei_ROgT`sPpOq(qn2#T|~O5mer$3kE_SjC8Q_Ra)p2wy1S}KL42V`Yf(^9=E>RcdAa`x$Pb}6g@O_SC_q{ z0Ipnb+)8GuSnPTf$JJI_MKRv^anxMbi)cGoFuj0BE_b8V=v1pAo?;(2$1S+M0dJMl z3U%@$|MK>d`TPeSI!?zu;GDb$W>wAM*_Y>hcN?=T+|J!D9yl*MZ-4!3-oCr<_1)Wn zLJNJp?cG?e?uEXG*0cLQ>P+M7eW|m4 z1NCf)ZFPfGnITn>9xo2s3+*8xKnFEV@%fD&1O`ml%xI|(avtf;t8qG{*&oyZF{@9mo}cYk9=3{>}^NM zLsjU;S|_R+U>NX_be!wiWYqO!Gt@)&dYShw=`bMfgt9V(%6w*>M zUNV<gcBDId z)pgz8s&XJh?76{|t%hID>k~qA^SwT2T$xyJYdgBnhGX)fkE6voTyYsU$1OoPO70@A zDR%}}$({FR-Qdvw-ib_>%cBxm&#^Q7G36zqM19!zz_<|bl}xN%{RqmJb24}RB=Y(x z-ZI52a;(py?0H~0qH#g3;uQ?lqB zxbHy9Z|C}Wf7>^u(!xbPw_+yGJl@+6@K+O`Mx%H9<=t(VI&e@(GKVa@8lI2kij-bx6u2?8?ds?X8W#KR!*HJm{wM}vAnm{ 
z4C$qWop!i}s;zK+N43f!n%!+Qn<33~DIEOORC~LF_KjF7hP62BY$*;?ccC0!wgP`JT&vXrJH9`uraH>!LrJVQ)3bfen^@5n3JMYlLxykC%%>GEAXP zCdeG(ZfO!V$!$h^Ne-%ZoS&EQ_H|tGDcl@zatZtl-2;who_gp6x%>DK(T9yeMr_*d zjM(qsq1)$Y;^Xzc5QK!?hXnUM5H8#aj5MQ;3-TBBYaMu>whS_V(pmS6Ug%yib<8{J z2U$KYL=JWvb{(y6Vi~+XG{w$M5G}AoqmH0`>OJ=5)@G}Hw-v^FaKE=g7KTgbg=a(3 zol-_1&4BHFx*EOm7ZK((wxLQyvxJC`5X~6sq!COg7EVY9)jBCp;sV6Nl1Z)!iKIdm zLZ#Z2qF*TuDvuNBF}!c$ipf9@nD5jmLB?E!!wIsErX7eCop{4BARA1GI0thK#xyII?#imi1*<0FOi4%QGt#lw2EZFKtQB;*Tim}s=HA$i=`f(sUI z)i$FzjCY}KrO$Q?h3(deif^xr;kxHmiyIwG%7hGp*w@SBFm7;m!NE`MH~=I zdF3lclZ^TtKbu&&6}9lK`uRQ1gA(gI9`p$G#%G4~#)tUe<&|c;UTenBm$SZBHK!w% zSig*1h9ubKMa1r7)Q%{^qyQP*h5n;#Td}SbZOy|bt(Q@82Ukqz(+L)G1x%OD(xThD z@X;onEd!T;_T{s<;`ea7MG40OtfV=3QQbRI2Fn3i-q^1-$9X8!YPJ4Qy1AIwG}AJ0TQl0F5wF`|6lR5VU3q{+P;? zgoS9Vl86O75!Qnfr5fw&@YGtJ1{~QDfdPmVMXh0Bv+BfPFAX{Fa? zWi!2jhn9QM@_86*C-k3MKKTe6altn4dn? zyyv;Sa}Q+yp~yI6e%fXVfG|H@iOj@V+fP8`ws!#CbC0IWQN9h4@~c@VsUmv=?FIFt z1ic+lmHoE2LO;WAHLY z77tex`Vd*uNyzw@G3*aHKEK!Tdgt#a>mj3ORN|}|gbC}uiVWHF0DhN=cxf=p~j>z%vM zI#da_wosgSjcqumT~#S@!a%+{yYX@`0IqCLK>4M<0i8}&xd*@oT%IZ*eE=RB@Fxr* zeR=&WynTsNIi7Osppx{VzsxH1YoSvFBns>hab=EBeFk$e%C1bLgT>>jWffig0WQI5 z9jFq7W?liHk%v5R)raq2njyit4~P(+zkvR}?S9)^N7O*B|4kzS#J_`gJ!ld*$#kJX z^1}VPM0lTG_q#4Q9u5(>A1jM!>`VvU^&Jyzyle3dls}8|+`B06I|tr7u+WAN-bGuy zeG_lK+p6ecasVj{U794kcguMXuI#36;?@U4|*OXFOOAar3Y-%5hkFxgw+mN^$P!nH6^})PFSN$!D zj8SlV&wjj_4J^xGM+}3Adb04)SJMGuC}eOc5b}w(x`Fq9&KXeL9Cv0G^cl9b52Q~f z9rTDNaEZeDJ7gMAP~9Ix1zB7B_MaKpq2qO+jtu0`{d(Vr%lXzmolT%Y5QG8*p#TH$ zX!nIa@)^=|1+9zwfIf&T^@}La14+V01~8u@2%hT)CW^R+-rvA}X*T%QoN zWXC~sC(YQ3XW7yy#jJT1E?TO?^h>C)TrxKLD{OY1w&Lx z`;^{Wc=7kR#KbzT5441c6y;%2WMRr(^l$;A>V0B}-)tl_ri<(3bpDY)n3rYVK;{-e zzJ87iMe+kt^7QBXKJ7Tm3=(aYF~w-H*z&HQ-^|&%d8vn1h&o(VU_T~Ka2>!g*tfz` z6jucTHJ|+%j4U+o<jE-X2Le3nz45$cG4QTr3V<&P+?t(;>|=dOQ@L?!#4o86Lt%1PZqeEO;vJgosGwDHg?Ru z2v^6L{Wq8!qxYLA`&V4X^#{EdMQPLVr>dTUJ%8GJ}$iF36D z$fuH7t9nCeSq@7>y@q-^3*MC(owR|Y|(=ehUB^kNSx~Evv4r8pdC5jo+6sm&L+esN^du}EztWKI!Vr@TSW+$ ztM!ejzFD=W4tThuW%fvulW8s4bx4;_b}5f%uc9>@jLO8pfMTXZhQ}D^ag6*wxCGa6 zAg0uBBY1mC37}VTW~%u4nGX9eJXqiB68VFl89zj5iWo_2mVOuV@T2Tro9Hf;hn{=Q zkV+M}^G%|tf&g0AnKwF46;bam!DBddBiO2cJAb=i&{g>Df|{z5_;5eKNRwuyAsKi1 zRuI)Gq|l8fpt8MBqxXMTGvYb^Guj{FZlKJfi1Jy~nv+(TJ@x6J{qp>3GJpR9z1n+v zl|Y|(YN`#m6tH2qQM)6%WB1ojQ9@%F^8%D5Q)xYF#!4=ZH!_(_DIasu&fkifO4ir0u~V-3-{$D%1+!?;?~E-<5hGzhv-zoKHq_N|Kk0E@nJkVaQHH~ zPlUR6^+A*&?ji&;yWfnNhW%^o0~kMn;Y?+cQ3~)O?mPCL8FUGZCl2xMQn-fAzonsC zO9YCz3RcmQa5ayX=)e-Wp_* zO8rd>D$42`$XH38;%TdH%SZv^Vpz(X#)`r!LYc|F*h`Q*E4u@U>ON)OQ>JXo5G{Rp zB{;SOkK{UREbZ-exR*Jxs@GvvPeJX8L)iP^6**(Uo#kwq)1?upFI|v$kln&0FR1!BqtYWiOYvt^>a_zWuF~r%(<~13!HMr#1i&^!-KWIQ(MxZwPqo z83uaJ(9sfzF^xL^Sf#S&jFrG^89^RjJzu0c}kxDSe~cwJT1>Z zlILkW&&cyX$@2`(O`z3DXUft4jT|fsbC};J)f{H!s(E8C>^$>o`lR!otIwu&nMdt& z52%g)P-9gc~PMn`$CPJo65(hdg>a{Yn?ffsIYm5SXJIji;*!bcoEl-d2HUqmel8F6O-7)aa^ zr~vD5`W|8q7hkdS6X{{1`z`6=*Q5tf?mbUGYx@(_-dxj=Cgt$^>=kftO`y#^Ckp4cslkV$%9+7x$zob6VFD*Lzll@6( zw*qM8>3>jX5&TZd85yjn?86rL+*)9r{v`AtwIkk1|K84bt!sCFwNkp9d11!-T}bx; z1}{Q_(%4=Rp%x73YS=rMO1xOaQLz_@iB5so!^*@)r?Xw%MilnGc+Dj~V}lO`jlY2m z1KCBvt4UtUE0c(p>rvG_%)?$*H=}49C*bbjOxm5scJCQTHV?#wLsK*w0}3gs*U%(f zYeUPi3)x+gzQW!#H(698uHh`M3VRO&3>*0hZVYkSV&z452|^=d;Y1L@v!$WQQUBTD z(zh~@81mjD)Xq`MjK!YmJ`Bw`@#{D_*IQ2Uv$PAAE@gXELk%jW0UauHcN)5bGr<5{ zQzY)W`$;u2{-7(Pw_ELS&|%oSXnPE=z5aNL<)y}DgfLL$5-Bp&N_?)OrkgihtnZ>g z@8Z@goX1vt+`Nu>C~IY2-%0BL?VAHK-Ovags-3o3dCdb&y-AIumUt0Y5pofA>b?B` zT>R0czmfKmQvIVY>n~VTz~wod{xY_X7EXW)EOmPuXNyQyf^`N1ni@D{Nx_I%lIQBj zT0px+)U1>YW0UW77X^vi6v}8)FFh}E7`D;C`zL51zmE((IUoN8m`3Bd(Pv{gdHDZ= 
z>%r?=fJrqep7D>vPB63qfF$Gl0WI--3B}km{}<$WQAXl($t&5s9NqL5(Cbu@&=kj? z_eKCI8^R6P^ze9=p+$TFx6z#_gY_ZQLJ*JbC756sLu}eD=X-0{A$i%cR*`VCv5X%F zY?`fH%4Q=yi4XDE3$a$j&+%ew*qy^3Zp|KGIh%EPcJwp^L(Zd|CHKqHO)s0>9vo>2-z22_t^Hj0n;8_^O@U^*%KLDgaR4Px z$WB0)n+W#kQ1dxQhb}#6f*&^6a=JOa;sLa+Lpa1zxd*3 zKW;ZLn|q+#kZ6ik`Bjuz(7Q46+Rc{pjjF;}G;ZX7Lfr0F}p>?1}&bD*oS>%}_wO0aW?LFt*fIsSk1vX3ijEiGm zuJaYA<-L`GjBXPAaJh}vbuam zA3;@34Ws9HJIUK|-p=wyi=v<5ZHc#wyz$JX9MaN{GFRq}+x>c#w;Q~@#T)k#^qag9 zwADl_HIWp(!5b~HzQP-|lct1e5?=EwhE`>81kiH7H5gg5ZzsUL~-m8lJp}q6w#XsjtdQ?2NMd-sgzuLND;JRqm^SNvgGWpgJa}V z3eAt`q1XJH-uK#5|3XilStoL+jb?_?X!gA~v#&dy*3aAi*Iy1HzsZ9igZLdhY9EXs zf)*rRcp6i|g!7HWOt``W?+9P4fOkb8R>6CMej-up4}3|EQi=|K=;0Ld9v;;Kqlu<9 zxn@vHk0NHA5g(h?q?n4N$d!($@rISs>Xt|k%KyIVWjgKgQspN_)+_Rpp13OcC{L5T zyv#~n$*UfItTfjXwKu*pou*mBivpV{*az+KcA;=RsBEQq% zn$GEWGN6`aFlU<8Ogm?8O|PkC(K<4YQo|YRc4McQrp84cGr!v%aPR^6N{+ZT?wP8} z=JW`q;C-+l4LdR}%3w7BP3UPvkY`!6jtr4Kzg0~PL)g83VT4#p{_lGm5!h@W@L(YAqvQ7p|q1R5yf z0_9}RrY~!HNWQ$7JJa3TfoOZ~!c}muY0YXE<>-@(-D7gGc}!}&9&Gku0VL!hu2fhJ zLRjcf53}&&=fkjpry#|3$YmDFNm=IQX;_s_nY~SWR-lO4D_}uKp2Ug7Tgx`d7^q@- zybk9q>8rT4EHbN2{J(Hph|+FfQ=*>38F#7$upBM;Kc(^sWFy}$o5by1z`|9s*zEfU b-Iuco`lCuritGTdk6pQd22S7xYeDND*uN3N literal 0 HcmV?d00001 diff --git a/lib/dynamic_inventory/__pycache__/script.cpython-36.pyc b/lib/dynamic_inventory/__pycache__/script.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e881ec47143f7e163a305a52856eba52c06836f GIT binary patch literal 1501 zcmZuxOK%%D5GMDf^-6vuKu|a-kWG+_fW$f^$jJy2d^sehrTd_!6Cqg{c+;bVsLeKVt%+uPZ%-w%I(6fyQUyYXSo-eQ<% z5Ryq=v(akiBmN7MfegPf8LD6zjzYs)H`%Zxx&8tuZ!yd(2*pNRvQZ$p48Ac~ zg)+h%$yg?sW7&}@=0tAE40A_z;kDTaqvOLJZR1W0;2!mQDDAFZtI1TCfO~ ze8EI08LauRAGv$-qAAX*QdG^kYHX`7T=KqcPO7on`movB+MM=77a3KbxINXJSGsM^ zU?rx7J#|Ux?fzmq!^8hRA1GT6ipEsOwHnmb@!&sC4UCWeV0z(Ffe0!qL_(1h!Hj2o zzPrMAus$t0o!}#o8=%jzSg;9OvabL;0g->C`e*C}OU7B#gBH^Ml5?bK4ZVx7wNawb zW8uR#TTxeQjrRJ1?!di^=$c2i5?$Kts4uFVSw(q@mEw`!Yt^5TUDl^G;h39D!7(|qSQ;LMA<&IzVLVw0k@xb8YqofNa$f)rIx$=Z_> zd+Yh*qto_teqQLRKw|TgR_8@-s%DJE6=4s6OaBCc0!zW0=h^MN)8iSH%SWPJyIvtB zk?;3hcAbnh<$QXFZ@mql_b|*J1mis(@dS)V_@#V)e_brsu5R&8&Ea`JfI{^$0lQQ) z5>0_sSfP4kaL8J^V_)$SIU zsNN^hg732%2+;1aeuB$BDE*W)dPC&#~L#5GTWaO8rZwLbyzb zGt^~GdQXTyXGOg_(LGq}`y?ojnviPped9f9Bu%ZQ>8%*U&{oRRDD9+K8vFfzr1$BL dXE$k&C#tmm;Qu_i^l%l=8~X3?e=Tqq{s8cdVb}lw literal 0 HcmV?d00001 diff --git a/lib/dynamic_inventory/__pycache__/serialize.cpython-36.pyc b/lib/dynamic_inventory/__pycache__/serialize.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f6045146bdd736b5702fa1863c51f03c571086 GIT binary patch literal 763 zcmY*X!EV$r5VaF$H@kF&gb?D;LxhCd9M~M-z@b7Q)Ju_;L%Rs=fhZe$i$h{Zwo@sq zoG4d*fIr}ueC5<%;KbMssKoKu;~CF;Gvnur#r)lmlTYgapW{7xVySzX*XHfd;QS}K!&VZF8!E+`YmPAS_t2(Xe$ zOJO70HLbRfbEoB_HP=k>qUL+1fE%HODSje#52F6gGG@xGxMiEwe4Ei;6b&zPF0KGU zsy_R7d)fB(eks8KJdE@hk>l-p?lCl z8)~DJ%i7K^0Q6KTsVs5$HqLV>Aj@+ob#ztK9rKmG6$fxd%|N*6TaNJD4+1jA0Ss?X zBQ@W!Nh4`jvsdo1Y4_SZ8WbN~PV literal 0 HcmV?d00001 diff --git a/lib/dynamic_inventory/config.py b/lib/dynamic_inventory/config.py new file mode 100644 index 0000000..c516f2f --- /dev/null +++ b/lib/dynamic_inventory/config.py @@ -0,0 +1,31 @@ +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.vault import get_file_vault_secret +from ansible.parsing.dataloader import DataLoader + + +class Config(object): + def __init__(self, environment_path): + self.environment_path = environment_path + self._load_ansible_vault_secrets() + self._load_configuration_files() + + def 
_load_ansible_vault_secrets(self): + secret = get_file_vault_secret( + filename="~/.ansible-vault-password", + loader=DataLoader()) + secret.load() + self.vault_secrets = [("default", secret)] + + def _load_configuration_files(self): + self.lxd = self._read("lxd.yml")["lxd"] + self.credentials = self._read("credentials.yml")["credentials"] + self.networks = self._read("networks.yml")["networks"] + self.software = self._read("software.yml")["software"] + self.nodes = self._read("nodes.yml")["nodes"] + self.node_types = self._read("node_types.yml")["node_types"] + + def _read(self, filename): + with open("%s/%s" % (self.environment_path, filename)) as fh: + loader = AnsibleLoader(fh, filename, self.vault_secrets) + data = loader.get_single_data() + return data diff --git a/lib/dynamic_inventory/inventory.py b/lib/dynamic_inventory/inventory.py new file mode 100644 index 0000000..33d9c7e --- /dev/null +++ b/lib/dynamic_inventory/inventory.py @@ -0,0 +1,444 @@ +import collections +from collections import defaultdict +from ansible.parsing.yaml.loader import AnsibleLoader +from ansible.parsing.vault import get_file_vault_secret +from ansible.parsing.dataloader import DataLoader +from dynamic_inventory.lxd_status import list_lxd_containers +from ipaddress import ip_network + + +group_types = { + "ansible": "singular", + "lxd-host": "cluster", + "lxd-container": "cluster", + "galera": "cluster", + "galera-donor": "cluster", + "galera-primary": "singular", + "galera-service": "cluster", +} + +network_segments = [ + "mgmt", + "public", + "hostname", +] + + +class InventoryException(Exception): + """Raised in case there are problems with the dynamic inventory.""" + + +class Inventory(object): + def __init__(self, config): + self.config = config + self._process_nodes() + + def _process_nodes(self): + self.groups = defaultdict(list) + self.nodes = {} + self.shared = defaultdict(dict) + for group, data in self.config.nodes.items(): + if group.endswith("-shared"): + for_group = group[0:-7] + self._add_shared(for_group, data) + elif isinstance(data, collections.Sequence): + for node in data: + self._add_node(group, node) + else: + self._add_node(group, data) + self._apply_shared() + self._apply_node_type() + self._create_galera_primary_group() + self._create_galera_role_groups() + self._validate_groups() + self._enrich_network_data() + self._enrich_nodes() + self._create_all_group() + self._create_lxd_container_group() + self._list_unmanaged_hosts_in_inventory() + self._clear_managed_property_from_all_nodes() + self._validate_network() + + def _add_shared(self, group, data): + self.shared[group] = data + + def _add_node(self, group, node): + self._enrich_with_hostname(node) + if node["name"] in self.nodes: + raise InventoryException("Duplicate node name: %s" % node["name"]) + self.groups[group].append(node["name"]) + node["group"] = group + self.nodes[node["name"]] = node + + def _enrich_with_hostname(self, node): + """We allow the name in the nodes.yml file to be an fqdn. We will here + adopt this fqdn as the node's hostname. 
The node's name will be set + to the bare hostname, without the domain.""" + node["hostname"] = node["name"] + node["name"] = node["name"].split('.')[0] + + def _apply_shared(self): + for group, shared in self.shared.items(): + if group in self.groups: + for node_name in self.groups[group]: + node = self.nodes[node_name] + for key, val in shared.items(): + if key not in node: + node[key] = val + if "all" in self.shared: + for node_name in self.nodes: + node = self.nodes[node_name] + for key, val in self.shared["all"].items(): + if key not in node: + node[key] = val + + def _apply_node_type(self): + for node_name in self.nodes: + node = self.nodes[node_name] + if "type" not in node: + continue + if node["type"] not in self.config.node_types: + raise InventoryException( + "Unknown system type '%s' used for node '%s'" % + (node["type"], node["name"])) + for key, val in self.config.node_types[node["type"]].items(): + if key not in node: + node[key] = val + + def _validate_groups(self): + for name, group_type in group_types.items(): + if group_type == "singular" and len(self.groups[name]) > 1: + raise InventoryException("Multiple nodes defined for singular group '%s'" % name) + + def _create_galera_primary_group(self): + """The 'galera-primary' group is used by plays that create databases + on the Galera cluster. The group simply makes sure that only one + of the cluster nodes is in this group, making it a feasible target + for such tasks. A primary galera node can be explicitly configured + by use of the 'is_primary' property in the nodes.yml file. If not + explicitly configured, the first defined Galera node will be + used by default.""" + primary = next((node["name"] for node in self.nodes.values() if + node["group"] == 'galera' and + "is_primary" in node and + node["is_primary"] is True), None) + if primary is None: + primary = self.groups["galera"][0] + for node in self.groups["galera"]: + self.nodes[node]["is_primary"] = node == primary + self.groups["galera-primary"] = [primary] + + def _create_galera_role_groups(self): + """The 'galera-donor' and 'galera-service' groups are used to differentiate + between galera nodes that are preferred donors for the galera cluster and + galera nodes that are included in haproxy configurations. 
+        This is used to make sure that nodes that are used as donors won't affect
+        the cluster performance."""
+        self.groups['galera-service'] = []
+        self.groups['galera-donor'] = []
+        for node in self.groups["galera"]:
+            role = "service"
+            if "galera_role" in self.nodes[node]:
+                role = self.nodes[node]["galera_role"]
+            if role == "service":
+                self.groups["galera-service"].append(node)
+            elif role == "donor":
+                self.groups["galera-donor"].append(node)
+            else:
+                raise InventoryException(
+                    "Illegal galera role '%s' used for node '%s'" % (role, node))
+
+    def _create_all_group(self):
+        self.groups["all"] = {
+            "children": [group for group in self.groups.keys()],
+            "vars": self._create_shared_vars()
+        }
+
+    def _create_lxd_container_group(self):
+        self.groups["lxd-container"] = [
+            host for host in self.nodes
+            if "lxd_image_name" in self.nodes[host]
+        ]
+
+    def _create_shared_vars(self):
+        segments = self._create_network_segment_data()
+        mgmt_network_name = segments["mgmt"]["network"]
+        lxd_hosts = self._get_lxd_host_ip_addresses(mgmt_network_name)
+        return {
+            "credentials": self.config.credentials,
+            "software": self.config.software,
+            "segment": segments,
+            "ip": self._create_ip_address_data(segments),
+            "lxd": self.config.lxd,
+            "lxd_status": list_lxd_containers(lxd_hosts),
+        }
+
+    def _get_lxd_host_ip_addresses(self, network_name):
+        def _get_ip(node_name):
+            node = self.nodes[node_name]
+            if "network" not in node:
+                raise InventoryException(
+                    "Missing network definition for lxd host '%s'" %
+                    node_name)
+            if network_name not in node["network"]:
+                raise InventoryException(
+                    "Missing '%s' network definition for lxd host '%s'" %
+                    (network_name, node_name))
+            if "address" not in node["network"][network_name]:
+                raise InventoryException(
+                    "Missing address in '%s' network definition for lxd host '%s'" %
+                    (network_name, node_name))
+            return node["network"][network_name]["address"]
+        return [
+            _get_ip(h)
+            for h in self.groups["lxd-host"]
+        ]
+
+    def _create_ip_address_data(self, segments):
+        ip = {}
+        for group, group_nodes in self.groups.items():
+            ip[group] = defaultdict(list)
+            for node_name in group_nodes:
+                node = self.nodes[node_name]
+                if "network" in node:
+                    for network, config in node["network"].items():
+                        ip[group][network].append({
+                            "name": node_name,
+                            "hostnames": self._get_hostnames_for_network(node, network, segments),
+                            "address": config["address"]
+                        })
+            if group_types[group] == "singular":
+                for network, ips in ip[group].items():
+                    ip[group][network] = ips[0]
+        return ip
+
+    def _get_hostnames_for_network(self, node, network, segments):
+        hostnames = []
+        if segments["hostname"]["network"] == network:
+            hostnames.append(node["hostname"])
+            short_name = node["hostname"].split(".")[0]
+            if (node["hostname"] != short_name):
+                hostnames.append(short_name)
+        hostnames.append("%s.%s" % (node["name"], network))
+        return list(hostnames)
+
+    def _create_network_segment_data(self):
+        data = {}
+        for name, config in self.config.networks.items():
+            if "segments" not in config:
+                continue
+            for segment in config["segments"]:
+                if segment not in network_segments:
+                    raise InventoryException(
+                        "Unknown network segment '%s' used for network '%s'" %
+                        (segment, name))
+                data[segment] = {
+                    "network": name,
+                    "interface": config["interface"]
+                }
+        return data
+
+    def _enrich_network_data(self):
+        for name, config in self.config.networks.items():
+            net = ip_network("%s/%s" % (config['network'], config['netmask']))
+            self.config.networks[name]['network_cidr'] = str(net)
+            if "segments" not in config:
+                config["segments"] = []
+
+    def _enrich_nodes(self):
+        for node in self.nodes.values():
+            self._enrich_and_check_ansible_connection(node)
+            self._enrich_with_network_data(node)
+            self._enrich_webservice_data(node)
+
+    def _enrich_and_check_ansible_connection(self, node):
+        if "ansible_connection" not in node:
+            if not self._is_managed(node):
+                node["ansible_connection"] = False
+            else:
+                raise InventoryException(
+                    "Node '%s' does not have an ansible_connection defined" % node["name"])
+
+        if node["ansible_connection"] == "local":
+            self._clear_if_exists(node, "ansible_host")
+            return
+
+        # Ansible connection already fully specified.
+        if "ansible_host" in node:
+            return
+
+        # Ansible connection using ssh, but the ansible_host is not yet set.
+        if node["ansible_connection"] == "ssh":
+            mgmt_net = next((
+                n for n, c in self.config.networks.items()
+                if "mgmt" in c["segments"]))
+            if mgmt_net not in node["network"]:
+                raise InventoryException(
+                    "Node '%s' does not have the '%s' management network defined" %
+                    (node["name"], mgmt_net))
+            node["ansible_host"] = node["network"][mgmt_net]["address"]
+            return
+
+        # Ansible connection using lxd, based on the configured lxd_host.
+        if node["ansible_connection"] == "lxd":
+            if "lxd_profile" not in node:
+                raise InventoryException(
+                    "Node '%s' uses lxd, but 'lxd_profile' is not configured" % node["name"])
+            if "ansible_host" not in node:
+                node["ansible_host"] = "%s:%s" % (node["lxd_host"], node["name"])
+
+    def _enrich_with_network_data(self, node):
+        """Enrich all network configuration blocks in the nodes with
+        network configuration data from the networks.yml config file.
+        Properties that are not defined in the node config are filled
+        with properties from the network config."""
+        if "network" not in node:
+            return
+        for network_name, node_config in node["network"].items():
+            if network_name not in self.config.networks:
+                raise InventoryException(
+                    "Node '%s' uses network '%s', but that network is not defined" %
+                    (node["name"], network_name))
+            for key, value in self.config.networks[network_name].items():
+                if key != "segments" and key not in node_config:
+                    node_config[key] = value
+
+    def _enrich_webservice_data(self, node):
+        if "webservice" not in node:
+            return
+        listen = []
+        certs = set()
+        for network_name, config in node["webservice"].items():
+            if network_name not in node["network"]:
+                raise InventoryException(
+                    "Illegal webservice listen definition: " +
+                    "network '%s' is not defined for host" % network_name)
+            config["network"] = network_name
+            config["address"] = node["network"][network_name]["address"]
+            if "http_port" not in config:
+                config["http_port"] = 80
+            if "https_port" not in config:
+                config["https_port"] = 443
+            if "http" not in config:
+                config["http"] = False
+            if "https" not in config:
+                config["https"] = False
+            if "force_https" not in config:
+                config["force_https"] = False
+            if "use_keepalived_vip" not in config:
+                config["use_keepalived_vip"] = False
+            if not config["http"] and not config["https"]:
+                raise InventoryException(
+                    "Invalid webservice config, because both http and https " +
+                    "are disabled " +
+                    "on network '%s' for host '%s'" % (network_name, node["name"]))
+            if config["force_https"] and not config["https"]:
+                raise InventoryException(
+                    "Invalid option 'force_https', because option 'https' is " +
+                    "not enabled for the webservice " +
+                    "on network '%s' for host '%s'" % (network_name, node["name"]))
+            if config["https"] and "cert" not in config:
+                raise InventoryException(
+                    "Missing option 'cert' for the webservice " +
+                    "on network '%s' for host '%s'" % (network_name, node["name"]))
+            listen.append(config)
+
+            # When keepalived is in use and the webservice definition requests it,
+            # the virtual IP-address of keepalived is assigned as a listen address
+            # for the webservice.
+            if config["use_keepalived_vip"]:
+                config_vip = config.copy()
+                if ("keepalived" not in node or "virtual_ipaddress" not in node["keepalived"]):
+                    raise InventoryException(
+                        "use_keepalived_vip enabled for webservice, but no keepalived " +
+                        "virtual IP-address defined for host '%s'" % node["name"])
+                config_vip["address"] = node["keepalived"]["virtual_ipaddress"]
+                listen.append(config_vip)
+
+        redirect_to_https = []
+        for l in listen:
+            if l["force_https"]:
+                redirect_to_https.append({
+                    "network": l["network"],
+                    "address": l["address"],
+                    "port": l["http_port"],
+                    "https_port": l["https_port"]
+                })
+        service = []
+        for l in listen:
+            if l["http"] and not l["force_https"]:
+                service.append({
+                    "network": l["network"],
+                    "address": l["address"],
+                    "port": l["http_port"],
+                    "https": False
+                })
+            if l["https"]:
+                service.append({
+                    "network": l["network"],
+                    "address": l["address"],
+                    "port": "%s ssl" % l["https_port"],
+                    "cert": l["cert"],
+                    "https": True
+                })
+                certs.add(l["cert"])
+        node["webservice"] = {
+            "redirect_to_https": redirect_to_https,
+            "service": service
+        }
+
+        # Register special groups for the certificates that are used,
+        # so we can use that group in our playbooks to decide whether
+        # or not the current host needs the certificate setup.
+        # The group name is "requires_<cert name>".
+        for cert in certs:
+            group_name = "requires_%s" % cert
+            if group_name not in self.groups:
+                group_types[group_name] = "cluster"
+                self.groups[group_name] = [node["name"]]
+            else:
+                self.groups[group_name].append(node["name"])
+
+    def _list_unmanaged_hosts_in_inventory(self):
+        """The unmanaged hosts are marked in the inventory by adding them
+        to a group "unmanaged"."""
+        unmanaged = [
+            node["name"] for node in self.nodes.values()
+            if not self._is_managed(node)]
+        self.groups["unmanaged"] = unmanaged
+
+    def _is_managed(self, node):
+        return "managed" not in node or node["managed"]
+
+    def _clear_managed_property_from_all_nodes(self):
+        """The 'managed' property is only used for building the inventory data.
+        It has no use beyond that. Therefore we delete the property from
+        all nodes."""
+        for node in self.nodes.values():
+            self._clear_if_exists(node, "managed")
+
+    def _validate_network(self):
+        ip_addresses = set()
+        mac_addresses = set()
+        for node in self.nodes.values():
+            for network in node["network"]:
+                config = node["network"][network]
+
+                if config["address"] in ip_addresses:
+                    raise InventoryException(
+                        "IP address %s of node %s is used by multiple hosts"
+                        % (config["address"], node["name"]))
+                ip_addresses.add(config["address"])
+
+                if "mac_address" in config:
+                    if config["mac_address"] in mac_addresses:
+                        raise InventoryException(
+                            "MAC address %s of node %s is used by multiple hosts"
+                            % (config["mac_address"], node["name"]))
+                    mac_addresses.add(config["mac_address"])
+
+    def _clear_if_exists(self, node, key):
+        try:
+            del node[key]
+        except KeyError:
+            pass
diff --git a/lib/dynamic_inventory/lxd_status.py b/lib/dynamic_inventory/lxd_status.py
new file mode 100644
index 0000000..b652371
--- /dev/null
+++ b/lib/dynamic_inventory/lxd_status.py
@@ -0,0 +1,37 @@
+import json
+import subprocess
+
+
+def _lxd_list_all(hosts):
+    return map(_lxd_list, hosts)
+
+
+def _lxd_list(host):
+    output = subprocess.check_output([
+        "ssh", host, "--",
+        "lxc", "list", "--fast", "--format", "json"])
+
+    def add_host(g):
+        g["host"] = host
+        return g
+    return map(add_host, json.loads(output))
+
+
+def _create_container_info(g):
+    data = {
+        "name": g["name"],
+        "status": g["status"],
+        "host": g["host"],
+    }
+    if 'homedir' in g["expanded_devices"]:
+        data["homedir"] = g["expanded_devices"]['homedir']
+    return data
+
+def list_lxd_containers(hosts):
+    """Create a list of all the LXD containers that are running on the LXD hosts."""
+    containers_per_host = _lxd_list_all(hosts)
+    all_containers = {}
+    for containers in containers_per_host:
+        for container in containers:
+            all_containers[container["name"]] = _create_container_info(container)
+    return all_containers
diff --git a/lib/dynamic_inventory/script.py b/lib/dynamic_inventory/script.py
new file mode 100644
index 0000000..4237c8c
--- /dev/null
+++ b/lib/dynamic_inventory/script.py
@@ -0,0 +1,33 @@
+import argparse
+import dynamic_inventory
+
+
+class Script(object):
+    def __init__(self, environment_path):
+        config = dynamic_inventory.Config(environment_path)
+        self.inventory = dynamic_inventory.Inventory(config)
+
+    def execute(self):
+        args = self._parse_args()
+        if args.host is None:
+            self._do_list()
+        else:
+            self._do_host(args.host)
+
+    def _parse_args(self):
+        p = argparse.ArgumentParser(description='Produce Ansible inventory')
+        p.add_argument(
+            '--list', action='store_true', default=True,
+            help='List all hosts')
+        p.add_argument(
+            '--host', action='store',
+            help='Show variable for a single host')
+        return p.parse_args()
+
+    def _do_list(self):
+        data = self.inventory.groups.copy()
+        data["_meta"] = {"hostvars": self.inventory.nodes}
+        print(dynamic_inventory.convert_to_json(data))
+
+    def _do_host(self, name):
+        print(dynamic_inventory.convert_to_json(self.inventory.nodes[name]))
diff --git a/lib/dynamic_inventory/serialize.py b/lib/dynamic_inventory/serialize.py
new file mode 100644
index 0000000..84df546
--- /dev/null
+++ b/lib/dynamic_inventory/serialize.py
@@ -0,0 +1,18 @@
+import json
+from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
+
+
+def convert_to_json(data):
+    return json.dumps(data, sort_keys=True, indent=2, default=_json_default_serializer)
+
+
+def _json_default_serializer(value):
+    if isinstance(value, bytes):
+        return
value.decode('utf-8') + if isinstance(value, str): + return value + if isinstance(value, AnsibleVaultEncryptedUnicode): + return str(value) + raise TypeError( + "Unsupported type '%s' used in inventory data " + "(no support for JSON serializing this type)" % type(value).__name__) diff --git a/playbook-apps.yml b/playbook-apps.yml new file mode 100644 index 0000000..d63a2b5 --- /dev/null +++ b/playbook-apps.yml @@ -0,0 +1,8 @@ +--- +- import_playbook: roles/users/playbook.yml +- import_playbook: roles/hostsfile/playbook.yml +- import_playbook: roles/networksfile/playbook.yml +- import_playbook: roles/logging/playbook.yml +- import_playbook: roles/firewalling/playbook.yml +- import_playbook: roles/app.galera_node/playbook.yml +- import_playbook: roles/app.galera_bootstrap/playbook.yml diff --git a/playbook-hosts.yml b/playbook-hosts.yml new file mode 100644 index 0000000..9765bb6 --- /dev/null +++ b/playbook-hosts.yml @@ -0,0 +1,11 @@ +--- +# This playbook is used to setup and configure the hosts that make up the +# Voice Platform (i.e. the physical hosts and the LXD system containers +# that are run on them). The actual software installation is handled by the +# playbook-apps.yml playbook. +# The ansible playbook is also included here, because that one sets up the +# ssh key that ansible uses to connect to the managed hosts. +- import_playbook: roles/ansible/playbook.yml +- import_playbook: roles/lxd_host/playbook.yml +- import_playbook: roles/lxd_container/playbook.yml +- import_playbook: roles/lxd_common/playbook.yml diff --git a/playbook.yml b/playbook.yml new file mode 100644 index 0000000..9bac632 --- /dev/null +++ b/playbook.yml @@ -0,0 +1,3 @@ +--- +- import_playbook: playbook-hosts.yml +- import_playbook: playbook-apps.yml diff --git a/roles/ansible/meta/main.yml b/roles/ansible/meta/main.yml new file mode 100644 index 0000000..3a4e6dc --- /dev/null +++ b/roles/ansible/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Setup Ansible + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/ansible/playbook.yml b/roles/ansible/playbook.yml new file mode 100644 index 0000000..9da7d5b --- /dev/null +++ b/roles/ansible/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: ansible:!unmanaged + gather_facts: no + serial: 4 + roles: + - ansible diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml new file mode 100644 index 0000000..421a524 --- /dev/null +++ b/roles/ansible/tasks/main.yml @@ -0,0 +1,33 @@ +--- +- name: Install ~root/.ansible_aliases + template: + src: ansible_aliases.j2 + dest: /root/.ansible_aliases + +- name: Enable ansible aliases in ~/.bash_aliases + lineinfile: + path: /root/.bash_aliases + regexp: 'ansible_aliases' + line: '. 
~/.ansible_aliases' + create: yes + owner: root + group: root + mode: 0640 + +- name: "Check if an ansible-specific ssh keypair exists" + stat: + path: ~/.ssh/id_ansible@{{ software.environment }} + register: keypair + tags: [ "ssh" ] + +- name: "Generate an ansible-specific ssh keypair" + shell: ssh-keygen -C ansible@{{ software.environment }} -f ~/.ssh/id_ansible@{{ software.environment }} -N "" + when: not keypair.stat.exists + tags: [ "ssh" ] + +- name: "Generate /root/.ssh/install_ansible_authorized_ssh_key" + template: + src: templates/install_ansible_authorized_ssh_key.j2 + dest: ~/.ssh/install_ansible_ssh_key + mode: 0700 + tags: [ "ssh", "ssh_authorized_key" ] diff --git a/roles/ansible/templates/ansible_aliases.j2 b/roles/ansible/templates/ansible_aliases.j2 new file mode 100644 index 0000000..39811d9 --- /dev/null +++ b/roles/ansible/templates/ansible_aliases.j2 @@ -0,0 +1,5 @@ +alias play="ansible-playbook playbook.yml" +alias play-hosts="ansible-playbook playbook-hosts.yml" +alias play-apps="ansible-playbook playbook-apps.yml" +alias replay="ansible-playbook playbook.yml --limit @playbook.retry" +alias ansible-pwgen='ansible-vault encrypt_string $(pwgen 32 -c -n -1)' diff --git a/roles/ansible/templates/install_ansible_authorized_ssh_key.j2 b/roles/ansible/templates/install_ansible_authorized_ssh_key.j2 new file mode 100644 index 0000000..5287edb --- /dev/null +++ b/roles/ansible/templates/install_ansible_authorized_ssh_key.j2 @@ -0,0 +1,24 @@ +#!/bin/bash +# {{ ansible_managed }} +# Installs the Anbible management public ssh key on this host. + +{% set pubkey = lookup('env', 'HOME')+"/.ssh/id_ansible@"+software.environment+".pub" -%} +{% set keydata = lookup('file', pubkey) -%} +SSH_DIR="${HOME}/.ssh" +NAME="ansible@{{ software.environment }}" +AUTHORIZED_KEYS="${SSH_DIR}/authorized_keys" +NEW_AUTHORIZED_KEYS="${AUTHORIZED_KEYS}..SWAP$$" +BACKUP_AUTHORIZED_KEYS="${AUTHORIZED_KEYS}.old" +#KEY_DATA='from="{{ ip.ansible[segment.mgmt.network].address }}" {{ keydata }}' +KEY_DATA='{{ keydata }}' + +mkdir -p $SSH_DIR +touch $AUTHORIZED_KEYS +echo "Remove ${NAME} from existing authorized keys" +cat $AUTHORIZED_KEYS | grep -v \ ${NAME}$ > $NEW_AUTHORIZED_KEYS +echo "Add fresh ansible public key to the authorized keys" +echo $KEY_DATA >> $NEW_AUTHORIZED_KEYS +echo "Install the new authorized keys" +mv $NEW_AUTHORIZED_KEYS $AUTHORIZED_KEYS +cp $AUTHORIZED_KEYS $BACKUP_AUTHORIZED_KEYS + diff --git a/roles/app.galera_bootstrap/meta/main.yml b/roles/app.galera_bootstrap/meta/main.yml new file mode 100644 index 0000000..87ade6b --- /dev/null +++ b/roles/app.galera_bootstrap/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Bootstrap Galera cluster + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/app.galera_bootstrap/playbook.yml b/roles/app.galera_bootstrap/playbook.yml new file mode 100644 index 0000000..453c5c0 --- /dev/null +++ b/roles/app.galera_bootstrap/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-host:ansible:!unmanaged + gather_facts: no + serial: 4 + roles: + - app.galera_bootstrap diff --git a/roles/app.galera_bootstrap/tasks/main.yml b/roles/app.galera_bootstrap/tasks/main.yml new file mode 100644 index 0000000..44ad63a --- /dev/null +++ b/roles/app.galera_bootstrap/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: "Install galera cluster bootstrap advisor script" + template: + src: galera_bootstrap_advisor.j2 + dest: /root/galera_bootstrap_advisor + mode: 0750 + owner: root + group: root diff --git 
a/roles/app.galera_bootstrap/templates/galera_bootstrap_advisor.j2 b/roles/app.galera_bootstrap/templates/galera_bootstrap_advisor.j2 new file mode 100644 index 0000000..e9d9ea9 --- /dev/null +++ b/roles/app.galera_bootstrap/templates/galera_bootstrap_advisor.j2 @@ -0,0 +1,178 @@ +#!/usr/bin/env python +# {{ ansible_managed }} +# Author: Maurice Makaay, XS4ALL + +from __future__ import print_function +import subprocess +import re +import json +from os import system +from sys import exit, stdin + + +nodes = {{ groups['galera'] | to_json }} +lxd_status = {{ lxd_status | to_json }} + + +def propose(commands, exit_code): + for command in commands: + print("# %s" % command) + print("") + print("Execute now? [y/n]: ", end="") + answer = stdin.readline() + if "y" in answer.lower(): + print("") + for command in commands: + print("EXECUTING> %s" % command) + system(command) + print("") + exit(0) + exit(exit_code) + + +print("Collecting Galera status information from nodes ...") +status = {} +for node in nodes: + lxd_host = "%s:%s" % (lxd_status[node]["host"], node) + try: + result = subprocess.check_output([ + "lxc", "exec", lxd_host, "/root/galera_cluster_status"]) + status[node] = json.loads(result) + except subprocess.CalledProcessError: + status[node] = { + "cluster_size": 0, + "cluster_status": 'Status Failed', + "connected": "Unknown", + "ready": "Unknown", + "safe_to_bootstrap": 0, + "seqno": -1, + "uuid": None + } + status[node]['lxd_host'] = lxd_host + status[node]['node'] = node + +def is_primary(s): + return s["cluster_status"] == "Primary" + +def has_correct_cluster_size(s): + return s["cluster_size"] == len(nodes) + +def is_connected(s): + return s["connected"] == "ON" + +def is_ready(s): + return s["ready"] == "ON" + + +print("") +print("%-20s %-15s %-6s %-12s %-7s" % ( + "Node", "Status", "Size", "Connected", "Ready")) +for node in nodes: + s = status[node] + print("%-20s %-15s %-6s %-12s %-7s" % ( + node, s["cluster_status"], s["cluster_size"], + s["connected"], s["ready"])) + +print("") +print("Checking cluster status ...") +print("") + +# ---------------------------------------------------------------------------- +# CASE: All cluster nodes are up and running, green lights galore! +# ---------------------------------------------------------------------------- + +all_primary = all(map(is_primary, status.values())) +all_size_ok = all(map(has_correct_cluster_size, status.values())) +all_connected = all(map(is_connected, status.values())) +all_ready = all(map(is_ready, status.values())) + +if all([all_primary, all_size_ok, all_connected, all_ready]): + print("There's no bootstrapping work to do here, all looks good!") + print("") + exit(0) + +# ---------------------------------------------------------------------------- +# CASE: The cluster is parially down, but some cluster hosts are still ok. 
+# ---------------------------------------------------------------------------- + +if any(map(is_primary, status.values())) and any(map(is_ready, status.values())): + print("The cluster seems to be in a degraded status.") + print("Please investigate the cluster status.") + print("- Can the cluster hosts reach each other over the network?") + print("- Are all mariadb instances running?") + print("") + print("It might help to (re)start the database server on the degraded node(s):") + print("") + commands = [ + "lxc exec %s service mysql restart" % s["lxd_host"] + for s in status.values() + if not is_primary(s) + ] + propose(commands, 1) + +# ---------------------------------------------------------------------------- +# CASE: All cluster nodes are down, one cluster node is safe to bootstrap. +# ---------------------------------------------------------------------------- + +print("None of the cluster hosts is operational. A cluster bootup is required.") + +safe_to_bootstrap = [s for s in status.values() if s["safe_to_bootstrap"] == 1] +if any(safe_to_bootstrap): + bootstrap_node = safe_to_bootstrap[0] + print("A node is marked as 'safe to bootstrap', so proposed strategy:") + print("") + commands = ["lxc exec %s galera_new_cluster" % bootstrap_node["lxd_host"]] + for n, s in status.items(): + if n == bootstrap_node["node"]: + continue + commands.append("lxc exec %s service mysql start" % s["lxd_host"]) + propose(commands, 2) + +# ---------------------------------------------------------------------------- +# CASE: All cluster nodes are down, no cluster node is safe to bootstrap. +# ---------------------------------------------------------------------------- + +print("Unfortunately, none of the nodes is marked as safe to bootstrap.") +print("Retrieving last recovered position for all cluster nodes ...") +print("") +print("%-20s %-15s %-40s" % ("Node", "Recovery pos", "UUID")) + +for n, s in status.items(): + lxd_host = "%s:%s" % (lxd_status[n]["host"], n) + try: + result = subprocess.check_output([ + "lxc", "exec", lxd_host, "/root/galera_wsrep_recovered_position"]) + uuid_and_pos = json.loads(result) + uuid, pos = re.split(':', uuid_and_pos, maxsplit=1) + s["uuid"] = uuid + s["pos"] = int(pos) + except subprocess.CalledProcessError: + s["uuid"] = "Unknown" + s["pos"] = -1 + print("%-20s %-15d %-40s" % (n, s["pos"], s["uuid"])) + +uuids = set((s["uuid"] for s in status.values())) +if len(uuids) != 1: + print("") + print("Wow... now wait a minute... 
There are multiple UUID's in play!") + print("That should never happen in a Galera cluster.") + print("You will have to handle this one yourself I'm afraid.") + +def get_pos_key(x): + return x["pos"] + +old_to_new = sorted(status.itervalues(), key=get_pos_key) +bootstrap_node = old_to_new[-1] + +print("") +print("Determined a node that is safe for bootstrapping, so proposed strategy:") +print("") +commands = [ + "lxc exec %s /root/galera_flag_as_safe_to_bootstrap" % bootstrap_node["lxd_host"], + "lxc exec %s galera_new_cluster" % bootstrap_node["lxd_host"] +] +for n, s in status.items(): + if n == bootstrap_node["node"]: + continue + commands.append("lxc exec %s service mysql start" % s["lxd_host"]) +propose(commands, 3) diff --git a/roles/app.galera_node/defaults/main.yml b/roles/app.galera_node/defaults/main.yml new file mode 100644 index 0000000..f4248f0 --- /dev/null +++ b/roles/app.galera_node/defaults/main.yml @@ -0,0 +1,2 @@ +--- +marked_down_lockfile: /var/lib/mysql/manually.marked.down diff --git a/roles/app.galera_node/files/galera_cluster_status.socket b/roles/app.galera_node/files/galera_cluster_status.socket new file mode 100644 index 0000000..8c2bbee --- /dev/null +++ b/roles/app.galera_node/files/galera_cluster_status.socket @@ -0,0 +1,9 @@ +[Unit] +Description=Galera cluster status socket + +[Socket] +ListenStream=3366 +Accept=true + +[Install] +WantedBy=sockets.target diff --git a/roles/app.galera_node/files/galera_cluster_status@.service b/roles/app.galera_node/files/galera_cluster_status@.service new file mode 100644 index 0000000..a2f0506 --- /dev/null +++ b/roles/app.galera_node/files/galera_cluster_status@.service @@ -0,0 +1,13 @@ +[Unit] +Description=Galera/Mariadb status checker +Requires=galera_cluster_status.socket + +[Service] +Type=simple +ExecStart=/root/galera_cluster_status --haproxy +TimeoutStopSec=5 +StandardInput=socket +StandardError=journal + +[Install] +WantedBy=multi-user.target diff --git a/roles/app.galera_node/files/userparameter_galera.conf b/roles/app.galera_node/files/userparameter_galera.conf new file mode 100644 index 0000000..6f6ff17 --- /dev/null +++ b/roles/app.galera_node/files/userparameter_galera.conf @@ -0,0 +1,70 @@ +#Total number of cluster membership changes happened. +UserParameter=galera.cluster_conf_id[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_conf_id';" | HOME=/var/lib/zabbix mysql -N + +#Current number of members in the cluster. +UserParameter=galera.cluster_size[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_size';" | HOME=/var/lib/zabbix mysql -N + +#Status of this cluster component. That is, whether the node is part of a PRIMARY or NON_PRIMARY component. +UserParameter=galera.cluster_status[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_status';" | HOME=/var/lib/zabbix mysql -N + +#If the value is OFF, the node has not yet connected to any of the cluster components. +UserParameter=galera.wsrep_connected[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_connected';" | HOME=/var/lib/zabbix mysql -N + +#Shows the internal state of the EVS Protocol +UserParameter=galera.wsrep_evs_state[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_evs_state';" | HOME=/var/lib/zabbix mysql -N + +#How much the slave lag is slowing down the cluster. 
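(Aside, not part of the patch itself: every UserParameter in this file shells out to the mysql client to read a single wsrep counter from information_schema.GLOBAL_STATUS. Below is a minimal Python sketch of the same lookup using mysql.connector, the module that scripts.yml installs for the galera support scripts; the chosen status variable and the use of /etc/mysql/debian.cnf for credentials are illustrative assumptions, not something this role deploys.)

#!/usr/bin/env python3
# Sketch only: read one wsrep status counter, like the Zabbix UserParameters do.
import configparser
from mysql.connector import MySQLConnection

def wsrep_status(variable, defaults_file="/etc/mysql/debian.cnf"):
    # Reuse the maintenance credentials that the other galera scripts read.
    config = configparser.ConfigParser()
    config.read(defaults_file)
    db = MySQLConnection(
        host="localhost",
        database="mysql",
        user=config["client"]["user"],
        password=config["client"]["password"],
        unix_socket=config["client"]["socket"])
    try:
        cursor = db.cursor()
        cursor.execute(
            "SELECT VARIABLE_VALUE FROM information_schema.GLOBAL_STATUS"
            " WHERE VARIABLE_NAME = %s", (variable,))
        row = cursor.fetchone()
        return row[0] if row else None
    finally:
        db.close()

if __name__ == "__main__":
    print(wsrep_status("wsrep_cluster_size"))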
+UserParameter=galera.wsrep_flow_control_paused[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_paused';" | HOME=/var/lib/zabbix mysql -N + +#Returns the number of FC_PAUSE events the node has received. Does not reset over time +UserParameter=galera.wsrep_flow_control_recv[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_recv';" | HOME=/var/lib/zabbix mysql -N + +#Returns the number of FC_PAUSE events the node has sent. Does not reset over time +UserParameter=galera.wsrep_flow_control_sent[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_sent';" | HOME=/var/lib/zabbix mysql -N + +#Displays the group communications UUID. +UserParameter=galera.wsrep_gcom_uuid[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_gcomm_uuid';" | HOME=/var/lib/zabbix mysql -N + +#The sequence number, or seqno, of the last committed transaction. +UserParameter=galera.wsrep_last_committed[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_last_committed';" | HOME=/var/lib/zabbix mysql -N + +#Internal Galera Cluster FSM state number. + +UserParameter=galera.wsrep_local_state[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state';" | HOME=/var/lib/zabbix mysql -N + +#Total number of local transactions that were aborted by slave transactions while in execution. +UserParameter=galera.wsrep_local_bf_aborts[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_bf_aborts';" | HOME=/var/lib/zabbix mysql -N + +#Current (instantaneous) length of the recv queue. +UserParameter=galera.wsrep_local_recv_queue[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_recv_queue';" | HOME=/var/lib/zabbix mysql -N + +#Current (instantaneous) length of the send queue. +UserParameter=galera.wsrep_local_send_queue[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_send_queue';" | HOME=/var/lib/zabbix mysql -N + +#Human-readable explanation of the state. +UserParameter=galera.wsrep_local_state_comment[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_comment';" | HOME=/var/lib/zabbix mysql -N + +#The UUID of the state stored on this node. +UserParameter=galera.wsrep_local_state_uuid[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_uuid';" | HOME=/var/lib/zabbix mysql -N + +#Whether the server is ready to accept queries. +UserParameter=galera.wsrep_ready[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_ready';" | HOME=/var/lib/zabbix mysql -N + +#Total size of write-sets received from other nodes. +UserParameter=galera.wsrep_received_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_received_bytes';" | HOME=/var/lib/zabbix mysql -N + +#Total size of write-sets replicated. +UserParameter=galera.replicated_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_replicated_bytes';" | HOME=/var/lib/zabbix mysql -N + +#Total size of data replicated. 
+UserParameter=galera.wsrep_repl_data_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_data_bytes';" | HOME=/var/lib/zabbix mysql -N + +#Total number of keys replicated. +UserParameter=galera.wsrep_repl_keys[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_keys';" | HOME=/var/lib/zabbix mysql -N + +#Total size of keys replicated in bytes +UserParameter=galera.wsrep_repl_keys_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_keys_bytes';" | HOME=/var/lib/zabbix mysql -N + +#Total size of other bits replicated +UserParameter=galera.wsrep_repl_other_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_other_bytes';" | HOME=/var/lib/zabbix mysql -N + diff --git a/roles/app.galera_node/meta/main.yml b/roles/app.galera_node/meta/main.yml new file mode 100644 index 0000000..1005c23 --- /dev/null +++ b/roles/app.galera_node/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Install Galera cluster nodes + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/app.galera_node/playbook.yml b/roles/app.galera_node/playbook.yml new file mode 100644 index 0000000..76ba27a --- /dev/null +++ b/roles/app.galera_node/playbook.yml @@ -0,0 +1,7 @@ +--- +- hosts: galera:!unmanaged + gather_facts: no + roles: + - app.galera_node + # Process one galera node at a time, to not bring multiple nodes down simultaneously. + serial: 1 diff --git a/roles/app.galera_node/tasks/install.yml b/roles/app.galera_node/tasks/install.yml new file mode 100644 index 0000000..046dd8d --- /dev/null +++ b/roles/app.galera_node/tasks/install.yml @@ -0,0 +1,66 @@ +--- +- name: "Uninstall xtrabackup utility (switched to mariabackup)" + apt: + name: percona-xtrabackup + state: absent + +- name: "Remove tuning configuration file which is now merged with galera.cnf" + file: + path: /etc/mysql/conf.d/tuning.cnf + state: absent + +- name: "Check if the galera config already exists" + stat: + path: /etc/mysql/conf.d/galera.cnf + register: galera_cnf + +- name: "Configure /etc/mysql/conf.d/galera.cnf" + template: + src: galera.cnf.j2 + dest: /etc/mysql/conf.d/galera.cnf + owner: root + group: mysql + mode: 0640 + tags: + - config + +# TODO Check if we want this back or done in some other way. +# It's safer to not be meddling with starting and stopping galera +# nodes on subsequent runs. Registering in a file that the +# initialization has been executed might already do the trick. 
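(Aside, not part of the patch itself: the TODO above suggests recording that the one-time cluster initialization already happened, instead of inspecting grastate.dat on every run. A rough sketch of that marker-file guard follows, purely to illustrate the idea; the marker path and the direct galera_new_cluster call are assumptions, not what the role currently does.)

#!/usr/bin/env python3
# Sketch of the "record that initialization already happened" idea from the TODO above.
import os
import subprocess

MARKER = "/var/lib/mysql/.galera_bootstrapped"  # assumed path, not used by the role

def bootstrap_once():
    if os.path.exists(MARKER):
        print("Bootstrap already recorded, nothing to do.")
        return
    # galera_new_cluster ships with MariaDB/Galera and starts the first cluster node.
    subprocess.check_call(["galera_new_cluster"])
    with open(MARKER, "w") as marker_file:
        marker_file.write("bootstrapped\n")

if __name__ == "__main__":
    bootstrap_once()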
+# +#- name: "Check if the node is safe for galera bootstrapping" +# shell: "egrep -s -q '^ *safe_to_bootstrap *: *1 *$' /var/lib/mysql/grastate.dat" +# register: grastate +# failed_when: grastate.rc not in [0, 1, 2] +# when: is_primary +# +# # state 0 = grastate.dat file exists and it contains "safe_to_bootstrap: 1" +# # state 1 = grastate.dat file exists, but it does not contain "safe_to_bootstrap: 1" +# # state 2 = grastate.dat file does not yet exist, this is a new node +#- name: "Stop mysql service on primary node, prior to bootstrapping galera" +# service: +# name: mariadb +# state: stopped +# when: is_primary and grastate.rc in [0, 2] +# +#- name: "Bootstrap galera on primary node" +# shell: galera_new_cluster +# when: is_primary and grastate.rc in [0, 2] +# +#- name: "Restart mysql service on secondary node" +# service: +# name: mariadb +# state: restarted +# when: not is_primary + +- name: "Create galera-haproxy user (for checking node health)" + mysql_user: + name: galera-haproxy + host: "%" + +- name: "Restart mysql server (only on initial install)" + service: + name: mariadb + state: restarted + when: not galera_cnf.stat.exists diff --git a/roles/app.galera_node/tasks/main.yml b/roles/app.galera_node/tasks/main.yml new file mode 100644 index 0000000..88a53b2 --- /dev/null +++ b/roles/app.galera_node/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- include_role: name=app.mariadb +- import_tasks: scripts.yml + tags: [ "scripts" ] +- import_tasks: install.yml + tags: [ "cleanup" ] diff --git a/roles/app.galera_node/tasks/scripts.yml b/roles/app.galera_node/tasks/scripts.yml new file mode 100644 index 0000000..f4429a6 --- /dev/null +++ b/roles/app.galera_node/tasks/scripts.yml @@ -0,0 +1,22 @@ +- name: "Install required packages for galera support scripts" + apt: + name: + - "{{ vars.python_package }}" + - "{{ vars.python_package }}-mysql.connector" + state: present + tags: + - scripts + +- name: "Install galera support scripts" + template: + src: "{{ item }}.j2" + dest: "/root/{{ item }}" + owner: root + group: root + mode: 0700 + with_items: + - galera_cluster_status + - galera_wsrep_recovered_position + - galera_flag_as_safe_to_bootstrap + tags: + - scripts diff --git a/roles/app.galera_node/templates/galera.cnf.j2 b/roles/app.galera_node/templates/galera.cnf.j2 new file mode 100644 index 0000000..599a494 --- /dev/null +++ b/roles/app.galera_node/templates/galera.cnf.j2 @@ -0,0 +1,60 @@ +[mysqld] + +bind-address = 0.0.0.0 + +# Don't do name resolving on clients, because it might cause unneeded impact. +skip-name-resolve + +# Galera works best with innodb tables. +default-storage-engine = innodb + +# Mandatory options to make galera replication work reliable. +binlog_format = ROW +innodb_autoinc_lock_mode = 2 + +# Enable the wsrep provider for cluster sync support. +wsrep_provider = /usr/lib/galera/libgalera_smm.so +wsrep_on = ON + +# Configure the cluster node. +wsrep_node_name= " {{ inventory_hostname }}" +wsrep_node_address = "{{ vars.network[segment.mgmt.network].address }}" + +# Configure the cluster environment. +wsrep_cluster_name = "galera_cluster" +wsrep_cluster_address = "gcomm://{{ ip['galera'][segment.mgmt.network] | map(attribute='address') | join(",") }}" +{% if groups['galera-donor'] %} + +# When joining the cluster, try the named donor nodes, before using a service +# node as the donor. The comma at the end is important, because that tells +# galera to try the service nodes when no donor nodes are availble. 
+wsrep_sst_donor = "{{ groups['galera-donor'] | join(',') }}," +{% endif %} + +# The preferred way to handle replication is using the mariabackup tool. +wsrep_sst_method = mariabackup +wsrep_sst_auth = "root:{{ credentials.mysql_root.password }}" + +{% if software.environment == "production" %} +# Some performance tweaks for the production environment (where more +# resources are available than in the other environments). +max_connections = 250 +innodb_buffer_pool_size = 8192M +query_cache_type = 0 +query_cache_size = 0 +innodb_flush_log_at_trx_commit = 0 +tmp_table_size = 512M +max_heap_table_size = 512M +wsrep_slave_threads = 16 +wsrep_provider_options="gcs.fc_limit=80; gcs.fc_factor=0.8; gcache.size=2G; gcache.page_size=2G" + +# To enable performance monitoring and investigation. +performance_schema = ON +performance-schema-instrument='stage/% = ON' +performance-schema-consumer-events-stages-current = ON +performance-schema-consumer-events-stages-history = ON +performance-schema-consumer-events-stages-history-long = ON +{% endif %} + +[sst] +sst-syslog=1 diff --git a/roles/app.galera_node/templates/galera_cluster_status.j2 b/roles/app.galera_node/templates/galera_cluster_status.j2 new file mode 100755 index 0000000..acb39bf --- /dev/null +++ b/roles/app.galera_node/templates/galera_cluster_status.j2 @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +# Author: Maurice Makaay, XS4ALL +# {{ ansible_managed }} + +import re +import json +import sys +import os +import configparser +from mysql.connector import MySQLConnection + + +lockfile = "{{ marked_down_lockfile }}" + + +def _get_mode_from_argv(): + mode = "json" + if len(sys.argv) > 1: + if sys.argv[1] == "--haproxy": + mode = "haproxy" + elif sys.argv[1] == "--json": + mode = "json" + else: + raise "Invalid argument(s) used (you can only use --haproxy or --json)." 
+ return mode + + +def _connect_to_db(): + try: + config = configparser.ConfigParser() + config.read("/etc/mysql/debian.cnf") + user = config["client"]["user"] + password = config["client"]["password"] + socket = config["client"]["socket"] + return MySQLConnection( + host="localhost", + database="mysql", + user=user, + password=password, + unix_socket=socket) + except: + return None + +def _init_response(): + return { + 'cluster_size': 0, + 'cluster_status': None, + 'connected': 'OFF', + 'last_committed': 0, + 'local_state_comment': None, + 'read_only': 'OFF', + 'ready': 'OFF', + 'safe_to_bootstrap': 0, + 'seqno': None, + 'sst_method': None, + 'uuid': None, + 'server_version': None, + 'innodb_version': None, + 'protocol_version': None, + 'wsrep_patch_version': None + } + + +def _add_global_status(response, db): + for key, value in _query("SHOW GLOBAL STATUS LIKE 'wsrep_%'", db): + key = re.sub('^wsrep_', '', key) + if key in response: + response[key] = value + + +def _add_global_variables(response, db): + query = """SHOW GLOBAL VARIABLES WHERE Variable_name IN ( + 'read_only', 'wsrep_sst_method', + 'innodb_version', 'protocol_version', 'version', + 'wsrep_patch_version' + )""" + for key, value in _query(query, db): + if key == "version": + key = "server_version" + if key == "wsrep_sst_method": + key = "sst_method" + response[key] = value + + +def _query(query, db): + try: + cursor = db.cursor() + cursor.execute(query) + return cursor.fetchall() + except: + return [] + + +def _add_grastate(response): + try: + f = open("/var/lib/mysql/grastate.dat", "r") + for line in f: + if line.startswith('#') or re.match('^\s*$', line): + continue + line = re.sub('\s+$', '', line) + key, value = re.split(':\s+', line, maxsplit=1) + if key in response: + response[key] = value + response['cluster_size'] = int(response['cluster_size']) + response['seqno'] = int(response['seqno']) + response['safe_to_bootstrap'] = int(response['safe_to_bootstrap']) + except: + pass + + +def _add_manually_disabled(response): + response["manually_disabled"] = os.path.isfile(lockfile); + + +def _evaluate_safe_to_use(response): + ''' + Evaluate if it is safe to use this node for requests. 
Inspiration: + https://severalnines.com/resources/tutorials/mysql-load-balancing-haproxy-tutorial + ''' + status = response['local_state_comment'] + is_read_only = response['read_only'] != 'OFF' + is_ready = response['ready'] == 'ON' + is_connected = response['connected'] == 'ON' + method = response['sst_method'] + is_using_xtrabackup = method is not None and method.startswith("xtrabackup") + + safe_to_use = False + comment = None + + if response['manually_disabled']: + comment = "The node has been manually disabled (file %s exists)" % lockfile + elif status is None: + comment = "The MySQL server seems not to be running at all" + elif status == 'Synced': + if is_read_only: + comment = "Status is 'Synced', but database is reported to be read-only" + elif not is_ready: + comment = "Status is 'Synced', but database reports WSS not ready" + elif not is_connected: + comment = "Status is 'Synced', but database reports WSS not being connected" + else: + safe_to_use = True + comment = "Status is 'Synced' and database is writable" + elif status == 'Donor': + if is_using_xtrabackup: + safe_to_use = True + comment = "Status is 'Donor', but using safe '%s' as the SST method" % method + else: + comment = "Status is 'Donor', and xtrabackup(-v2) is not used for SST" + else: + comment = "Galera status is not 'Synced', but '%s'" % status + response['safe_to_use'] = safe_to_use + response['safe_to_use_comment'] = comment + + +def _output_response(response, mode): + json_data = json.dumps(response, indent=4, sort_keys=True) + "\r\n" + if mode == "json": + print(json_data) + else: + if response["safe_to_use"]: + print("HTTP/1.1 200 OK", end="\r\n") + else: + print("HTTP/1.1 503 Service Unavailable", end="\r\n") + print("Content-Length: ", len(json_data), end="\r\n") + print("Keep-Alive: no", end="\r\n") + print("Content-Type: application/json", end="\r\n\r\n") + print(json_data, end="") + + +response = _init_response() +db = _connect_to_db() +if db is None: + response['safe_to_use'] = False + response['safe_to_use_comment'] = "Connection to MySQL server failed" +else: + _add_global_status(response, db) + _add_global_variables(response, db) + db.close() + _add_grastate(response) + _add_manually_disabled(response) + _evaluate_safe_to_use(response) +mode = _get_mode_from_argv() +_output_response(response, mode) diff --git a/roles/app.galera_node/templates/galera_flag_as_safe_to_bootstrap.j2 b/roles/app.galera_node/templates/galera_flag_as_safe_to_bootstrap.j2 new file mode 100644 index 0000000..14f8b06 --- /dev/null +++ b/roles/app.galera_node/templates/galera_flag_as_safe_to_bootstrap.j2 @@ -0,0 +1,7 @@ +#!/bin/bash +# Author: Maurice Makaay, XS4ALL +# {{ ansible_managed }} + +sed -i \ + -e 's/safe_to_bootstrap:\s*0/safe_to_bootstrap: 1/' \ + /var/lib/mysql/grastate.dat diff --git a/roles/app.galera_node/templates/galera_wsrep_recovered_position.j2 b/roles/app.galera_node/templates/galera_wsrep_recovered_position.j2 new file mode 100644 index 0000000..1d7efbb --- /dev/null +++ b/roles/app.galera_node/templates/galera_wsrep_recovered_position.j2 @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# Author: Maurice Makaay, XS4ALL +# {{ ansible_managed }} +# +# This script is used to find out what the latest recorded transaction +# on this host is. It is used by the galera cluster bootstrapping +# script (which is available in /root/bootstrap_galera_cluster on the +# ansible host) to find out in what order to start the nodes in case +# of an unclean full shutdown of the cluster. +# +# DO NOT RUN FOR FUN! 
This script will bring down the mysql service +# when it is not already down. +# + +import subprocess +import re +import json +from sys import exit + + +subprocess.check_output(["service", "mysql", "stop"]) +result = subprocess.check_output( + ['mysqld', '--wsrep-recover'], stderr=subprocess.STDOUT).decode() + +info = re.compile('WSREP: Recovered position: (.+)\s*$') +for line in result.split("\n"): + result = info.search(line) + if result is not None: + print(json.dumps(result.group(1))) + exit(0) + +print(json.dumps(None)) + diff --git a/roles/app.galera_node/templates/xs4all-galera-utils.conf.j2 b/roles/app.galera_node/templates/xs4all-galera-utils.conf.j2 new file mode 100644 index 0000000..c170035 --- /dev/null +++ b/roles/app.galera_node/templates/xs4all-galera-utils.conf.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} + +[client] +user = debian-sys-maint +password = {{ credentials.mysql_root.password }} diff --git a/roles/app.galera_node/templates/zabbix_my.cnf.j2 b/roles/app.galera_node/templates/zabbix_my.cnf.j2 new file mode 100644 index 0000000..fc1e92b --- /dev/null +++ b/roles/app.galera_node/templates/zabbix_my.cnf.j2 @@ -0,0 +1,4 @@ +[client] +user={{ credentials.ZabbixMysqlMonitoring.username }} +password={{ credentials.ZabbixMysqlMonitoring.password }} +host=127.0.0.1 diff --git a/roles/app.mariadb/defaults/main.yml b/roles/app.mariadb/defaults/main.yml new file mode 100644 index 0000000..3c0650f --- /dev/null +++ b/roles/app.mariadb/defaults/main.yml @@ -0,0 +1,3 @@ +--- +mariadb_version: 10.3 +mariadb_origin: ams2.mirrors.digitalocean.com diff --git a/roles/app.mariadb/meta/main.yml b/roles/app.mariadb/meta/main.yml new file mode 100644 index 0000000..37e3b3f --- /dev/null +++ b/roles/app.mariadb/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Install MariaDB server + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/app.mariadb/tasks/debian-sys-maint.yml b/roles/app.mariadb/tasks/debian-sys-maint.yml new file mode 100644 index 0000000..a04e90f --- /dev/null +++ b/roles/app.mariadb/tasks/debian-sys-maint.yml @@ -0,0 +1,16 @@ +--- +- name: "Configure the password for the debian-sys-maint user" + mysql_user: + name: "debian-sys-maint" + host: "localhost" + password: "{{ credentials.mysql_root.password }}" + tags: + - debian-sys-maint + +- name: "Store the debian-sys-maint password in /etc/mysql/debian.cnf" + replace: + path: /etc/mysql/debian.cnf + regexp: '^password\s*=.*$' + replace: "password = {{ credentials.mysql_root.password }}" + tags: + - debian-sys-maint diff --git a/roles/app.mariadb/tasks/install.yml b/roles/app.mariadb/tasks/install.yml new file mode 100644 index 0000000..41c9f4a --- /dev/null +++ b/roles/app.mariadb/tasks/install.yml @@ -0,0 +1,33 @@ +- name: "Feed MariaDB root password to debconf" + debconf: + name: "mariadb-server" + question: "{{ item }}" + value: "{{ credentials.mysql_root.password }}" + vtype: password + changed_when: False + with_items: + - mysql-server/root_password + - mysql-server/root_password_again + +- name: "Install MariaDB / Galera" + apt: + name: "mariadb-server" + state: present + +- name: "Install Mariabackup" + apt: + name: "mariadb-backup" + state: present + +- name: "Install required Python MySQL module for db management via Ansible" + apt: + name: "{{ vars.python_package }}-mysqldb" + state: present + +- name: "Configure /root/.my.cnf" + template: + src: my.cnf.j2 + dest: /root/.my.cnf + owner: root + group: root + mode: 0400 diff --git 
a/roles/app.mariadb/tasks/main.yml b/roles/app.mariadb/tasks/main.yml new file mode 100644 index 0000000..0bcaaaf --- /dev/null +++ b/roles/app.mariadb/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- include: repo.yml +- include: install.yml +- include: debian-sys-maint.yml diff --git a/roles/app.mariadb/tasks/repo.yml b/roles/app.mariadb/tasks/repo.yml new file mode 100644 index 0000000..d3157b5 --- /dev/null +++ b/roles/app.mariadb/tasks/repo.yml @@ -0,0 +1,29 @@ +--- +- name: "Add MariaDB repo key" + apt_key: + keyserver: keyserver.ubuntu.com + id: "0xF1656F24C74CD1D8" + state: present + +- name: "Pin MariaDB repo" + template: + src: "mariadb_repo.j2" + dest: "/etc/apt/preferences.d/mariadb" + become: True + +# Using the automatic file naming of apt_repository, the version number is included +# in the apt list file. This results in new list files on every version upgrade. +# Switched to a static name. Here, old files are cleaned up. +- name: "Remove old style MariaDB repo files" + file: + path: "/etc/apt/sources.list.d/{{ item }}" + state: absent + with_items: + - ams2_mirrors_digitalocean_com_mariadb_repo_10_1_ubuntu.list + - ams2_mirrors_digitalocean_com_mariadb_repo_10_3_ubuntu.list + +- name: "Add MariaDB repo" + apt_repository: + repo: "deb [arch=amd64] http://{{ mariadb_origin }}/mariadb/repo/{{ mariadb_version }}/{{ vars.distribution }} {{ vars.distribution_codename }} main" + filename: mariadb + state: present diff --git a/roles/app.mariadb/templates/mariadb_repo.j2 b/roles/app.mariadb/templates/mariadb_repo.j2 new file mode 100644 index 0000000..8b040cf --- /dev/null +++ b/roles/app.mariadb/templates/mariadb_repo.j2 @@ -0,0 +1,3 @@ +Package: mariadb-* +Pin: origin {{ mariadb_origin }} +Pin-Priority: 600 diff --git a/roles/app.mariadb/templates/my.cnf.j2 b/roles/app.mariadb/templates/my.cnf.j2 new file mode 100644 index 0000000..6a061b8 --- /dev/null +++ b/roles/app.mariadb/templates/my.cnf.j2 @@ -0,0 +1,4 @@ +[client] +user=root +password={{ credentials.mysql_root.password }} +host=127.0.0.1 diff --git a/roles/auto_upgrades/meta/main.yml b/roles/auto_upgrades/meta/main.yml new file mode 100644 index 0000000..7b39f34 --- /dev/null +++ b/roles/auto_upgrades/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Auto upgrades + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/auto_upgrades/playbook.yml b/roles/auto_upgrades/playbook.yml new file mode 100644 index 0000000..5b1726f --- /dev/null +++ b/roles/auto_upgrades/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: all:!unmanaged + gather_facts: no + serial: 4 + roles: + - auto_upgrades diff --git a/roles/auto_upgrades/tasks/disable.yml b/roles/auto_upgrades/tasks/disable.yml new file mode 100644 index 0000000..d4e348b --- /dev/null +++ b/roles/auto_upgrades/tasks/disable.yml @@ -0,0 +1,11 @@ +--- +- name: "Create /etc/apt/apt.conf.d/99auto-upgrades-disabled" + copy: + content: | + # This file is managed by Ansible. Changes will be overwritten. 
+ APT::Periodic::Update-Package-Lists "1"; + APT::Periodic::Unattended-Upgrade "0"; + dest: /etc/apt/apt.conf.d/99auto-upgrades-disabled + owner: root + group: root + mode: 0644 diff --git a/roles/auto_upgrades/tasks/enable.yml b/roles/auto_upgrades/tasks/enable.yml new file mode 100644 index 0000000..548985d --- /dev/null +++ b/roles/auto_upgrades/tasks/enable.yml @@ -0,0 +1,10 @@ +--- +- name: "Install unattended-upgrades package" + apt: + name: unattended-upgrades + state: present + +- name: "Remove /etc/apt/apt.conf.d/99auto-upgrades-disabled when it exists" + file: + dest: /etc/apt/apt.conf.d/99auto-upgrades-disabled + state: absent diff --git a/roles/auto_upgrades/tasks/main.yml b/roles/auto_upgrades/tasks/main.yml new file mode 100644 index 0000000..8548411 --- /dev/null +++ b/roles/auto_upgrades/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- include: disable.yml + when: not vars.auto_upgrades + +- include: enable.yml + when: vars.auto_upgrades diff --git a/roles/firewalling/meta/main.yml b/roles/firewalling/meta/main.yml new file mode 100644 index 0000000..94d6d46 --- /dev/null +++ b/roles/firewalling/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Configure the firewall using ferm + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/firewalling/playbook.yml b/roles/firewalling/playbook.yml new file mode 100644 index 0000000..9514796 --- /dev/null +++ b/roles/firewalling/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-container:!unmanaged + gather_facts: no + serial: 2 + roles: + - firewalling diff --git a/roles/firewalling/tasks/main.yml b/roles/firewalling/tasks/main.yml new file mode 100644 index 0000000..bee4199 --- /dev/null +++ b/roles/firewalling/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: "Make sure ferm configuration directories exists" + file: + path: "{{ item }}" + state: directory + mode: 0755 + owner: root + group: root + with_items: + - /etc/ferm + - /etc/ferm/ferm.d + +- name: "Create /etc/default/ferm" + template: + src: etc_default_ferm.j2 + dest: /etc/default/ferm + mode: 0644 + owner: root + group: root + +- name: "Create ferm configuration file" + template: + src: ferm.conf.j2 + dest: /etc/ferm/ferm.conf + mode: 0644 + owner: root + group: root + tags: + - config + +- name: "Uninstall ufw" + apt: + name: ufw + state: absent + +- name: "Install ferm" + apt: + name: ferm + state: present + +- name: "Run ferm to configure the firewall" + shell: ferm /etc/ferm/ferm.conf + tags: + - config diff --git a/roles/firewalling/templates/etc_default_ferm.j2 b/roles/firewalling/templates/etc_default_ferm.j2 new file mode 100644 index 0000000..ffcb2dc --- /dev/null +++ b/roles/firewalling/templates/etc_default_ferm.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} +FAST=no +CACHE=yes +OPTIONS= +ENABLED=yes diff --git a/roles/firewalling/templates/ferm.conf.j2 b/roles/firewalling/templates/ferm.conf.j2 new file mode 100644 index 0000000..566595f --- /dev/null +++ b/roles/firewalling/templates/ferm.conf.j2 @@ -0,0 +1,53 @@ +# {{ ansible_managed }} + +domain (ip) { + table filter { + chain INPUT { + policy DROP; + + # connection tracking. + mod state state INVALID DROP; + mod state state (ESTABLISHED RELATED) ACCEPT; + + # allow local packet. + interface lo ACCEPT; + + # respond to ping. + proto icmp ACCEPT; + + # allow SSH connections. + proto tcp dport ssh ACCEPT; + +{% if group == "galera" %} + + # Allow Galera servers to access each other for syncing. 
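(Aside, not part of the patch itself: the rule that follows opens the Galera replication ports between cluster members on the management interface: 3306 for SQL, 4567 for group communication, 4568 for IST and 4444 for SST. Below is a small Python sketch for probing a peer once ferm has run, to verify those ports are actually reachable; the peer hostname is a placeholder.)

#!/usr/bin/env python3
# Sketch: verify that a Galera peer accepts the ports opened by the ferm rule below.
import socket

GALERA_PORTS = {3306: "SQL", 4567: "group communication", 4568: "IST", 4444: "SST"}
PEER = "db-peer.example"  # placeholder, use a real cluster member here

for port, purpose in sorted(GALERA_PORTS.items()):
    try:
        with socket.create_connection((PEER, port), timeout=3):
            print("%s:%d (%s) reachable" % (PEER, port, purpose))
    except OSError as err:
        print("%s:%d (%s) NOT reachable: %s" % (PEER, port, purpose, err))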
+ interface {{ segment.mgmt.interface }} + proto (tcp udp) dport (3306 4567 4568 4444) + saddr ({{ ip.galera[segment.mgmt.network] | map(attribute='address') | join(' ') }}) + ACCEPT; +{% endif %} +{% if firewall is defined %} +{% for rule in firewall %} + + # {{ rule.description }} + interface {{ segment[rule.segment].interface }} + proto {{ rule.proto }} dport {{ rule.port }} + saddr {{ rule.source }} + ACCEPT; +{% endfor %} +{% endif %} + + # Log blocked messages. + NFLOG nflog-group 1 nflog-prefix 'DROP: '; + } + chain OUTPUT { + policy ACCEPT; + mod state state (ESTABLISHED RELATED) ACCEPT; + } + chain FORWARD { + policy DROP; + } + } +} + +@include ferm.d/; diff --git a/roles/hostsfile/meta/main.yml b/roles/hostsfile/meta/main.yml new file mode 100644 index 0000000..deedc3f --- /dev/null +++ b/roles/hostsfile/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Setup hosts file + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/hostsfile/playbook.yml b/roles/hostsfile/playbook.yml new file mode 100644 index 0000000..5c45802 --- /dev/null +++ b/roles/hostsfile/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-host:lxd-container:!unmanaged + gather_facts: no + serial: 4 + roles: + - hostsfile diff --git a/roles/hostsfile/tasks/main.yml b/roles/hostsfile/tasks/main.yml new file mode 100644 index 0000000..30b1429 --- /dev/null +++ b/roles/hostsfile/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: "Generate /etc/hosts" + become: true + template: + src: hosts.j2 + dest: /etc/hosts diff --git a/roles/hostsfile/templates/hosts.j2 b/roles/hostsfile/templates/hosts.j2 new file mode 100644 index 0000000..6478d8b --- /dev/null +++ b/roles/hostsfile/templates/hosts.j2 @@ -0,0 +1,29 @@ +# {{ ansible_managed }} + +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 + +::1 ip6-localhost ip6-loopback localhost6 localhost6.localdomain6 +fe00::0 ip6-localnet +ff00::0 ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters +ff02::3 ip6-allhosts + +# Hosts within Ansible environment + +{% set seen = [ ] %} +{% for group in ip %} +{% for network in ip[group] %} +{% if "address" in ip[group][network] %} +{% set hosts = [ ip[group][network] ] %} +{% else %} +{% set hosts = ip[group][network] %} +{% endif %} +{% for host in hosts %} +{% if host.address not in seen %} +{% do seen.append(host.address) %} +{{ host.address }} {{ host.hostnames | join(' ') }} +{% endif %} +{% endfor %} +{% endfor %} +{% endfor %} diff --git a/roles/logging/handlers/main.yml b/roles/logging/handlers/main.yml new file mode 100644 index 0000000..fbb17a7 --- /dev/null +++ b/roles/logging/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: restart rsyslog + service: + name: rsyslog + state: restarted + +- name: restart ulogd2 + service: + name: ulogd2 + state: restarted diff --git a/roles/logging/meta/main.yml b/roles/logging/meta/main.yml new file mode 100644 index 0000000..8b0600e --- /dev/null +++ b/roles/logging/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: Setup logging + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/logging/playbook.yml b/roles/logging/playbook.yml new file mode 100644 index 0000000..243907c --- /dev/null +++ b/roles/logging/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-container:!unmanaged + gather_facts: no + serial: 4 + roles: + - logging diff --git a/roles/logging/tasks/main.yml b/roles/logging/tasks/main.yml new file mode 100644 index 
0000000..e256bac --- /dev/null +++ b/roles/logging/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: "Install packages" + apt: + name: + - rsyslog + - ulogd2 + state: present + +- name: "Configure rsyslog" + template: + src: rsyslog.conf.j2 + dest: /etc/rsyslog.conf + notify: + - restart rsyslog + +# The previous task created a monolithic syslog configuration file. +# Therefore, we don't use the files in /etc/rsyslog.d anymore. +# To prevent confusion, delete the files in that folder. +- name: "Find no longer used rsyslog.d/* configuration files" + find: + paths: /etc/rsyslog.d + patterns: "*" + register: rsyslog_files + +- name: "Delete rsyslog.d/* configuration files" + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ rsyslog_files.files }}" + +- name: "Configure ulogd2 (used for iptables firewall logging)" + template: + src: ulogd.conf.j2 + dest: /etc/ulogd.conf + notify: + - restart ulogd2 + +- name: "Configure log rotation for the voiceplatform log file" + template: + src: logrotate.conf.j2 + dest: /etc/logrotate.d/voiceplatform diff --git a/roles/logging/templates/logrotate.conf.j2 b/roles/logging/templates/logrotate.conf.j2 new file mode 100644 index 0000000..0867131 --- /dev/null +++ b/roles/logging/templates/logrotate.conf.j2 @@ -0,0 +1,11 @@ +/var/log/voiceplatform.log +{ + rotate 7 + daily + missingok + notifempty + compress + postrotate + restart rsyslog >/dev/null 2>&1 || true + endscript +} diff --git a/roles/logging/templates/rsyslog.conf.j2 b/roles/logging/templates/rsyslog.conf.j2 new file mode 100644 index 0000000..b7e83bf --- /dev/null +++ b/roles/logging/templates/rsyslog.conf.j2 @@ -0,0 +1,60 @@ +# {{ ansible_managed }} + +# ---------------------------------------------------------------------- +# General configuration +# ---------------------------------------------------------------------- + +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat +$RepeatedMsgReduction on +$FileOwner syslog +$FileGroup adm +$FileCreateMode 0640 +$DirCreateMode 0755 +$Umask 0022 +$PrivDropToUser syslog +$PrivDropToGroup syslog +$WorkDirectory /var/spool/rsyslog + +# ---------------------------------------------------------------------- +# Listening port configuration +# ---------------------------------------------------------------------- + +# Accept syslog over unix sockets. +$ModLoad imuxsock + +# Sockets for chrooted applications. +$AddUnixListenSocket /var/spool/postfix/dev/log + +# Accept syslog on localhost over UDP (for chrooted applications that use +# UDP to circumvent possible chroot unix socket access issues). +$ModLoad imudp +$UDPServerAddress 127.0.0.1 +$UDPServerRun 514 + +# ---------------------------------------------------------------------- +# Log routing configuration +# ---------------------------------------------------------------------- + +# Suppress some messages, caused by an issue in the systemd-shim package. +# It's harmless, so let's not log these messages, to keep them from +# tainting our error logging counters. +:msg, contains, "pam_systemd(sshd:session): Failed to create session: No such file or directory" stop +:msg, contains, "pam_systemd(su:session): Failed to create session: No such file or directory" stop + +# Send cloudinit messages to a dedicated logfile. +:syslogtag, isequal, "[CLOUDINIT]" { + /var/log/cloud-init.log + stop +} + +# A custom log format for the Voice Platform logging. +$template VoicePlatformLogFmt,"%timegenerated% %HOSTNAME% [%syslogpriority-text%] %syslogtag%%msg:::drop-last-lf%\n" + +# Some standard log targets. 
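(Aside, not part of the patch itself: the targets that follow route facility local0 to the firewall log and local1 to the voiceplatform log using the VoicePlatformLogFmt template. A minimal Python sketch for sending one test message to local1, to confirm it ends up in /var/log/voiceplatform.log; illustrative only, not installed by this role.)

#!/usr/bin/env python3
# Sketch: emit one message on facility local1, which rsyslog routes to the
# voiceplatform log according to the configuration below.
import logging
import logging.handlers

handler = logging.handlers.SysLogHandler(
    address="/dev/log",
    facility=logging.handlers.SysLogHandler.LOG_LOCAL1)
logger = logging.getLogger("voiceplatform-test")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("rsyslog routing test message")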
+auth,authpriv.* /var/log/auth.log +local0.* -/var/log/firewall.log +local1.* -/var/log/voiceplatform.log;VoicePlatformLogFmt +*.*;auth,authpriv,local0,local1.none -/var/log/syslog +kern.* -/var/log/kern.log +mail.* -/var/log/mail.log +*.emerg :omusrmsg:* diff --git a/roles/logging/templates/ulogd.conf.j2 b/roles/logging/templates/ulogd.conf.j2 new file mode 100644 index 0000000..8c21892 --- /dev/null +++ b/roles/logging/templates/ulogd.conf.j2 @@ -0,0 +1,28 @@ +# {{ ansible_managed }} +# +# This is the configuration for ulogd2, which is used to log +# firewalling messages. Our ferm configuration uses this server +# as its log target. +# +# The logged firewalling messages are passed on to syslog, +# using facility LOCAL0. The syslog server will then decide what +# to do with the messages. + +[global] +logfile="syslog" +loglevel=3 + +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_inppkt_NFLOG.so" +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_IFINDEX.so" +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_IP2STR.so" +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_PRINTPKT.so" +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_output_SYSLOG.so" +plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_raw2packet_BASE.so" + +stack=nflog:NFLOG,base:BASE,ifindex:IFINDEX,ip2str:IP2STR,print:PRINTPKT,syslog:SYSLOG + +[nflog] +group=1 + +[syslog] +facility=LOG_LOCAL0 diff --git a/roles/lxd_common/meta/main.yml b/roles/lxd_common/meta/main.yml new file mode 100644 index 0000000..ecb4081 --- /dev/null +++ b/roles/lxd_common/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: LXD setup for both hosts and containers + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/lxd_common/playbook.yml b/roles/lxd_common/playbook.yml new file mode 100644 index 0000000..415c7f4 --- /dev/null +++ b/roles/lxd_common/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-host:lxd-container:!unmanaged + gather_facts: no + serial: 4 + roles: + - lxd_common diff --git a/roles/lxd_common/tasks/main.yml b/roles/lxd_common/tasks/main.yml new file mode 100644 index 0000000..e1b3a73 --- /dev/null +++ b/roles/lxd_common/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Install script for LXD network tuning + template: + src: lxd_tune_network.sh + dest: /root/lxd_tune_network.sh + owner: root + group: root + mode: 0750 + +- name: Install cron for periodic LXD network tuning + template: + src: lxd_tune_network.cron + dest: /etc/cron.d/lxd_tune_network + owner: root + group: root + mode: 0644 diff --git a/roles/lxd_common/templates/lxd_tune_network.cron b/roles/lxd_common/templates/lxd_tune_network.cron new file mode 100644 index 0000000..0ace963 --- /dev/null +++ b/roles/lxd_common/templates/lxd_tune_network.cron @@ -0,0 +1,2 @@ +# {{ ansible_managed }} +0 * * * * root /root/lxd_tune_network.sh diff --git a/roles/lxd_common/templates/lxd_tune_network.sh b/roles/lxd_common/templates/lxd_tune_network.sh new file mode 100755 index 0000000..a990ba2 --- /dev/null +++ b/roles/lxd_common/templates/lxd_tune_network.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# +# {{ ansible_managed }} +# +# Tweak network interface txqueuelen as recommended for LXD: +# https://github.com/lxc/lxd/blob/master/doc/production-setup.md +# + +INTERFACES=$(cat /proc/net/dev | grep : | cut -d: -f1 | sed -e 's/ //g' | grep -v ^lo$) + +for IFACE in $INTERFACES; do + ip link set $IFACE txqueuelen 10000 +done + diff --git a/roles/lxd_container/meta/main.yml b/roles/lxd_container/meta/main.yml new file mode 100644 index 
0000000..089617a --- /dev/null +++ b/roles/lxd_container/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: LXD container setup + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/lxd_container/playbook.yml b/roles/lxd_container/playbook.yml new file mode 100644 index 0000000..d0a5b93 --- /dev/null +++ b/roles/lxd_container/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-container:!unmanaged + gather_facts: no + roles: + - lxd_container + serial: 3 diff --git a/roles/lxd_container/tasks/ansible_ssh_key.yml b/roles/lxd_container/tasks/ansible_ssh_key.yml new file mode 100644 index 0000000..42aa128 --- /dev/null +++ b/roles/lxd_container/tasks/ansible_ssh_key.yml @@ -0,0 +1,30 @@ +--- +- name: "Copy ssh key installation script to the LXD host" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + copy: + src: /root/.ssh/install_ansible_ssh_key + dest: /root/.ssh/install_ansible_ssh_key + owner: root + group: root + mode: 755 + # when: vars.group != "ansible" ... no idea why I did this. Old method maybe. + +- name: "Install ssh key installation script on the LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc file push /root/.ssh/install_ansible_ssh_key + {{ inventory_hostname }}/root/.ssh/install_ansible_ssh_key + +- name: "Execute ssh key installation script on the LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} /root/.ssh/install_ansible_ssh_key + +- name: "Add the LXD container host key to the ansible known hosts" + local_action: >- + command /etc/ansible/bin/add_ssh_known_host + {{ vars.network[segment.mgmt.network].address }} + /root/.ssh/id_ansible@{{ software.environment }} + +- name: "Test if ansible can now use the ssh connection" + ping: diff --git a/roles/lxd_container/tasks/bootstrap-other.yml b/roles/lxd_container/tasks/bootstrap-other.yml new file mode 100644 index 0000000..56074c1 --- /dev/null +++ b/roles/lxd_container/tasks/bootstrap-other.yml @@ -0,0 +1,17 @@ +--- +- name: "Create LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + lxd_container: + name: "{{ inventory_hostname }}" + state: started + config: + user.network-config: "{{ lookup('template', 'cloud-init-network-config.j2') }}" + source: + type: image + mode: pull + server: "{{ vars.lxd_image_server }}" + protocol: simplestreams + alias: "{{ vars.lxd_image_name }}" + profiles: + - "{{ vars.lxd_profile }}" + wait_for_ipv4_addresses: True diff --git a/roles/lxd_container/tasks/bootstrap.yml b/roles/lxd_container/tasks/bootstrap.yml new file mode 100644 index 0000000..31dcb12 --- /dev/null +++ b/roles/lxd_container/tasks/bootstrap.yml @@ -0,0 +1,66 @@ +--- +- include: bootstrap-other.yml + when: > + inventory_hostname not in lxd_status + +- name: "Set interface MAC addresses" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc config set + {{ hostvars[inventory_hostname].lxd_host | quote }}:{{ inventory_hostname | quote }} + volatile.{{ item.interface | quote }}.hwaddr {{ item.mac_address | quote}} + with_items: "{{ hostvars[inventory_hostname].network.values() | list }}" + when: '"mac_address" in item' + +- name: "Set LXD custom configuration parameters" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc config set + {{ hostvars[inventory_hostname].lxd_host | quote }}:{{ inventory_hostname | quote }} + {{ item.name | quote }} {{ item.value | 
quote }} + with_items: + - name: boot.autostart.priority + value: "{{ hostvars[inventory_hostname].lxd_boot_priority | default(0) }}" + +- name: "Stop created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + lxd_container: + name: "{{ inventory_hostname }}" + state: stopped + when: inventory_hostname not in lxd_status + +- name: "Start created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + lxd_container: + name: "{{ inventory_hostname }}" + state: started + wait_for_ipv4_addresses: True + when: inventory_hostname not in lxd_status + +- name: "Update packages on created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: lxc exec {{ inventory_hostname }} -- apt-get update + when: inventory_hostname not in lxd_status + +- name: "Upgrade packages on created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: lxc exec {{ inventory_hostname }} -- apt-get upgrade -y + when: inventory_hostname not in lxd_status + +- name: "Autoremove packages on created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: lxc exec {{ inventory_hostname }} -- apt-get autoremove -y + when: inventory_hostname not in lxd_status + +- name: "Autoclean packages on created LXD container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: lxc exec {{ inventory_hostname }} -- apt-get autoclean -y + when: inventory_hostname not in lxd_status + +- name: "Disable hostname management from cloud-init" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- + sed -i -e 's/^\\s*preserve_hostname\\s*:.*/preserve_hostname: true/' /etc/cloud/cloud.cfg + tags: [ hostname ] + when: hostvars[inventory_hostname].distribution_codename != "trusty" diff --git a/roles/lxd_container/tasks/hostname.yml b/roles/lxd_container/tasks/hostname.yml new file mode 100644 index 0000000..ac2b3de --- /dev/null +++ b/roles/lxd_container/tasks/hostname.yml @@ -0,0 +1,35 @@ +--- +# Not using the hostname module on purpose, because at this point Python has +# not yet been installed on the container. 
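(Aside, not part of the patch itself: because Python is not yet available inside a freshly created container, the tasks below drive everything through lxc exec on the LXD host instead of through Ansible modules. The same sequence, sketched with subprocess; the container name and hostname are placeholders.)

#!/usr/bin/env python3
# Sketch: what the hostname tasks below do, driven from the LXD host via lxc exec.
import subprocess

CONTAINER = "db1"          # placeholder container name
HOSTNAME = "db1.example"   # placeholder hostname

def lxc_exec(*command):
    subprocess.check_call(["lxc", "exec", CONTAINER, "--"] + list(command))

# Keep cloud-init from resetting the hostname on the next boot.
lxc_exec("sed", "-i",
         "-e", "s/^\\s*preserve_hostname\\s*:.*/preserve_hostname: true/",
         "/etc/cloud/cloud.cfg")
# Set the hostname without needing Python (or Ansible modules) in the container.
lxc_exec("hostnamectl", "set-hostname", HOSTNAME)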
+# +- name: "Disable hostname management from cloud-init" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- + sed -i -e 's/^\\s*preserve_hostname\\s*:.*/preserve_hostname: true/' /etc/cloud/cloud.cfg + tags: [ hostname ] + when: hostvars[inventory_hostname].distribution_codename != "trusty" + +- name: "Set container hostname" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} + hostnamectl set-hostname {{ hostvars[inventory_hostname].hostname | quote}} + when: hostvars[inventory_hostname].distribution_codename != "trusty" + tags: [ hostname ] + +- name: "Set container hostname (Ubuntu 14.04)" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- bash -c + "echo {{ hostvars[inventory_hostname].hostname | quote}} > /etc/hostname" + when: hostvars[inventory_hostname].distribution_codename == "trusty" + tags: [ hostname ] + +- name: "Activate container hostname (Ubuntu 14.04)" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} + hostname {{ hostvars[inventory_hostname].hostname | quote}} + when: hostvars[inventory_hostname].distribution_codename == "trusty" + tags: [ hostname ] diff --git a/roles/lxd_container/tasks/main.yml b/roles/lxd_container/tasks/main.yml new file mode 100644 index 0000000..7597a11 --- /dev/null +++ b/roles/lxd_container/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- include: bootstrap.yml +- include: python.yml + tags: [ "python" ] +- include: sshd_install.yml + tags: [ "ssh" ] +- include: ansible_ssh_key.yml + tags: [ "ssh" ] +- include: sshd_config.yml + tags: [ "ssh" ] +- include: pam_config.yml + tags: [ "ssh" ] diff --git a/roles/lxd_container/tasks/pam_config.yml b/roles/lxd_container/tasks/pam_config.yml new file mode 100644 index 0000000..a80ee43 --- /dev/null +++ b/roles/lxd_container/tasks/pam_config.yml @@ -0,0 +1,21 @@ +--- +# A few updates, to disable PAM modules that slow down the +# SSH login process. 
+ +- name: Disable 'motd noupdate' in PAM for improving SSH login speed + lineinfile: + name: /etc/pam.d/sshd + regexp: '^session.*pam_motd.*noupdate' + state: absent + +- name: Disable 'motd dynamic' in PAM for improving SSH login speed + lineinfile: + name: /etc/pam.d/sshd + regexp: '^session.*pam_motd.*dynamic' + state: absent + +- name: Disable 'mail' in PAM for improving SSH login speed + lineinfile: + name: /etc/pam.d/sshd + regexp: '^session.*pam_mail' + state: absent diff --git a/roles/lxd_container/tasks/python.yml b/roles/lxd_container/tasks/python.yml new file mode 100644 index 0000000..9629417 --- /dev/null +++ b/roles/lxd_container/tasks/python.yml @@ -0,0 +1,16 @@ +--- +- name: "Check if Python is installed" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- + dpkg -s {{ hostvars[inventory_hostname].python_package }} + register: python_install_check + failed_when: python_install_check.rc not in [0, 1] + changed_when: False + +- name: "Install Python in container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- + apt-get install -y {{ hostvars[inventory_hostname].python_package }} + when: python_install_check.rc == 1 diff --git a/roles/lxd_container/tasks/sshd_config.yml b/roles/lxd_container/tasks/sshd_config.yml new file mode 100644 index 0000000..23833e0 --- /dev/null +++ b/roles/lxd_container/tasks/sshd_config.yml @@ -0,0 +1,13 @@ +--- +- name: "Configure sshd" + template: + src: sshd_config.j2 + dest: /etc/ssh/sshd_config + owner: root + group: root + mode: 644 + +- name: "Restart sshd" + service: + name: ssh + state: restarted diff --git a/roles/lxd_container/tasks/sshd_install.yml b/roles/lxd_container/tasks/sshd_install.yml new file mode 100644 index 0000000..fdb3982 --- /dev/null +++ b/roles/lxd_container/tasks/sshd_install.yml @@ -0,0 +1,11 @@ +--- +- name: "Install sshd in container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- + apt-get install -y openssh-server + +- name: "Start sshd in container" + delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}" + shell: >- + lxc exec {{ inventory_hostname }} -- systemctl restart ssh diff --git a/roles/lxd_container/templates/cloud-init-network-config.j2 b/roles/lxd_container/templates/cloud-init-network-config.j2 new file mode 100644 index 0000000..9f999a9 --- /dev/null +++ b/roles/lxd_container/templates/cloud-init-network-config.j2 @@ -0,0 +1,19 @@ +version: 1 +config: +{% for network in vars.network.values() %} +- type: "physical" + name: "{{ network.interface }}" + subnets: + - type: "static" + address: "{{ network.address }}" + netmask: "{{ network.netmask }}" +{% if "gateway" in network %} + gateway: "{{ network.gateway }}" +{% endif %} +{% if "dns" in network %} + dns_nameservers: +{% for dns_item in network.dns %} + - "{{ dns_item }}" +{% endfor %} +{% endif %} +{% endfor %} diff --git a/roles/lxd_container/templates/network-interfaces.j2 b/roles/lxd_container/templates/network-interfaces.j2 new file mode 100644 index 0000000..7cbb830 --- /dev/null +++ b/roles/lxd_container/templates/network-interfaces.j2 @@ -0,0 +1,15 @@ +auto lo +iface lo inet loopback +{% for network in vars.network.values() %} + +auto {{ network.interface }} +iface {{ network.interface }} inet static + address {{ network.address }} + netmask {{ network.netmask }} +{%if "gateway" in network %} + gateway {{ network.gateway }} +{%endif %} +{% if "dns" in 
network %} + dns-nameservers {{ network.dns | join(" ") }} +{% endif %} +{% endfor %} diff --git a/roles/lxd_container/templates/sshd_config.j2 b/roles/lxd_container/templates/sshd_config.j2 new file mode 100644 index 0000000..b66798a --- /dev/null +++ b/roles/lxd_container/templates/sshd_config.j2 @@ -0,0 +1,27 @@ +# {{ ansible_managed }} + +AcceptEnv LANG LC_* +Subsystem sftp /usr/lib/openssh/sftp-server + +# Some settings for security. +UsePrivilegeSeparation yes +PermitRootLogin without-password +PermitEmptyPasswords no +AllowAgentForwarding no +AllowTcpForwarding no +X11Forwarding no +PermitTunnel no + +# On containers, we only expect public key-based sessions from ansible. +PubkeyAuthentication yes +PasswordAuthentication no +ChallengeResponseAuthentication no +KerberosAuthentication no +GSSAPIAuthentication no +UsePAM yes + +# Improve the time that it takes to login. +PrintMotd no +PrintLastLog no +UseDns no +Banner none diff --git a/roles/lxd_host/meta/main.yml b/roles/lxd_host/meta/main.yml new file mode 100644 index 0000000..13fa51e --- /dev/null +++ b/roles/lxd_host/meta/main.yml @@ -0,0 +1,6 @@ +galaxy_info: + author: Maurice Makaay + description: LXD host setup + company: XS4ALL + license: proprietary + min_ansible_version: 1.2 diff --git a/roles/lxd_host/playbook.yml b/roles/lxd_host/playbook.yml new file mode 100644 index 0000000..ade84f6 --- /dev/null +++ b/roles/lxd_host/playbook.yml @@ -0,0 +1,6 @@ +--- +- hosts: lxd-host:!unmanaged + gather_facts: no + serial: 4 + roles: + - lxd_host diff --git a/roles/lxd_host/tasks/bash_aliases.yml b/roles/lxd_host/tasks/bash_aliases.yml new file mode 100644 index 0000000..f614baa --- /dev/null +++ b/roles/lxd_host/tasks/bash_aliases.yml @@ -0,0 +1,34 @@ +--- +- name: Install .bash_aliases for root user + lineinfile: + path: /root/.bash_aliases + create: yes + owner: root + group: root + mode: 0644 + regexp: voice_platform_aliases + line: if [ -e /etc/voice_platform_aliases ]; then . /etc/voice_platform_aliases; fi + +- name: Install generate_voice_platform_aliases.sh script + template: + src: generate_voice_platform_aliases.sh.j2 + dest: /root/generate_voice_platform_aliases.sh + owner: root + group: root + mode: 0755 + +- name: Run generate_voice_platform_aliases.sh script + shell: /root/generate_voice_platform_aliases.sh + # Ignore errors. This script is also run from cron periodically, so if + # this script doesn't work right away, it's not a real problem + # (this might for example happen if not all physical hosts are + # booted yet). 
+PermitRootLogin without-password
+PermitEmptyPasswords no
+AllowAgentForwarding no
+AllowTcpForwarding no
+X11Forwarding no
+PermitTunnel no
+
+# On containers, we only expect public key-based sessions from ansible.
+PubkeyAuthentication yes
+PasswordAuthentication no
+ChallengeResponseAuthentication no
+KerberosAuthentication no
+GSSAPIAuthentication no
+UsePAM yes
+
+# Improve the time that it takes to log in.
+PrintMotd no
+PrintLastLog no
+UseDNS no
+Banner none
diff --git a/roles/lxd_host/meta/main.yml b/roles/lxd_host/meta/main.yml
new file mode 100644
index 0000000..13fa51e
--- /dev/null
+++ b/roles/lxd_host/meta/main.yml
@@ -0,0 +1,6 @@
+galaxy_info:
+  author: Maurice Makaay
+  description: LXD host setup
+  company: XS4ALL
+  license: proprietary
+  min_ansible_version: 1.2
diff --git a/roles/lxd_host/playbook.yml b/roles/lxd_host/playbook.yml
new file mode 100644
index 0000000..ade84f6
--- /dev/null
+++ b/roles/lxd_host/playbook.yml
@@ -0,0 +1,6 @@
+---
+- hosts: lxd-host:!unmanaged
+  gather_facts: no
+  serial: 4
+  roles:
+    - lxd_host
diff --git a/roles/lxd_host/tasks/bash_aliases.yml b/roles/lxd_host/tasks/bash_aliases.yml
new file mode 100644
index 0000000..f614baa
--- /dev/null
+++ b/roles/lxd_host/tasks/bash_aliases.yml
@@ -0,0 +1,34 @@
+---
+- name: Install .bash_aliases for root user
+  lineinfile:
+    path: /root/.bash_aliases
+    create: yes
+    owner: root
+    group: root
+    mode: 0644
+    regexp: voice_platform_aliases
+    line: if [ -e /etc/voice_platform_aliases ]; then . /etc/voice_platform_aliases; fi
+
+- name: Install generate_voice_platform_aliases.sh script
+  template:
+    src: generate_voice_platform_aliases.sh.j2
+    dest: /root/generate_voice_platform_aliases.sh
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Run generate_voice_platform_aliases.sh script
+  shell: /root/generate_voice_platform_aliases.sh
+  # Ignore errors. This script is also run from cron periodically, so if
+  # this script doesn't work right away, it's not a real problem
+  # (this might for example happen if not all physical hosts are
+  # booted yet).
+  failed_when: False
+
+- name: Install cron for updating the /etc/voice_platform_aliases file
+  template:
+    src: generate_voice_platform_aliases.cron.j2
+    dest: /etc/cron.d/generate_voice_platform_aliases
+    owner: root
+    group: root
+    mode: 0644
diff --git a/roles/lxd_host/tasks/main.yml b/roles/lxd_host/tasks/main.yml
new file mode 100644
index 0000000..bec0293
--- /dev/null
+++ b/roles/lxd_host/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- import_tasks: python.yml
+- import_tasks: tune_system.yml
+- import_tasks: bash_aliases.yml
+- import_tasks: profiles.yml
diff --git a/roles/lxd_host/tasks/profiles.yml b/roles/lxd_host/tasks/profiles.yml
new file mode 100644
index 0000000..8fdb481
--- /dev/null
+++ b/roles/lxd_host/tasks/profiles.yml
@@ -0,0 +1,8 @@
+---
+- name: Create LXD profiles
+  lxd_profile:
+    name: "{{ item.name }}"
+    config: "{{ item.config }}"
+    description: "{{ item.description }}"
+    devices: "{{ item.devices }}"
+  with_items: "{{ lxd.profiles.values() | list }}"
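+# Each profile entry is expected to provide the four keys used above.
+# Illustrative shape only; the actual definitions are expected to come from
+# the environment configuration (e.g. environments/demo/lxd.yml):
+#
+#   lxd:
+#     profiles:
+#       example:
+#         name: example
+#         description: "Example profile"
+#         config: {}
+#         devices: {}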
diff --git a/roles/lxd_host/tasks/python.yml b/roles/lxd_host/tasks/python.yml
new file mode 100644
index 0000000..c72cf54
--- /dev/null
+++ b/roles/lxd_host/tasks/python.yml
@@ -0,0 +1,10 @@
+---
+- name: "Check if Python is installed"
+  raw: dpkg -s python
+  register: python_install_check
+  failed_when: python_install_check.rc not in [0, 1]
+  changed_when: False
+
+- name: "Install Python in host"
+  raw: apt-get install -y python
+  when: python_install_check.rc == 1
diff --git a/roles/lxd_host/tasks/tune_system.yml b/roles/lxd_host/tasks/tune_system.yml
new file mode 100644
index 0000000..d376396
--- /dev/null
+++ b/roles/lxd_host/tasks/tune_system.yml
@@ -0,0 +1,29 @@
+---
+- name: Tune /etc/security/limits.conf for LXD hosting
+  template:
+    src: limits.conf.j2
+    dest: /etc/security/limits.conf
+  tags:
+    - tune
+
+- name: Find min tcp_mem
+  shell: cat /proc/sys/net/ipv4/tcp_mem | awk '{ print $1 }'
+  register: min_tcp_mem
+  changed_when: False
+  tags:
+    - tune
+
+- name: Tune sysctl parameters for LXD hosting
+  sysctl:
+    name: "{{ item.name }}"
+    value: "{{ item.value }}"
+  with_items:
+    - { name: fs.inotify.max_queued_events, value: 1048576 }
+    - { name: fs.inotify.max_user_instances, value: 1048576 }
+    - { name: fs.inotify.max_user_watches, value: 1048576 }
+    - { name: vm.max_map_count, value: 262144 }
+    - { name: kernel.dmesg_restrict, value: 1 }
+    - { name: net.core.netdev_max_backlog, value: "{{ min_tcp_mem.stdout_lines[0] }}" }
+    - { name: vm.swappiness, value: 0 }
+  tags:
+    - tune
diff --git a/roles/lxd_host/templates/generate_voice_platform_aliases.cron.j2 b/roles/lxd_host/templates/generate_voice_platform_aliases.cron.j2
new file mode 100644
index 0000000..b1ac4dc
--- /dev/null
+++ b/roles/lxd_host/templates/generate_voice_platform_aliases.cron.j2
@@ -0,0 +1,5 @@
+# {{ ansible_managed }}
+#
+# Update the LXD container aliases (used for easy login to the
+# LXD containers by name).
+*/15 * * * * root /root/generate_voice_platform_aliases.sh
diff --git a/roles/lxd_host/templates/generate_voice_platform_aliases.sh.j2 b/roles/lxd_host/templates/generate_voice_platform_aliases.sh.j2
new file mode 100644
index 0000000..20aba77
--- /dev/null
+++ b/roles/lxd_host/templates/generate_voice_platform_aliases.sh.j2
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+HOSTS="{{ groups['lxd-host'] | join(" ") }}"
+ALIASES=/etc/voice_platform_aliases
+
+echo "# Generated by $0" > $ALIASES.new
+echo "" >> $ALIASES.new
+
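+# For each LXD host, list its containers and write one bash alias per
+# container. The pipeline drops the ASCII table borders ('--' lines) and the
+# NAME header that `lxc list` prints, leaving only the container names.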
+for HOST in $HOSTS; do
+  GUESTS=$(lxc list -c n $HOST: | grep -v -- -- | awk '{ print $2 }' | { grep -v NAME || true; })
+  for GUEST in $GUESTS; do
+    echo "alias $GUEST='lxc exec $HOST:$GUEST bash'" >> $ALIASES.new
+  done
+done
+
+mv $ALIASES.new $ALIASES
+chown root $ALIASES
+chmod 444 $ALIASES
+
diff --git a/roles/lxd_host/templates/limits.conf.j2 b/roles/lxd_host/templates/limits.conf.j2
new file mode 100644
index 0000000..e949312
--- /dev/null
+++ b/roles/lxd_host/templates/limits.conf.j2
@@ -0,0 +1,13 @@
+# {{ ansible_managed }}
+#
+# Limits tuned for LXD hosting, according to the tuning guidelines:
+# https://github.com/lxc/lxd/blob/master/doc/production-setup.md
+#
+
+* soft nofile 1048576
+* hard nofile 1048576
+root soft nofile 1048576
+root hard nofile 1048576
+* soft memlock unlimited
+* hard memlock unlimited
+
diff --git a/roles/networksfile/meta/main.yml b/roles/networksfile/meta/main.yml
new file mode 100644
index 0000000..33db336
--- /dev/null
+++ b/roles/networksfile/meta/main.yml
@@ -0,0 +1,6 @@
+galaxy_info:
+  author: Maurice Makaay
+  description: Setup networks file
+  company: XS4ALL
+  license: proprietary
+  min_ansible_version: 1.2
diff --git a/roles/networksfile/playbook.yml b/roles/networksfile/playbook.yml
new file mode 100644
index 0000000..4e3da18
--- /dev/null
+++ b/roles/networksfile/playbook.yml
@@ -0,0 +1,6 @@
+---
+- hosts: all:!unmanaged
+  gather_facts: no
+  serial: 4
+  roles:
+    - networksfile
diff --git a/roles/networksfile/tasks/main.yml b/roles/networksfile/tasks/main.yml
new file mode 100644
index 0000000..21cd149
--- /dev/null
+++ b/roles/networksfile/tasks/main.yml
@@ -0,0 +1,5 @@
+---
+- name: "Generate /etc/networks"
+  template:
+    src: networks.j2
+    dest: /etc/networks
diff --git a/roles/networksfile/templates/networks.j2 b/roles/networksfile/templates/networks.j2
new file mode 100644
index 0000000..7bfef27
--- /dev/null
+++ b/roles/networksfile/templates/networks.j2
@@ -0,0 +1,6 @@
+# {{ ansible_managed }}
+# symbolic names for networks, see networks(5) for more information
+link-local 169.254.0.0
+{% for n in network %}
+net-{{ n }} {{ network[n].network }}
+{% endfor %}
diff --git a/roles/timezone/meta/main.yml b/roles/timezone/meta/main.yml
new file mode 100644
index 0000000..a3adf7b
--- /dev/null
+++ b/roles/timezone/meta/main.yml
@@ -0,0 +1,6 @@
+galaxy_info:
+  author: Maurice Makaay
+  description: Configure timezone
+  company: XS4ALL
+  license: proprietary
+  min_ansible_version: 1.2
diff --git a/roles/timezone/playbook.yml b/roles/timezone/playbook.yml
new file mode 100644
index 0000000..00c0acb
--- /dev/null
+++ b/roles/timezone/playbook.yml
@@ -0,0 +1,6 @@
+---
+- hosts: all:!unmanaged
+  gather_facts: no
+  serial: 4
+  roles:
+    - timezone
diff --git a/roles/timezone/tasks/main.yml b/roles/timezone/tasks/main.yml
new file mode 100644
index 0000000..aea0b14
--- /dev/null
+++ b/roles/timezone/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- name: Set timezone to Europe/Amsterdam
+  timezone:
+    name: Europe/Amsterdam
diff --git a/roles/users/meta/main.yml b/roles/users/meta/main.yml
new file mode 100644
index 0000000..d9f24e5
--- /dev/null
+++ b/roles/users/meta/main.yml
@@ -0,0 +1,6 @@
+galaxy_info:
+  author: Maurice Makaay
+  description: Setup system users
+  company: XS4ALL
+  license: proprietary
+  min_ansible_version: 1.2
diff --git a/roles/users/playbook.yml b/roles/users/playbook.yml
new file mode 100644
index 0000000..7df3a46
--- /dev/null
+++ b/roles/users/playbook.yml
@@ -0,0 +1,6 @@
+---
+- hosts: all:!unmanaged
+  gather_facts: no
+  serial: 4
+  roles:
+    - users
diff --git a/roles/users/tasks/main.yml b/roles/users/tasks/main.yml
new file mode 100644
index 0000000..341c2f7
--- /dev/null
+++ b/roles/users/tasks/main.yml
@@ -0,0 +1,5 @@
+- name: Remove default ubuntu user
+  user:
+    name: ubuntu
+    state: absent
+    remove: yes
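+# Note: remove=yes makes the user module also delete the home directory and
+# mail spool (userdel --remove). This assumes plays connect as another
+# account (e.g. root or the ansible user), so dropping 'ubuntu' is safe.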