Add initial roles, environment configuration, and a dynamic inventory for the LXD/Galera setup.
This commit is contained in:
parent c5ee01f786
commit 42785fa98b
@ -0,0 +1,27 @@
#!/bin/bash

SSH_DIR="${HOME}/.ssh"
KNOWN_HOSTS="${SSH_DIR}/known_hosts"
NEW_KNOWN_HOSTS="${KNOWN_HOSTS}..SWAP$$"
BACKUP_KNOWN_HOSTS="${KNOWN_HOSTS}.old"

if [ "$1" = "" ]; then
    echo "Usage: $0 <ip / hostname> [private key file for testing]" >&2
    exit 1
fi
REMOTE_HOST=$1
TEST_KEY=$2

# Here flock is used, because we might be adding multiple hosts at the
# same time, resulting in race conditions on writing the known_hosts file.
echo "Add the target host $REMOTE_HOST to $KNOWN_HOSTS"
(
    flock -e 200
    touch $KNOWN_HOSTS
    (cat $KNOWN_HOSTS; ssh-keyscan $REMOTE_HOST 2>/dev/null) | sort | uniq > $NEW_KNOWN_HOSTS
    cp $KNOWN_HOSTS ${KNOWN_HOSTS}.bak
    cp $KNOWN_HOSTS $BACKUP_KNOWN_HOSTS
    mv $NEW_KNOWN_HOSTS $KNOWN_HOSTS
) 200>${KNOWN_HOSTS}..LCK

exit 0
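A typical use of this helper is adding several target hosts at the same time, which is exactly the situation the flock guard protects against. A minimal sketch, assuming the script is installed as add_known_host (the actual file name is not visible in this view):

    for host in galera-01 galera-02 galera-03; do
        ./add_known_host "$host" &
    done
    wait   # concurrent runs serialize on ${KNOWN_HOSTS}..LCK through flock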
@ -0,0 +1,14 @@
---
# Credentials for users, services and databases.

credentials:
  mysql_root:
    username: root
    password: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      66333336646631366230336266633330393236643537366239393638383935316662353039366434
      3764373836323436353465323634656138323331646139310a353433333432316437323635316438
      36303738656663313361646362663663376638613962313933626162383233333364646332623235
      6461613935666665340a383864313836353963336461343437356537313934646235663863393161
      3962
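An inline !vault value like the one above can be (re)generated with ansible-vault, using the same password file that the dynamic inventory reads (~/.ansible-vault-password, see config.py later in this commit). A sketch with a placeholder password:

    ansible-vault encrypt_string --vault-password-file ~/.ansible-vault-password \
        --name 'password' 'replace-with-the-real-root-password'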
@ -0,0 +1,9 @@
#!/usr/bin/env python3

import sys
environment_path = sys.path[0]
sys.path.append("/etc/ansible/lib")
import dynamic_inventory

dynamic_inventory.Script(environment_path).execute()
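This wrapper follows Ansible's dynamic inventory contract: Ansible calls it with --list for the full inventory (groups plus _meta.hostvars) and with --host <name> for a single host. A sketch of manual invocation, assuming the wrapper is saved as ./inventory next to the environment's *.yml files (name and location are assumptions):

    ./inventory --list
    ./inventory --host galera-01
    ansible-playbook -i ./inventory playbook.yml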
@ -0,0 +1,42 @@
---
# We run all containers in privileged mode. This prevents a lot of
# potential issues with things like subuid/subgid mappings on
# mounted filesystems.

lxd:
  profiles:

    default:
      name: default
      description: Default profile
      config:
        security.privileged: "true"
        user.user-data: |
          timezone: Europe/Amsterdam
      devices:
        root:
          path: /
          pool: default
          type: disk

    demo:
      name: demo
      description: Demo profile
      config:
        security.privileged: "true"
        user.user-data: |
          # cloud-config
          package_upgrade: true
          packages:
            - python3
          timezone: Europe/Amsterdam
      devices:
        if-demo:
          name: if-demo
          nictype: bridged
          parent: br-demo
          type: nic
        root:
          path: /
          pool: default
          type: disk
@ -0,0 +1,24 @@
---
# By defining networks here, the named networks can be used to configure
# network interfaces on the nodes. The node network definitions are
# enriched with information from this configuration.
#
# Each network can have a property "segments", containing a list of segment
# names. These are logical segments, which are used to set up firewall rules
# on the correct interfaces / IP-addresses, without requiring that every
# logical segment always uses its own network/interface (e.g. on development,
# there is no difference between the mgmt and public segment, so these
# are attached to the same network/interface.)

networks:
  demo:
    network: 192.168.56.0
    gateway: 192.168.56.1
    netmask: 255.255.255.0
    dns:
      - 192.168.56.1
    interface: if-demo
    segments:
      - mgmt
      - public
      - hostname
|
@ -0,0 +1,89 @@
|
|||
---
|
||||
# The node types are a simple way to add node type-specific configuration.
|
||||
# In the nodes.yml, add a "type: <node type name>" property to reference one
|
||||
# of the node types. The properties from the referenced node type will
|
||||
# be copied into the node configuration (unless the node has its own
|
||||
# configuration already for a given property.)
|
||||
|
||||
node_types:
|
||||
ubuntu-19.10-lxd_host:
|
||||
distribution: "ubuntu"
|
||||
distribution_codename: "eoan"
|
||||
python_package: "python3"
|
||||
pip_executable: "pip3"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_connection: local
|
||||
ansible_ssh_pipelining: True
|
||||
ansible_service_mgr: "systemd"
|
||||
auto_upgrades: True
|
||||
|
||||
ubuntu-19.10-lxd_container:
|
||||
distribution: "ubuntu"
|
||||
distribution_codename: "eoan"
|
||||
lxd_image_server: "https://cloud-images.ubuntu.com/releases"
|
||||
lxd_image_name: "19.10"
|
||||
lxd_profile: "demo"
|
||||
python_package: "python3"
|
||||
pip_executable: "pip3"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_connection: "ssh"
|
||||
ansible_ssh_private_key_file: "~/.ssh/id_ansible@{{ software.environment }}"
|
||||
ansible_ssh_pipelining: True
|
||||
ansible_service_mgr: "systemd"
|
||||
ansible_user: root
|
||||
auto_upgrades: True
|
||||
|
||||
ubuntu-18.04-lxd_host:
|
||||
distribution: "ubuntu"
|
||||
distribution_codename: "bionic"
|
||||
php_version: "7.2"
|
||||
php_apache2_mod_name: "php7_module"
|
||||
php_libssh_package: "php-ssh2"
|
||||
php_mcrypt_from: "pear"
|
||||
python_package: "python3"
|
||||
pip_executable: "pip3"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_connection: "ssh"
|
||||
ansible_ssh_pipelining: True
|
||||
ansible_service_mgr: "systemd"
|
||||
auto_upgrades: True
|
||||
|
||||
ubuntu-14.04-lxd_container:
|
||||
distribution: "ubuntu"
|
||||
distribution_codename: "trusty"
|
||||
lxd_image_server: "https://cloud-images.ubuntu.com/releases"
|
||||
lxd_image_name: "14.04"
|
||||
lxd_profile: "demo"
|
||||
php_version: "5"
|
||||
php_apache2_mod_name: "php5_module"
|
||||
php_libssh_package: "libssh2-php"
|
||||
php_mcrypt_from: "module"
|
||||
python_package: "python"
|
||||
pip_executable: "pip"
|
||||
ansible_python_interpreter: "/usr/bin/python2"
|
||||
ansible_connection: "ssh"
|
||||
ansible_ssh_private_key_file: "/root/.ssh/id_ansible@{{ software.environment }}"
|
||||
ansible_ssh_pipelining: True
|
||||
ansible_service_mgr: "upstart"
|
||||
auto_upgrades: True
|
||||
|
||||
ubuntu-18.04-lxd_container:
|
||||
distribution: "ubuntu"
|
||||
distribution_codename: "bionic"
|
||||
lxd_image_server: "https://cloud-images.ubuntu.com/releases"
|
||||
lxd_image_name: "18.04"
|
||||
lxd_profile: "demo"
|
||||
php_version: "7.2"
|
||||
php_apache2_mod_name: "php7_module"
|
||||
php_libssh_package: "php-ssh2"
|
||||
php_mcrypt_from: "pear"
|
||||
python_package: "python3"
|
||||
pip_executable: "pip3"
|
||||
ansible_python_interpreter: "/usr/bin/python3"
|
||||
ansible_connection: "ssh"
|
||||
ansible_ssh_private_key_file: "/root/.ssh/id_ansible@{{ software.environment }}"
|
||||
ansible_ssh_pipelining: True
|
||||
ansible_service_mgr: "systemd"
|
||||
auto_upgrades: True
|
||||
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
---
|
||||
nodes:
|
||||
all-shared:
|
||||
type: ubuntu-18.04-lxd_container
|
||||
|
||||
lxd-host:
|
||||
- name: sidn-demo-01
|
||||
type: ubuntu-18.04-lxd_host
|
||||
network:
|
||||
demo: {address: 192.168.56.150}
|
||||
- name: sidn-demo-02
|
||||
type: ubuntu-18.04-lxd_host
|
||||
network:
|
||||
demo: {address: 192.168.56.151}
|
||||
|
||||
ansible:
|
||||
- name: ansible-01
|
||||
lxd_host: sidn-demo-01
|
||||
network:
|
||||
demo: {address: 192.168.56.160}
|
||||
ansible_connection: local
|
||||
|
||||
galera-shared:
|
||||
galera_role: service
|
||||
# Sometimes we see issues with upgrading Galera, requiring some manual
|
||||
# intervention and resyncing of cluster nodes to get things working.
|
||||
# Therefore, we'll handle upgrading these nodes in a controlled way
|
||||
# by hand on production.
|
||||
auto_upgrades: False
|
||||
|
||||
galera:
|
||||
- name: galera-01
|
||||
lxd_host: sidn-demo-01
|
||||
network:
|
||||
demo: {address: 192.168.56.161, mac_address: "00:16:e3:00:00:a1"}
|
||||
- name: galera-02
|
||||
lxd_host: sidn-demo-02
|
||||
network:
|
||||
demo: {address: 192.168.56.162, mac_address: "00:16:e3:00:00:a2"}
|
||||
- name: galera-03
|
||||
lxd_host: sidn-demo-02
|
||||
network:
|
||||
demo: {address: 192.168.56.163, mac_address: "00:16:e3:00:00:a3"}
|
|
@ -0,0 +1,4 @@
---
software:
  # The application environment to use.
  environment: demo
@ -0,0 +1,4 @@
from dynamic_inventory.config import Config
from dynamic_inventory.inventory import Inventory
from dynamic_inventory.script import Script
from dynamic_inventory.serialize import convert_to_json
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
|
@ -0,0 +1,31 @@
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.vault import get_file_vault_secret
from ansible.parsing.dataloader import DataLoader


class Config(object):
    def __init__(self, environment_path):
        self.environment_path = environment_path
        self._load_ansible_vault_secrets()
        self._load_configuration_files()

    def _load_ansible_vault_secrets(self):
        secret = get_file_vault_secret(
            filename="~/.ansible-vault-password",
            loader=DataLoader())
        secret.load()
        self.vault_secrets = [("default", secret)]

    def _load_configuration_files(self):
        self.lxd = self._read("lxd.yml")["lxd"]
        self.credentials = self._read("credentials.yml")["credentials"]
        self.networks = self._read("networks.yml")["networks"]
        self.software = self._read("software.yml")["software"]
        self.nodes = self._read("nodes.yml")["nodes"]
        self.node_types = self._read("node_types.yml")["node_types"]

    def _read(self, filename):
        with open("%s/%s" % (self.environment_path, filename)) as fh:
            loader = AnsibleLoader(fh, filename, self.vault_secrets)
            data = loader.get_single_data()
            return data
@ -0,0 +1,444 @@
|
|||
import collections.abc
|
||||
from collections import defaultdict
|
||||
from ansible.parsing.yaml.loader import AnsibleLoader
|
||||
from ansible.parsing.vault import get_file_vault_secret
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
from dynamic_inventory.lxd_status import list_lxd_containers
|
||||
from ipaddress import ip_network
|
||||
|
||||
|
||||
group_types = {
|
||||
"ansible": "singular",
|
||||
"lxd-host": "cluster",
|
||||
"lxd-container": "cluster",
|
||||
"galera": "cluster",
|
||||
"galera-donor": "cluster",
|
||||
"galera-primary": "singular",
|
||||
"galera-service": "cluster",
|
||||
}
|
||||
|
||||
network_segments = [
|
||||
"mgmt",
|
||||
"public",
|
||||
"hostname",
|
||||
]
|
||||
|
||||
|
||||
class InventoryException(Exception):
|
||||
"""Raised in case there are problems with the dynamic inventory."""
|
||||
|
||||
|
||||
class Inventory(object):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self._process_nodes()
|
||||
|
||||
def _process_nodes(self):
|
||||
self.groups = defaultdict(list)
|
||||
self.nodes = {}
|
||||
self.shared = defaultdict(dict)
|
||||
for group, data in self.config.nodes.items():
|
||||
if group.endswith("-shared"):
|
||||
for_group = group[0:-7]
|
||||
self._add_shared(for_group, data)
|
||||
elif isinstance(data, collections.abc.Sequence):
|
||||
for node in data:
|
||||
self._add_node(group, node)
|
||||
else:
|
||||
self._add_node(group, data)
|
||||
self._apply_shared()
|
||||
self._apply_node_type()
|
||||
self._create_galera_primary_group()
|
||||
self._create_galera_role_groups()
|
||||
self._validate_groups()
|
||||
self._enrich_network_data()
|
||||
self._enrich_nodes()
|
||||
self._create_all_group()
|
||||
self._create_lxd_container_group()
|
||||
self._list_unmanaged_hosts_in_inventory()
|
||||
self._clear_managed_property_from_all_nodes()
|
||||
self._validate_network()
|
||||
|
||||
def _add_shared(self, group, data):
|
||||
self.shared[group] = data
|
||||
|
||||
def _add_node(self, group, node):
|
||||
self._enrich_with_hostname(node)
|
||||
if node["name"] in self.nodes:
|
||||
raise InventoryException("Duplicate node name: %s" % node["name"])
|
||||
self.groups[group].append(node["name"])
|
||||
node["group"] = group
|
||||
self.nodes[node["name"]] = node
|
||||
|
||||
def _enrich_with_hostname(self, node):
|
||||
"""We allow the name in the nodes.yml file to be an fqdn. We will here
|
||||
adopt this fqdn as the node's hostname. The node's name will be set
|
||||
to the bare hostname, without the domain."""
|
||||
node["hostname"] = node["name"]
|
||||
node["name"] = node["name"].split('.')[0]
|
||||
|
||||
def _apply_shared(self):
|
||||
for group, shared in self.shared.items():
|
||||
if group in self.groups:
|
||||
for node_name in self.groups[group]:
|
||||
node = self.nodes[node_name]
|
||||
for key, val in shared.items():
|
||||
if key not in node:
|
||||
node[key] = val
|
||||
if "all" in self.shared:
|
||||
for node_name in self.nodes:
|
||||
node = self.nodes[node_name]
|
||||
for key, val in self.shared["all"].items():
|
||||
if key not in node:
|
||||
node[key] = val
|
||||
|
||||
def _apply_node_type(self):
|
||||
for node_name in self.nodes:
|
||||
node = self.nodes[node_name]
|
||||
if "type" not in node:
|
||||
continue
|
||||
if node["type"] not in self.config.node_types:
|
||||
raise InventoryException(
|
||||
"Unknown system type '%s' used for node '%s'" %
|
||||
(node["type"], node["name"]))
|
||||
for key, val in self.config.node_types[node["type"]].items():
|
||||
if key not in node:
|
||||
node[key] = val
|
||||
|
||||
def _validate_groups(self):
|
||||
for name, group_type in group_types.items():
|
||||
if group_type == "singular" and len(self.groups[name]) > 1:
|
||||
raise InventoryException("Multiple nodes defined for singular group '%s'" % name)
|
||||
|
||||
def _create_galera_primary_group(self):
|
||||
"""The 'galera-primary' group is used by plays that create databases
|
||||
on the Galera cluster. The group simply makes sure that only one
|
||||
of the cluster nodes is in this group, making it a feasible target
|
||||
for such tasks. A primary galera node can be explicitly configured
|
||||
by use of the 'is_primary' property in the nodes.yml file. If not
|
||||
explicitly configured, the first defined Galera node will be
|
||||
used by default."""
|
||||
primary = next((node["name"] for node in self.nodes.values() if
|
||||
node["group"] == 'galera' and
|
||||
"is_primary" in node and
|
||||
node["is_primary"] is True), None)
|
||||
if primary is None:
|
||||
primary = self.groups["galera"][0]
|
||||
for node in self.groups["galera"]:
|
||||
self.nodes[node]["is_primary"] = node == primary
|
||||
self.groups["galera-primary"] = [primary]
|
||||
|
||||
def _create_galera_role_groups(self):
|
||||
"""The 'galera-donor' and 'galera-service' groups are used to differentiate
|
||||
between galera nodes that are preferred donors for the galera cluster and
|
||||
galera nodes that are included in haproxy configurations.
|
||||
This is used to make sure that nodes that are used as donors won't affect
|
||||
the cluster performance."""
|
||||
self.groups['galera-service'] = []
|
||||
self.groups['galera-donor'] = []
|
||||
for node in self.groups["galera"]:
|
||||
role = "service"
|
||||
if "galera_role" in self.nodes[node]:
|
||||
role = self.nodes[node]["galera_role"]
|
||||
if role == "service":
|
||||
self.groups["galera-service"].append(node)
|
||||
elif role == "donor":
|
||||
self.groups["galera-donor"].append(node)
|
||||
else:
|
||||
raise InventoryException(
|
||||
"Illegal galera role '%s' used for node '%s'" % (role, node))
|
||||
|
||||
def _create_all_group(self):
|
||||
self.groups["all"] = {
|
||||
"children": [group for group in self.groups.keys()],
|
||||
"vars": self._create_shared_vars()
|
||||
}
|
||||
|
||||
def _create_lxd_container_group(self):
|
||||
self.groups["lxd-container"] = [
|
||||
host for host in self.nodes
|
||||
if "lxd_image_name" in self.nodes[host]
|
||||
]
|
||||
|
||||
def _create_shared_vars(self):
|
||||
segments = self._create_network_segment_data()
|
||||
mgmt_network_name = segments["mgmt"]["network"]
|
||||
lxd_hosts = self._get_lxd_host_ip_addresses(mgmt_network_name)
|
||||
return {
|
||||
"credentials": self.config.credentials,
|
||||
"software": self.config.software,
|
||||
"segment": segments,
|
||||
"ip": self._create_ip_address_data(segments),
|
||||
"lxd": self.config.lxd,
|
||||
"lxd_status": list_lxd_containers(lxd_hosts),
|
||||
}
|
||||
|
||||
def _get_lxd_host_ip_addresses(self, network_name):
|
||||
def _get_ip(node_name):
|
||||
node = self.nodes[node_name]
|
||||
if "network" not in node:
|
||||
raise InventoryException(
|
||||
"Missing network definition for lxd host '%s'" %
|
||||
node_name)
|
||||
if network_name not in node["network"]:
|
||||
raise InventoryException(
|
||||
"Missing '%s' network definition for lxd host '%s'" %
|
||||
(network_name, node_name))
|
||||
if "address" not in node["network"][network_name]:
|
||||
raise InventoryException(
|
||||
"Missing address in '%s' network definition for lxd host '%s'" %
|
||||
(network_name, node_name))
|
||||
return node["network"][network_name]["address"]
|
||||
return [
|
||||
_get_ip(h)
|
||||
for h in self.groups["lxd-host"]
|
||||
]
|
||||
|
||||
def _create_ip_address_data(self, segments):
|
||||
ip = {}
|
||||
for group, group_nodes in self.groups.items():
|
||||
ip[group] = defaultdict(list)
|
||||
for node_name in group_nodes:
|
||||
node = self.nodes[node_name]
|
||||
if "network" in node:
|
||||
for network, config in node["network"].items():
|
||||
ip[group][network].append({
|
||||
"name": node_name,
|
||||
"hostnames": self._get_hostnames_for_network(node, network, segments),
|
||||
"address": config["address"]
|
||||
})
|
||||
if group_types[group] == "singular":
|
||||
for network, ips in ip[group].items():
|
||||
ip[group][network] = ips[0]
|
||||
return ip
|
||||
|
||||
def _get_hostnames_for_network(self, node, network, segments):
|
||||
hostnames = []
|
||||
if segments["hostname"]["network"] == network:
|
||||
hostnames.append(node["hostname"])
|
||||
short_name = node["hostname"].split(".")[0]
|
||||
if (node["hostname"] != short_name):
|
||||
hostnames.append(short_name)
|
||||
hostnames.append("%s.%s" % (node["name"], network))
|
||||
return list(hostnames)
|
||||
|
||||
def _create_network_segment_data(self):
|
||||
data = {}
|
||||
for name, config in self.config.networks.items():
|
||||
if "segments" not in config:
|
||||
continue
|
||||
for segment in config["segments"]:
|
||||
if segment not in network_segments:
|
||||
raise InventoryException(
|
||||
"Unknown network segment '%s' used for network '%s'" %
|
||||
(segment, name))
|
||||
data[segment] = {
|
||||
"network": name,
|
||||
"interface": config["interface"]
|
||||
}
|
||||
return data
|
||||
|
||||
def _enrich_network_data(self):
|
||||
for name, config in self.config.networks.items():
|
||||
net = ip_network("%s/%s" % (config['network'], config['netmask']))
|
||||
self.config.networks[name]['network_cidr'] = str(net)
|
||||
if "segments" not in config:
|
||||
config["segments"] = []
|
||||
|
||||
def _enrich_nodes(self):
|
||||
for node in self.nodes.values():
|
||||
self._enrich_and_check_ansible_connection(node)
|
||||
self._enrich_with_network_data(node)
|
||||
self._enrich_webservice_data(node)
|
||||
|
||||
def _enrich_and_check_ansible_connection(self, node):
|
||||
if "ansible_connection" not in node:
|
||||
if not self._is_managed(node):
|
||||
node["ansible_connection"] = False
|
||||
else:
|
||||
raise InventoryException(
|
||||
"Node '%s' does not have an ansible_connection defined" % node["name"])
|
||||
|
||||
if node["ansible_connection"] == "local":
|
||||
self._clear_if_exists(node, "ansible_host")
|
||||
return
|
||||
|
||||
# Ansible connection is already fully specified.
|
||||
if "ansible_host" in node:
|
||||
return
|
||||
|
||||
# Ansible connection using ssh, but the ansible_host is not yet set.
|
||||
if node["ansible_connection"] == "ssh":
|
||||
mgmt_net = next((
|
||||
n for n, c in self.config.networks.items()
|
||||
if "mgmt" in c["segments"]))
|
||||
if mgmt_net not in node["network"]:
|
||||
raise InventoryException(
|
||||
"Node '%s' does not have the '%s' management network defned" %
|
||||
(node["name"], mgmt_net))
|
||||
node["ansible_host"] = node["network"][mgmt_net]["address"]
|
||||
return
|
||||
|
||||
# Ansible connection using lxc, based on the configured lxd_host.
|
||||
if node["ansible_connection"] == "lxd":
|
||||
if "lxd_profile" not in node:
|
||||
raise InventoryException(
|
||||
"Node '%s' uses lxd, but 'lxd_profile' is not configured")
|
||||
if "ansible_host" not in node:
|
||||
node["ansible_host"] = "%s:%s" % (node["lxd_host"], node["name"])
|
||||
|
||||
def _enrich_with_network_data(self, node):
|
||||
"""Enrich all network configuration blocks in the nodes with
|
||||
network configuration data from the network.yml config file.
|
||||
Properties that are not defined in the node config are filled
|
||||
with properties from the network config."""
|
||||
if "network" not in node:
|
||||
return
|
||||
for network_name, node_config in node["network"].items():
|
||||
if network_name not in self.config.networks:
|
||||
raise InventoryException(
|
||||
"Node '%s' uses network '%s', but that network is not defined" %
|
||||
(node["name"], network_name))
|
||||
for key, value in self.config.networks[network_name].items():
|
||||
if key != "segments" and key not in node_config:
|
||||
node_config[key] = value
|
||||
|
||||
def _enrich_webservice_data(self, node):
|
||||
if "webservice" not in node:
|
||||
return
|
||||
listen = []
|
||||
certs = set()
|
||||
for network_name, config in node["webservice"].items():
|
||||
if network_name not in node["network"]:
|
||||
raise InventoryException(
|
||||
"Illegal webservice listen definition: " +
|
||||
"network '%s' is not defined for host" % network_name)
|
||||
config["network"] = network_name
|
||||
config["address"] = node["network"][network_name]["address"]
|
||||
if "http_port" not in config:
|
||||
config["http_port"] = 80
|
||||
if "https_port" not in config:
|
||||
config["https_port"] = 443
|
||||
if "http" not in config:
|
||||
config["http"] = False
|
||||
if "https" not in config:
|
||||
config["https"] = False
|
||||
if "force_https" not in config:
|
||||
config["force_https"] = False
|
||||
if "use_keepalived_vip" not in config:
|
||||
config["use_keepalived_vip"] = False
|
||||
if not config["http"] and not config["https"]:
|
||||
raise InventoryException(
|
||||
"Invalid webservice config, because both http and https " +
|
||||
"are disabled " +
|
||||
"on network '%s' for host '%s'" % (network_name, node["name"]))
|
||||
if config["force_https"] and not config["https"]:
|
||||
raise InventoryException(
|
||||
"Invalid option 'force_https', because option 'https' is " +
|
||||
"not enabled for the webservice " +
|
||||
"on network '%s' for host '%s'" % (network_name, node["name"]))
|
||||
if config["https"] and "cert" not in config:
|
||||
raise InventoryException(
|
||||
"Missing option 'cert' for the webservice " +
|
||||
"on network '%s' for host '%s'" % (network_name, node["name"]))
|
||||
listen.append(config)
|
||||
|
||||
# When keepalived is in use and the webservice definition requests it,
|
||||
# the virtual IP-address of keepalived is assigned as a listen address
|
||||
# for the webservice.
|
||||
if config["use_keepalived_vip"]:
|
||||
config_vip = config.copy()
|
||||
if ("keepalived" not in node or "virtual_ipaddress" not in node["keepalived"]):
|
||||
raise InventoryException(
|
||||
"use_keepalived_vip enabled for webservice, but no keepalived " +
|
||||
"virtual IP-address defined for host '%s'" % node["name"])
|
||||
config_vip["address"] = node["keepalived"]["virtual_ipaddress"]
|
||||
listen.append(config_vip)
|
||||
|
||||
redirect_to_https = []
|
||||
for l in listen:
|
||||
if l["force_https"]:
|
||||
redirect_to_https.append({
|
||||
"network": l["network"],
|
||||
"address": l["address"],
|
||||
"port": l["http_port"],
|
||||
"https_port": l["https_port"]
|
||||
})
|
||||
service = []
|
||||
for l in listen:
|
||||
if l["http"] and not l["force_https"]:
|
||||
service.append({
|
||||
"network": l["network"],
|
||||
"address": l["address"],
|
||||
"port": l["http_port"],
|
||||
"https": False
|
||||
})
|
||||
if l["https"]:
|
||||
service.append({
|
||||
"network": l["network"],
|
||||
"address": l["address"],
|
||||
"port": "%s ssl" % l["https_port"],
|
||||
"cert": l["cert"],
|
||||
"https": True
|
||||
})
|
||||
certs.add(l["cert"])
|
||||
node["webservice"] = {
|
||||
"redirect_to_https": redirect_to_https,
|
||||
"service": service
|
||||
}
|
||||
|
||||
# Register special groups for the certificates that are used,
|
||||
# so we can use that group in our playbooks to decide whether
|
||||
# or not the current host needs the certificate setup.
|
||||
# The group name is "requires_<cert>"
|
||||
for cert in certs:
|
||||
group_name = "requires_%s" % cert
|
||||
if group_name not in self.groups:
|
||||
group_types[group_name] = "cluster"
|
||||
self.groups[group_name] = [node["name"]]
|
||||
else:
|
||||
self.groups[group_name].append(node["name"])
|
||||
|
||||
def _list_unmanaged_hosts_in_inventory(self):
|
||||
"""The unmanaged hosts are marked in the inventory, by adding them
|
||||
to a group "unmanaged"."""
|
||||
unmanaged = [
|
||||
node["name"] for node in self.nodes.values()
|
||||
if not self._is_managed(node)]
|
||||
self.groups["unmanaged"] = unmanaged
|
||||
|
||||
def _is_managed(self, node):
|
||||
return "managed" not in node or node["managed"]
|
||||
|
||||
def _clear_managed_property_from_all_nodes(self):
|
||||
"""The 'managed' property is only used for building the inventory data.
|
||||
It has no use beyond that. Therefore we delete the property from
|
||||
all nodes."""
|
||||
for node in self.nodes.values():
|
||||
self._clear_if_exists(node, "managed")
|
||||
|
||||
def _validate_network(self):
|
||||
ip_addresses = set()
|
||||
mac_addresses = set()
|
||||
for node in self.nodes.values():
|
||||
for network in node["network"]:
|
||||
config = node["network"][network]
|
||||
|
||||
if False and config["address"] in ip_addresses:
|
||||
raise InventoryException(
|
||||
"IP address %s of node %s is used by multiple hosts"
|
||||
% (config["address"], node["name"]))
|
||||
ip_addresses.add(config["address"])
|
||||
|
||||
if "mac_address" in config:
|
||||
if config["mac_address"] in mac_addresses:
|
||||
raise InventoryException(
|
||||
"MAC address %s of node %s is used by multiple hosts"
|
||||
% config["mac_address"])
|
||||
mac_addresses.add(config["mac_address"])
|
||||
|
||||
def _clear_if_exists(self, node, key):
|
||||
try:
|
||||
del node[key]
|
||||
except KeyError:
|
||||
pass
|
|
@ -0,0 +1,37 @@
|
|||
import json
|
||||
import subprocess
|
||||
|
||||
|
||||
def _lxd_list_all(hosts):
|
||||
return map(_lxd_list, hosts)
|
||||
|
||||
|
||||
def _lxd_list(host):
|
||||
output = subprocess.check_output([
|
||||
"ssh", host, "--",
|
||||
"lxc", "list", "--fast", "--format", "json"])
|
||||
|
||||
def add_host(g):
|
||||
g["host"] = host
|
||||
return g
|
||||
return map(add_host, json.loads(output))
|
||||
|
||||
|
||||
def _create_container_info(g):
|
||||
data = {
|
||||
"name": g["name"],
|
||||
"status": g["status"],
|
||||
"host": g["host"],
|
||||
}
|
||||
if 'homedir' in g["expanded_devices"]:
|
||||
data["homedir"] = g["expanded_devices"]['homedir']
|
||||
return data
|
||||
|
||||
def list_lxd_containers(hosts):
|
||||
"""create a list of all the LXD containers that are running on the LXD hosts."""
|
||||
containers_per_host = _lxd_list_all(hosts)
|
||||
all_containers = {}
|
||||
for containers in containers_per_host:
|
||||
for container in containers:
|
||||
all_containers[container["name"]] = _create_container_info(container)
|
||||
return all_containers
|
|
@ -0,0 +1,33 @@
|
|||
import argparse
|
||||
import dynamic_inventory
|
||||
|
||||
|
||||
class Script(object):
|
||||
def __init__(self, environment_path):
|
||||
config = dynamic_inventory.Config(environment_path)
|
||||
self.inventory = dynamic_inventory.Inventory(config)
|
||||
|
||||
def execute(self):
|
||||
args = self._parse_args()
|
||||
if args.host is None:
|
||||
self._do_list()
|
||||
else:
|
||||
self._do_host(args.host)
|
||||
|
||||
def _parse_args(self):
|
||||
p = argparse.ArgumentParser(description='Produce Ansible inventory')
|
||||
p.add_argument(
|
||||
'--list', action='store_true', default=True,
|
||||
help='List all hosts')
|
||||
p.add_argument(
|
||||
'--host', action='store',
|
||||
help='Show variable for a single host')
|
||||
return p.parse_args()
|
||||
|
||||
def _do_list(self):
|
||||
data = self.inventory.groups.copy()
|
||||
data["_meta"] = {"hostvars": self.inventory.nodes}
|
||||
print(dynamic_inventory.convert_to_json(data))
|
||||
|
||||
def _do_host(self, name):
|
||||
print(dynamic_inventory.convert_to_json(self.inventory.nodes[name]))
|
|
@ -0,0 +1,18 @@
|
|||
import json
|
||||
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
|
||||
|
||||
|
||||
def convert_to_json(data):
|
||||
return json.dumps(data, sort_keys=True, indent=2, default=_json_default_serializer)
|
||||
|
||||
|
||||
def _json_default_serializer(value):
|
||||
if isinstance(value, bytes):
|
||||
return value.decode('utf-8')
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
if isinstance(value, AnsibleVaultEncryptedUnicode):
|
||||
return str(value)
|
||||
raise TypeError(
|
||||
"Unsupported type '%s' used in inventory data "
|
||||
"(no support for JSON serializing this type)" % type(value).__name__)
|
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
- import_playbook: roles/users/playbook.yml
|
||||
- import_playbook: roles/hostsfile/playbook.yml
|
||||
- import_playbook: roles/networksfile/playbook.yml
|
||||
- import_playbook: roles/logging/playbook.yml
|
||||
- import_playbook: roles/firewalling/playbook.yml
|
||||
- import_playbook: roles/app.galera_node/playbook.yml
|
||||
- import_playbook: roles/app.galera_bootstrap/playbook.yml
|
|
@ -0,0 +1,11 @@
|
|||
---
|
||||
# This playbook is used to setup and configure the hosts that make up the
|
||||
# Voice Platform (i.e. the physical hosts and the LXD system containers
|
||||
# that are run on them). The actual software installation is handled by the
|
||||
# playbook-apps.yml playbook.
|
||||
# The ansible playbook is also included here, because that one sets up the
|
||||
# ssh key that ansible uses to connect to the managed hosts.
|
||||
- import_playbook: roles/ansible/playbook.yml
|
||||
- import_playbook: roles/lxd_host/playbook.yml
|
||||
- import_playbook: roles/lxd_container/playbook.yml
|
||||
- import_playbook: roles/lxd_common/playbook.yml
|
|
@ -0,0 +1,3 @@
|
|||
---
|
||||
- import_playbook: playbook-hosts.yml
|
||||
- import_playbook: playbook-apps.yml
|
|
@ -0,0 +1,6 @@
|
|||
galaxy_info:
|
||||
author: Maurice Makaay
|
||||
description: Setup Ansible
|
||||
company: XS4ALL
|
||||
license: proprietary
|
||||
min_ansible_version: 1.2
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
- hosts: ansible:!unmanaged
|
||||
gather_facts: no
|
||||
serial: 4
|
||||
roles:
|
||||
- ansible
|
|
@ -0,0 +1,33 @@
|
|||
---
|
||||
- name: Install ~root/.ansible_aliases
|
||||
template:
|
||||
src: ansible_aliases.j2
|
||||
dest: /root/.ansible_aliases
|
||||
|
||||
- name: Enable ansible aliases in ~/.bash_aliases
|
||||
lineinfile:
|
||||
path: /root/.bash_aliases
|
||||
regexp: 'ansible_aliases'
|
||||
line: '. ~/.ansible_aliases'
|
||||
create: yes
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0640
|
||||
|
||||
- name: "Check if an ansible-specific ssh keypair exists"
|
||||
stat:
|
||||
path: ~/.ssh/id_ansible@{{ software.environment }}
|
||||
register: keypair
|
||||
tags: [ "ssh" ]
|
||||
|
||||
- name: "Generate an ansible-specific ssh keypair"
|
||||
shell: ssh-keygen -C ansible@{{ software.environment }} -f ~/.ssh/id_ansible@{{ software.environment }} -N ""
|
||||
when: not keypair.stat.exists
|
||||
tags: [ "ssh" ]
|
||||
|
||||
- name: "Generate /root/.ssh/install_ansible_authorized_ssh_key"
|
||||
template:
|
||||
src: templates/install_ansible_authorized_ssh_key.j2
|
||||
dest: ~/.ssh/install_ansible_ssh_key
|
||||
mode: 0700
|
||||
tags: [ "ssh", "ssh_authorized_key" ]
|
|
@ -0,0 +1,5 @@
|
|||
alias play="ansible-playbook playbook.yml"
|
||||
alias play-hosts="ansible-playbook playbook-hosts.yml"
|
||||
alias play-apps="ansible-playbook playbook-apps.yml"
|
||||
alias replay="ansible-playbook playbook.yml --limit @playbook.retry"
|
||||
alias ansible-pwgen='ansible-vault encrypt_string $(pwgen 32 -c -n -1)'
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
|
||||
# {{ ansible_managed }}
|
||||
# Installs the Ansible management public ssh key on this host.
|
||||
|
||||
{% set pubkey = lookup('env', 'HOME')+"/.ssh/id_ansible@"+software.environment+".pub" -%}
|
||||
{% set keydata = lookup('file', pubkey) -%}
|
||||
SSH_DIR="${HOME}/.ssh"
|
||||
NAME="ansible@{{ software.environment }}"
|
||||
AUTHORIZED_KEYS="${SSH_DIR}/authorized_keys"
|
||||
NEW_AUTHORIZED_KEYS="${AUTHORIZED_KEYS}..SWAP$$"
|
||||
BACKUP_AUTHORIZED_KEYS="${AUTHORIZED_KEYS}.old"
|
||||
#KEY_DATA='from="{{ ip.ansible[segment.mgmt.network].address }}" {{ keydata }}'
|
||||
KEY_DATA='{{ keydata }}'
|
||||
|
||||
mkdir -p $SSH_DIR
|
||||
touch $AUTHORIZED_KEYS
|
||||
echo "Remove ${NAME} from existing authorized keys"
|
||||
grep -v " ${NAME}\$" "$AUTHORIZED_KEYS" > "$NEW_AUTHORIZED_KEYS"
|
||||
echo "Add fresh ansible public key to the authorized keys"
|
||||
echo $KEY_DATA >> $NEW_AUTHORIZED_KEYS
|
||||
echo "Install the new authorized keys"
|
||||
mv $NEW_AUTHORIZED_KEYS $AUTHORIZED_KEYS
|
||||
cp $AUTHORIZED_KEYS $BACKUP_AUTHORIZED_KEYS
|
||||
|
|
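The generated ~/.ssh/install_ansible_ssh_key script authorizes the environment-specific Ansible public key on whatever host it is run on. How the script gets onto a new host is not shown in this commit; one hedged possibility is piping it over a plain ssh session:

    ssh root@new-host 'bash -s' < ~/.ssh/install_ansible_ssh_key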
@ -0,0 +1,6 @@
|
|||
galaxy_info:
|
||||
author: Maurice Makaay
|
||||
description: Bootstrap Galera cluster
|
||||
company: XS4ALL
|
||||
license: proprietary
|
||||
min_ansible_version: 1.2
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
- hosts: lxd-host:ansible:!unmanaged
|
||||
gather_facts: no
|
||||
serial: 4
|
||||
roles:
|
||||
- app.galera_bootstrap
|
|
@ -0,0 +1,8 @@
|
|||
---
|
||||
- name: "Install galera cluster bootstrap advisor script"
|
||||
template:
|
||||
src: galera_bootstrap_advisor.j2
|
||||
dest: /root/galera_bootstrap_advisor
|
||||
mode: 0750
|
||||
owner: root
|
||||
group: root
|
|
@ -0,0 +1,178 @@
|
|||
#!/usr/bin/env python
|
||||
# {{ ansible_managed }}
|
||||
# Author: Maurice Makaay, XS4ALL
|
||||
|
||||
from __future__ import print_function
|
||||
import subprocess
|
||||
import re
|
||||
import json
|
||||
from os import system
|
||||
from sys import exit, stdin
|
||||
|
||||
|
||||
nodes = {{ groups['galera'] | to_json }}
|
||||
lxd_status = {{ lxd_status | to_json }}
|
||||
|
||||
|
||||
def propose(commands, exit_code):
|
||||
for command in commands:
|
||||
print("# %s" % command)
|
||||
print("")
|
||||
print("Execute now? [y/n]: ", end="")
|
||||
answer = stdin.readline()
|
||||
if "y" in answer.lower():
|
||||
print("")
|
||||
for command in commands:
|
||||
print("EXECUTING> %s" % command)
|
||||
system(command)
|
||||
print("")
|
||||
exit(0)
|
||||
exit(exit_code)
|
||||
|
||||
|
||||
print("Collecting Galera status information from nodes ...")
|
||||
status = {}
|
||||
for node in nodes:
|
||||
lxd_host = "%s:%s" % (lxd_status[node]["host"], node)
|
||||
try:
|
||||
result = subprocess.check_output([
|
||||
"lxc", "exec", lxd_host, "/root/galera_cluster_status"])
|
||||
status[node] = json.loads(result)
|
||||
except subprocess.CalledProcessError:
|
||||
status[node] = {
|
||||
"cluster_size": 0,
|
||||
"cluster_status": 'Status Failed',
|
||||
"connected": "Unknown",
|
||||
"ready": "Unknown",
|
||||
"safe_to_bootstrap": 0,
|
||||
"seqno": -1,
|
||||
"uuid": None
|
||||
}
|
||||
status[node]['lxd_host'] = lxd_host
|
||||
status[node]['node'] = node
|
||||
|
||||
def is_primary(s):
|
||||
return s["cluster_status"] == "Primary"
|
||||
|
||||
def has_correct_cluster_size(s):
|
||||
return s["cluster_size"] == len(nodes)
|
||||
|
||||
def is_connected(s):
|
||||
return s["connected"] == "ON"
|
||||
|
||||
def is_ready(s):
|
||||
return s["ready"] == "ON"
|
||||
|
||||
|
||||
print("")
|
||||
print("%-20s %-15s %-6s %-12s %-7s" % (
|
||||
"Node", "Status", "Size", "Connected", "Ready"))
|
||||
for node in nodes:
|
||||
s = status[node]
|
||||
print("%-20s %-15s %-6s %-12s %-7s" % (
|
||||
node, s["cluster_status"], s["cluster_size"],
|
||||
s["connected"], s["ready"]))
|
||||
|
||||
print("")
|
||||
print("Checking cluster status ...")
|
||||
print("")
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# CASE: All cluster nodes are up and running, green lights galore!
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
all_primary = all(map(is_primary, status.values()))
|
||||
all_size_ok = all(map(has_correct_cluster_size, status.values()))
|
||||
all_connected = all(map(is_connected, status.values()))
|
||||
all_ready = all(map(is_ready, status.values()))
|
||||
|
||||
if all([all_primary, all_size_ok, all_connected, all_ready]):
|
||||
print("There's no bootstrapping work to do here, all looks good!")
|
||||
print("")
|
||||
exit(0)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# CASE: The cluster is partially down, but some cluster hosts are still ok.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
if any(map(is_primary, status.values())) and any(map(is_ready, status.values())):
|
||||
print("The cluster seems to be in a degraded status.")
|
||||
print("Please investigate the cluster status.")
|
||||
print("- Can the cluster hosts reach each other over the network?")
|
||||
print("- Are all mariadb instances running?")
|
||||
print("")
|
||||
print("It might help to (re)start the database server on the degraded node(s):")
|
||||
print("")
|
||||
commands = [
|
||||
"lxc exec %s service mysql restart" % s["lxd_host"]
|
||||
for s in status.values()
|
||||
if not is_primary(s)
|
||||
]
|
||||
propose(commands, 1)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# CASE: All cluster nodes are down, one cluster node is safe to bootstrap.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
print("None of the cluster hosts is operational. A cluster bootup is required.")
|
||||
|
||||
safe_to_bootstrap = [s for s in status.values() if s["safe_to_bootstrap"] == 1]
|
||||
if any(safe_to_bootstrap):
|
||||
bootstrap_node = safe_to_bootstrap[0]
|
||||
print("A node is marked as 'safe to bootstrap', so proposed strategy:")
|
||||
print("")
|
||||
commands = ["lxc exec %s galera_new_cluster" % bootstrap_node["lxd_host"]]
|
||||
for n, s in status.items():
|
||||
if n == bootstrap_node["node"]:
|
||||
continue
|
||||
commands.append("lxc exec %s service mysql start" % s["lxd_host"])
|
||||
propose(commands, 2)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# CASE: All cluster nodes are down, no cluster node is safe to bootstrap.
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
print("Unfortunately, none of the nodes is marked as safe to bootstrap.")
|
||||
print("Retrieving last recovered position for all cluster nodes ...")
|
||||
print("")
|
||||
print("%-20s %-15s %-40s" % ("Node", "Recovery pos", "UUID"))
|
||||
|
||||
for n, s in status.items():
|
||||
lxd_host = "%s:%s" % (lxd_status[n]["host"], n)
|
||||
try:
|
||||
result = subprocess.check_output([
|
||||
"lxc", "exec", lxd_host, "/root/galera_wsrep_recovered_position"])
|
||||
uuid_and_pos = json.loads(result)
|
||||
uuid, pos = re.split(':', uuid_and_pos, maxsplit=1)
|
||||
s["uuid"] = uuid
|
||||
s["pos"] = int(pos)
|
||||
except subprocess.CalledProcessError:
|
||||
s["uuid"] = "Unknown"
|
||||
s["pos"] = -1
|
||||
print("%-20s %-15d %-40s" % (n, s["pos"], s["uuid"]))
|
||||
|
||||
uuids = set((s["uuid"] for s in status.values()))
|
||||
if len(uuids) != 1:
|
||||
print("")
|
||||
print("Wow... now wait a minute... There are multiple UUID's in play!")
|
||||
print("That should never happen in a Galera cluster.")
|
||||
print("You will have to handle this one yourself I'm afraid.")
|
||||
|
||||
def get_pos_key(x):
|
||||
return x["pos"]
|
||||
|
||||
old_to_new = sorted(status.values(), key=get_pos_key)
|
||||
bootstrap_node = old_to_new[-1]
|
||||
|
||||
print("")
|
||||
print("Determined a node that is safe for bootstrapping, so proposed strategy:")
|
||||
print("")
|
||||
commands = [
|
||||
"lxc exec %s /root/galera_flag_as_safe_to_bootstrap" % bootstrap_node["lxd_host"],
|
||||
"lxc exec %s galera_new_cluster" % bootstrap_node["lxd_host"]
|
||||
]
|
||||
for n, s in status.items():
|
||||
if n == bootstrap_node["node"]:
|
||||
continue
|
||||
commands.append("lxc exec %s service mysql start" % s["lxd_host"])
|
||||
propose(commands, 3)
|
|
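The advisor is interactive: it collects /root/galera_cluster_status output from every galera container over lxc exec, prints the per-node status table, and proposes lxc / galera_new_cluster commands that it only runs after confirmation. Typical use after a full cluster outage, on one of the lxd-host or ansible group members where the template is installed (sketch):

    /root/galera_bootstrap_advisor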
@ -0,0 +1,2 @@
|
|||
---
|
||||
marked_down_lockfile: /var/lib/mysql/manually.marked.down
|
|
@ -0,0 +1,9 @@
|
|||
[Unit]
|
||||
Description=Galera cluster status socket
|
||||
|
||||
[Socket]
|
||||
ListenStream=3366
|
||||
Accept=true
|
||||
|
||||
[Install]
|
||||
WantedBy=sockets.target
|
|
@ -0,0 +1,13 @@
|
|||
[Unit]
|
||||
Description=Galera/Mariadb status checker
|
||||
Requires=galera_cluster_status.socket
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart=/root/galera_cluster_status --haproxy
|
||||
TimeoutStopSec=5
|
||||
StandardInput=socket
|
||||
StandardError=journal
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -0,0 +1,70 @@
|
|||
#Total number of cluster membership changes happened.
|
||||
UserParameter=galera.cluster_conf_id[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_conf_id';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Current number of members in the cluster.
|
||||
UserParameter=galera.cluster_size[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_size';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Status of this cluster component. That is, whether the node is part of a PRIMARY or NON_PRIMARY component.
|
||||
UserParameter=galera.cluster_status[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_cluster_status';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#If the value is OFF, the node has not yet connected to any of the cluster components.
|
||||
UserParameter=galera.wsrep_connected[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_connected';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Shows the internal state of the EVS Protocol
|
||||
UserParameter=galera.wsrep_evs_state[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_evs_state';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#How much the slave lag is slowing down the cluster.
|
||||
UserParameter=galera.wsrep_flow_control_paused[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_paused';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Returns the number of FC_PAUSE events the node has received. Does not reset over time
|
||||
UserParameter=galera.wsrep_flow_control_recv[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_recv';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Returns the number of FC_PAUSE events the node has sent. Does not reset over time
|
||||
UserParameter=galera.wsrep_flow_control_sent[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_flow_control_sent';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Displays the group communications UUID.
|
||||
UserParameter=galera.wsrep_gcom_uuid[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_gcomm_uuid';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#The sequence number, or seqno, of the last committed transaction.
|
||||
UserParameter=galera.wsrep_last_committed[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_last_committed';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Internal Galera Cluster FSM state number.
|
||||
|
||||
UserParameter=galera.wsrep_local_state[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total number of local transactions that were aborted by slave transactions while in execution.
|
||||
UserParameter=galera.wsrep_local_bf_aborts[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_bf_aborts';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Current (instantaneous) length of the recv queue.
|
||||
UserParameter=galera.wsrep_local_recv_queue[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_recv_queue';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Current (instantaneous) length of the send queue.
|
||||
UserParameter=galera.wsrep_local_send_queue[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_send_queue';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Human-readable explanation of the state.
|
||||
UserParameter=galera.wsrep_local_state_comment[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_comment';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#The UUID of the state stored on this node.
|
||||
UserParameter=galera.wsrep_local_state_uuid[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_local_state_uuid';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Whether the server is ready to accept queries.
|
||||
UserParameter=galera.wsrep_ready[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_ready';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total size of write-sets received from other nodes.
|
||||
UserParameter=galera.wsrep_received_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_received_bytes';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total size of write-sets replicated.
|
||||
UserParameter=galera.replicated_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_replicated_bytes';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total size of data replicated.
|
||||
UserParameter=galera.wsrep_repl_data_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_data_bytes';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total number of keys replicated.
|
||||
UserParameter=galera.wsrep_repl_keys[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_keys';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total size of keys replicated in bytes
|
||||
UserParameter=galera.wsrep_repl_keys_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_keys_bytes';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
||||
#Total size of other bits replicated
|
||||
UserParameter=galera.wsrep_repl_other_bytes[*],echo "select VARIABLE_VALUE from information_schema.GLOBAL_STATUS where VARIABLE_NAME = 'wsrep_repl_other_bytes';" | HOME=/var/lib/zabbix mysql -N
|
||||
|
|
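Once this file is included in the Zabbix agent configuration, each key can be verified locally; a sketch, assuming zabbix_agentd is installed on the galera node:

    zabbix_agentd -t galera.cluster_size
    zabbix_agentd -t galera.wsrep_ready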
@ -0,0 +1,6 @@
|
|||
galaxy_info:
|
||||
author: Maurice Makaay
|
||||
description: Install Galera cluster nodes
|
||||
company: XS4ALL
|
||||
license: proprietary
|
||||
min_ansible_version: 1.2
|
|
@ -0,0 +1,7 @@
|
|||
---
|
||||
- hosts: galera:!unmanaged
|
||||
gather_facts: no
|
||||
roles:
|
||||
- app.galera_node
|
||||
# Process one galera node at a time, to not bring multiple nodes down simultaneously.
|
||||
serial: 1
|
|
@ -0,0 +1,66 @@
|
|||
---
|
||||
- name: "Uninstall xtrabackup utility (switched to mariabackup)"
|
||||
apt:
|
||||
name: percona-xtrabackup
|
||||
state: absent
|
||||
|
||||
- name: "Remove tuning configuration file which is now merged with galera.cnf"
|
||||
file:
|
||||
path: /etc/mysql/conf.d/tuning.cnf
|
||||
state: absent
|
||||
|
||||
- name: "Check if the galera config already exists"
|
||||
stat:
|
||||
path: /etc/mysql/conf.d/galera.cnf
|
||||
register: galera_cnf
|
||||
|
||||
- name: "Configure /etc/mysql/conf.d/galera.cnf"
|
||||
template:
|
||||
src: galera.cnf.j2
|
||||
dest: /etc/mysql/conf.d/galera.cnf
|
||||
owner: root
|
||||
group: mysql
|
||||
mode: 0640
|
||||
tags:
|
||||
- config
|
||||
|
||||
# TODO Check if we want this back or done in some other way.
|
||||
# It's safer to not be meddling with starting and stopping galera
|
||||
# nodes on subsequent runs. Registering in a file that the
|
||||
# initialization has been executed might already do the trick.
|
||||
#
|
||||
#- name: "Check if the node is safe for galera bootstrapping"
|
||||
# shell: "egrep -s -q '^ *safe_to_bootstrap *: *1 *$' /var/lib/mysql/grastate.dat"
|
||||
# register: grastate
|
||||
# failed_when: grastate.rc not in [0, 1, 2]
|
||||
# when: is_primary
|
||||
#
|
||||
# # state 0 = grastate.dat file exists and it contains "safe_to_bootstrap: 1"
|
||||
# # state 1 = grastate.dat file exists, but it does not contain "safe_to_bootstrap: 1"
|
||||
# # state 2 = grastate.dat file does not yet exist, this is a new node
|
||||
#- name: "Stop mysql service on primary node, prior to bootstrapping galera"
|
||||
# service:
|
||||
# name: mariadb
|
||||
# state: stopped
|
||||
# when: is_primary and grastate.rc in [0, 2]
|
||||
#
|
||||
#- name: "Bootstrap galera on primary node"
|
||||
# shell: galera_new_cluster
|
||||
# when: is_primary and grastate.rc in [0, 2]
|
||||
#
|
||||
#- name: "Restart mysql service on secondary node"
|
||||
# service:
|
||||
# name: mariadb
|
||||
# state: restarted
|
||||
# when: not is_primary
|
||||
|
||||
- name: "Create galera-haproxy user (for checking node health)"
|
||||
mysql_user:
|
||||
name: galera-haproxy
|
||||
host: "%"
|
||||
|
||||
- name: "Restart mysql server (only on initial install)"
|
||||
service:
|
||||
name: mariadb
|
||||
state: restarted
|
||||
when: not galera_cnf.stat.exists
|
|
@ -0,0 +1,6 @@
|
|||
---
|
||||
- include_role: name=app.mariadb
|
||||
- import_tasks: scripts.yml
|
||||
tags: [ "scripts" ]
|
||||
- import_tasks: install.yml
|
||||
tags: [ "cleanup" ]
|
|
@ -0,0 +1,22 @@
|
|||
- name: "Install required packages for galera support scripts"
|
||||
apt:
|
||||
name:
|
||||
- "{{ vars.python_package }}"
|
||||
- "{{ vars.python_package }}-mysql.connector"
|
||||
state: present
|
||||
tags:
|
||||
- scripts
|
||||
|
||||
- name: "Install galera support scripts"
|
||||
template:
|
||||
src: "{{ item }}.j2"
|
||||
dest: "/root/{{ item }}"
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0700
|
||||
with_items:
|
||||
- galera_cluster_status
|
||||
- galera_wsrep_recovered_position
|
||||
- galera_flag_as_safe_to_bootstrap
|
||||
tags:
|
||||
- scripts
|
|
@ -0,0 +1,60 @@
|
|||
[mysqld]
|
||||
|
||||
bind-address = 0.0.0.0
|
||||
|
||||
# Don't do name resolving on clients, because it might cause unneeded impact.
|
||||
skip-name-resolve
|
||||
|
||||
# Galera works best with innodb tables.
|
||||
default-storage-engine = innodb
|
||||
|
||||
# Mandatory options to make galera replication work reliably.
|
||||
binlog_format = ROW
|
||||
innodb_autoinc_lock_mode = 2
|
||||
|
||||
# Enable the wsrep provider for cluster sync support.
|
||||
wsrep_provider = /usr/lib/galera/libgalera_smm.so
|
||||
wsrep_on = ON
|
||||
|
||||
# Configure the cluster node.
|
||||
wsrep_node_name = "{{ inventory_hostname }}"
|
||||
wsrep_node_address = "{{ vars.network[segment.mgmt.network].address }}"
|
||||
|
||||
# Configure the cluster environment.
|
||||
wsrep_cluster_name = "galera_cluster"
|
||||
wsrep_cluster_address = "gcomm://{{ ip['galera'][segment.mgmt.network] | map(attribute='address') | join(",") }}"
|
||||
{% if groups['galera-donor'] %}
|
||||
|
||||
# When joining the cluster, try the named donor nodes, before using a service
|
||||
# node as the donor. The comma at the end is important, because that tells
|
||||
# galera to try the service nodes when no donor nodes are available.
|
||||
wsrep_sst_donor = "{{ groups['galera-donor'] | join(',') }},"
|
||||
{% endif %}
|
||||
|
||||
# The preferred way to handle replication is using the mariabackup tool.
|
||||
wsrep_sst_method = mariabackup
|
||||
wsrep_sst_auth = "root:{{ credentials.mysql_root.password }}"
|
||||
|
||||
{% if software.environment == "production" %}
|
||||
# Some performance tweaks for the production environment (where more
|
||||
# resources are available than in the other environments).
|
||||
max_connections = 250
|
||||
innodb_buffer_pool_size = 8192M
|
||||
query_cache_type = 0
|
||||
query_cache_size = 0
|
||||
innodb_flush_log_at_trx_commit = 0
|
||||
tmp_table_size = 512M
|
||||
max_heap_table_size = 512M
|
||||
wsrep_slave_threads = 16
|
||||
wsrep_provider_options="gcs.fc_limit=80; gcs.fc_factor=0.8; gcache.size=2G; gcache.page_size=2G"
|
||||
|
||||
# To enable performance monitoring and investigation.
|
||||
performance_schema = ON
|
||||
performance-schema-instrument='stage/% = ON'
|
||||
performance-schema-consumer-events-stages-current = ON
|
||||
performance-schema-consumer-events-stages-history = ON
|
||||
performance-schema-consumer-events-stages-history-long = ON
|
||||
{% endif %}
|
||||
|
||||
[sst]
|
||||
sst-syslog=1
|
|
@ -0,0 +1,184 @@
#!/usr/bin/env python3
# Author: Maurice Makaay, XS4ALL
# {{ ansible_managed }}

import re
import json
import sys
import os
import configparser
from mysql.connector import MySQLConnection


lockfile = "{{ marked_down_lockfile }}"


def _get_mode_from_argv():
    mode = "json"
    if len(sys.argv) > 1:
        if sys.argv[1] == "--haproxy":
            mode = "haproxy"
        elif sys.argv[1] == "--json":
            mode = "json"
        else:
            raise ValueError("Invalid argument(s) used (you can only use --haproxy or --json).")
    return mode


def _connect_to_db():
    try:
        config = configparser.ConfigParser()
        config.read("/etc/mysql/debian.cnf")
        user = config["client"]["user"]
        password = config["client"]["password"]
        socket = config["client"]["socket"]
        return MySQLConnection(
            host="localhost",
            database="mysql",
            user=user,
            password=password,
            unix_socket=socket)
    except Exception:
        # A failed connection is reported as "not safe to use" later on.
        return None


def _init_response():
    return {
        'cluster_size': 0,
        'cluster_status': None,
        'connected': 'OFF',
        'last_committed': 0,
        'local_state_comment': None,
        'read_only': 'OFF',
        'ready': 'OFF',
        'safe_to_bootstrap': 0,
        'seqno': None,
        'sst_method': None,
        'uuid': None,
        'server_version': None,
        'innodb_version': None,
        'protocol_version': None,
        'wsrep_patch_version': None
    }


def _add_global_status(response, db):
    for key, value in _query("SHOW GLOBAL STATUS LIKE 'wsrep_%'", db):
        key = re.sub('^wsrep_', '', key)
        if key in response:
            response[key] = value


def _add_global_variables(response, db):
    query = """SHOW GLOBAL VARIABLES WHERE Variable_name IN (
        'read_only', 'wsrep_sst_method',
        'innodb_version', 'protocol_version', 'version',
        'wsrep_patch_version'
    )"""
    for key, value in _query(query, db):
        if key == "version":
            key = "server_version"
        if key == "wsrep_sst_method":
            key = "sst_method"
        response[key] = value


def _query(query, db):
    try:
        cursor = db.cursor()
        cursor.execute(query)
        return cursor.fetchall()
    except Exception:
        return []


def _add_grastate(response):
    try:
        f = open("/var/lib/mysql/grastate.dat", "r")
        for line in f:
            if line.startswith('#') or re.match(r'^\s*$', line):
                continue
            line = re.sub(r'\s+$', '', line)
            key, value = re.split(r':\s+', line, maxsplit=1)
            if key in response:
                response[key] = value
        response['cluster_size'] = int(response['cluster_size'])
        response['seqno'] = int(response['seqno'])
        response['safe_to_bootstrap'] = int(response['safe_to_bootstrap'])
    except Exception:
        pass


def _add_manually_disabled(response):
    response["manually_disabled"] = os.path.isfile(lockfile)


def _evaluate_safe_to_use(response):
    '''
    Evaluate if it is safe to use this node for requests. Inspiration:
    https://severalnines.com/resources/tutorials/mysql-load-balancing-haproxy-tutorial
    '''
    status = response['local_state_comment']
    is_read_only = response['read_only'] != 'OFF'
    is_ready = response['ready'] == 'ON'
    is_connected = response['connected'] == 'ON'
    method = response['sst_method']
    is_using_xtrabackup = method is not None and method.startswith("xtrabackup")

    safe_to_use = False
    comment = None

    if response['manually_disabled']:
        comment = "The node has been manually disabled (file %s exists)" % lockfile
    elif status is None:
        comment = "The MySQL server seems not to be running at all"
    elif status == 'Synced':
        if is_read_only:
            comment = "Status is 'Synced', but database is reported to be read-only"
        elif not is_ready:
            comment = "Status is 'Synced', but database reports WSS not ready"
        elif not is_connected:
            comment = "Status is 'Synced', but database reports WSS not being connected"
        else:
            safe_to_use = True
            comment = "Status is 'Synced' and database is writable"
    elif status == 'Donor':
        if is_using_xtrabackup:
            safe_to_use = True
            comment = "Status is 'Donor', but using safe '%s' as the SST method" % method
        else:
            comment = "Status is 'Donor', and xtrabackup(-v2) is not used for SST"
    else:
        comment = "Galera status is not 'Synced', but '%s'" % status
    response['safe_to_use'] = safe_to_use
    response['safe_to_use_comment'] = comment


def _output_response(response, mode):
    json_data = json.dumps(response, indent=4, sort_keys=True) + "\r\n"
    if mode == "json":
        print(json_data)
    else:
        if response["safe_to_use"]:
            print("HTTP/1.1 200 OK", end="\r\n")
        else:
            print("HTTP/1.1 503 Service Unavailable", end="\r\n")
        print("Content-Length:", len(json_data), end="\r\n")
        print("Keep-Alive: no", end="\r\n")
        print("Content-Type: application/json", end="\r\n\r\n")
        print(json_data, end="")


response = _init_response()
db = _connect_to_db()
if db is None:
    response['safe_to_use'] = False
    response['safe_to_use_comment'] = "Connection to MySQL server failed"
else:
    _add_global_status(response, db)
    _add_global_variables(response, db)
    db.close()
_add_grastate(response)
_add_manually_disabled(response)
_evaluate_safe_to_use(response)
mode = _get_mode_from_argv()
_output_response(response, mode)
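The check script above supports two output modes. A minimal usage sketch follows; the installed path is an assumption (not part of this commit), and exposing the --haproxy mode over TCP (for example via xinetd or socat) so HAProxy can poll it with an HTTP check is one possible way to wire it up:

    # assuming the template is installed as /usr/local/bin/galera-health:
    /usr/local/bin/galera-health --json       # full cluster status as JSON
    /usr/local/bin/galera-health --haproxy    # same JSON wrapped in an HTTP 200/503 response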
@ -0,0 +1,7 @@
#!/bin/bash
# Author: Maurice Makaay, XS4ALL
# {{ ansible_managed }}

sed -i \
    -e 's/safe_to_bootstrap:\s*0/safe_to_bootstrap: 1/' \
    /var/lib/mysql/grastate.dat
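For reference, the grastate.dat file that this one-liner edits looks roughly like the sketch below (uuid and seqno values are illustrative); the script only flips the safe_to_bootstrap flag:

    # GALERA saved state
    version: 2.1
    uuid:    f0c3c6a1-2f34-11e9-a2b8-9e6a2c4f1a10
    seqno:   -1
    safe_to_bootstrap: 0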
@ -0,0 +1,33 @@
#!/usr/bin/env python3
# Author: Maurice Makaay, XS4ALL
# {{ ansible_managed }}
#
# This script is used to find out what the latest recorded transaction
# on this host is. It is used by the galera cluster bootstrapping
# script (which is available in /root/bootstrap_galera_cluster on the
# ansible host) to find out in what order to start the nodes in case
# of an unclean full shutdown of the cluster.
#
# DO NOT RUN FOR FUN! This script will bring down the mysql service
# when it is not already down.
#

import subprocess
import re
import json
from sys import exit


subprocess.check_output(["service", "mysql", "stop"])
result = subprocess.check_output(
    ['mysqld', '--wsrep-recover'], stderr=subprocess.STDOUT)

# check_output() returns bytes, so decode before splitting into lines.
info = re.compile(r'WSREP: Recovered position: (.+)\s*$')
for line in result.decode().split("\n"):
    match = info.search(line)
    if match is not None:
        print(json.dumps(match.group(1)))
        exit(0)

print(json.dumps(None))
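A minimal usage sketch (script name and the recovered position are illustrative): the recovered position is printed as a JSON string in "<cluster uuid>:<seqno>" form, or null when no position could be recovered.

    $ ./wsrep_recover_position.py
    "f0c3c6a1-2f34-11e9-a2b8-9e6a2c4f1a10:1234"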
@ -0,0 +1,5 @@
# {{ ansible_managed }}

[client]
user = debian-sys-maint
password = {{ credentials.mysql_root.password }}

@ -0,0 +1,4 @@
[client]
user={{ credentials.ZabbixMysqlMonitoring.username }}
password={{ credentials.ZabbixMysqlMonitoring.password }}
host=127.0.0.1
@ -0,0 +1,3 @@
---
mariadb_version: 10.3
mariadb_origin: ams2.mirrors.digitalocean.com

@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: Install MariaDB server
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,16 @@
---
- name: "Configure the password for the debian-sys-maint user"
  mysql_user:
    name: "debian-sys-maint"
    host: "localhost"
    password: "{{ credentials.mysql_root.password }}"
  tags:
    - debian-sys-maint

- name: "Store the debian-sys-maint password in /etc/mysql/debian.cnf"
  replace:
    path: /etc/mysql/debian.cnf
    regexp: '^password\s*=.*$'
    replace: "password = {{ credentials.mysql_root.password }}"
  tags:
    - debian-sys-maint
@ -0,0 +1,33 @@
- name: "Feed MariaDB root password to debconf"
  debconf:
    name: "mariadb-server"
    question: "{{ item }}"
    value: "{{ credentials.mysql_root.password }}"
    vtype: password
  changed_when: False
  with_items:
    - mysql-server/root_password
    - mysql-server/root_password_again

- name: "Install MariaDB / Galera"
  apt:
    name: "mariadb-server"
    state: present

- name: "Install Mariabackup"
  apt:
    name: "mariadb-backup"
    state: present

- name: "Install required Python MySQL module for db management via Ansible"
  apt:
    name: "{{ vars.python_package }}-mysqldb"
    state: present

- name: "Configure /root/.my.cnf"
  template:
    src: my.cnf.j2
    dest: /root/.my.cnf
    owner: root
    group: root
    mode: 0400
@ -0,0 +1,4 @@
---
- include: repo.yml
- include: install.yml
- include: debian-sys-maint.yml

@ -0,0 +1,29 @@
---
- name: "Add MariaDB repo key"
  apt_key:
    keyserver: keyserver.ubuntu.com
    id: "0xF1656F24C74CD1D8"
    state: present

- name: "Pin MariaDB repo"
  template:
    src: "mariadb_repo.j2"
    dest: "/etc/apt/preferences.d/mariadb"
  become: True

# With the automatic file naming of apt_repository, the version number ends
# up in the name of the apt list file, which leaves a stale list file behind
# after every version upgrade. We therefore use a static file name; the
# old-style files are cleaned up here.
- name: "Remove old style MariaDB repo files"
  file:
    path: "/etc/apt/sources.list.d/{{ item }}"
    state: absent
  with_items:
    - ams2_mirrors_digitalocean_com_mariadb_repo_10_1_ubuntu.list
    - ams2_mirrors_digitalocean_com_mariadb_repo_10_3_ubuntu.list

- name: "Add MariaDB repo"
  apt_repository:
    repo: "deb [arch=amd64] http://{{ mariadb_origin }}/mariadb/repo/{{ mariadb_version }}/{{ vars.distribution }} {{ vars.distribution_codename }} main"
    filename: mariadb
    state: present
@ -0,0 +1,3 @@
Package: mariadb-*
Pin: origin {{ mariadb_origin }}
Pin-Priority: 600

@ -0,0 +1,4 @@
[client]
user=root
password={{ credentials.mysql_root.password }}
host=127.0.0.1
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: Auto upgrades
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: all:!unmanaged
  gather_facts: no
  serial: 4
  roles:
    - auto_upgrades

@ -0,0 +1,11 @@
---
- name: "Create /etc/apt/apt.conf.d/99auto-upgrades-disabled"
  copy:
    content: |
      # This file is managed by Ansible. Changes will be overwritten.
      APT::Periodic::Update-Package-Lists "1";
      APT::Periodic::Unattended-Upgrade "0";
    dest: /etc/apt/apt.conf.d/99auto-upgrades-disabled
    owner: root
    group: root
    mode: 0644

@ -0,0 +1,10 @@
---
- name: "Install unattended-upgrades package"
  apt:
    name: unattended-upgrades
    state: present

- name: "Remove /etc/apt/apt.conf.d/99auto-upgrades-disabled when it exists"
  file:
    dest: /etc/apt/apt.conf.d/99auto-upgrades-disabled
    state: absent

@ -0,0 +1,6 @@
---
- include: disable.yml
  when: not vars.auto_upgrades

- include: enable.yml
  when: vars.auto_upgrades
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: Configure the firewall using ferm
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-container:!unmanaged
  gather_facts: no
  serial: 2
  roles:
    - firewalling

@ -0,0 +1,44 @@
---
- name: "Make sure the ferm configuration directories exist"
  file:
    path: "{{ item }}"
    state: directory
    mode: 0755
    owner: root
    group: root
  with_items:
    - /etc/ferm
    - /etc/ferm/ferm.d

- name: "Create /etc/default/ferm"
  template:
    src: etc_default_ferm.j2
    dest: /etc/default/ferm
    mode: 0644
    owner: root
    group: root

- name: "Create ferm configuration file"
  template:
    src: ferm.conf.j2
    dest: /etc/ferm/ferm.conf
    mode: 0644
    owner: root
    group: root
  tags:
    - config

- name: "Uninstall ufw"
  apt:
    name: ufw
    state: absent

- name: "Install ferm"
  apt:
    name: ferm
    state: present

- name: "Run ferm to configure the firewall"
  shell: ferm /etc/ferm/ferm.conf
  tags:
    - config
@ -0,0 +1,5 @@
# {{ ansible_managed }}
FAST=no
CACHE=yes
OPTIONS=
ENABLED=yes

@ -0,0 +1,53 @@
# {{ ansible_managed }}

domain (ip) {
    table filter {
        chain INPUT {
            policy DROP;

            # connection tracking.
            mod state state INVALID DROP;
            mod state state (ESTABLISHED RELATED) ACCEPT;

            # allow local packets.
            interface lo ACCEPT;

            # respond to ping.
            proto icmp ACCEPT;

            # allow SSH connections.
            proto tcp dport ssh ACCEPT;

{% if group == "galera" %}

            # Allow Galera servers to access each other for syncing.
            interface {{ segment.mgmt.interface }}
                proto (tcp udp) dport (3306 4567 4568 4444)
                saddr ({{ ip.galera[segment.mgmt.network] | map(attribute='address') | join(' ') }})
                ACCEPT;
{% endif %}
{% if firewall is defined %}
{% for rule in firewall %}

            # {{ rule.description }}
            interface {{ segment[rule.segment].interface }}
                proto {{ rule.proto }} dport {{ rule.port }}
                saddr {{ rule.source }}
                ACCEPT;
{% endfor %}
{% endif %}

            # Log blocked messages.
            NFLOG nflog-group 1 nflog-prefix 'DROP: ';
        }
        chain OUTPUT {
            policy ACCEPT;
            mod state state (ESTABLISHED RELATED) ACCEPT;
        }
        chain FORWARD {
            policy DROP;
        }
    }
}

@include ferm.d/;
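The rule loop in the template above reads an optional per-host firewall variable. A minimal sketch of what such a variable could look like in host or group vars (the rule itself is illustrative, not part of this commit):

    firewall:
      - description: "Allow Zabbix agent access from the monitoring host"
        segment: mgmt
        proto: tcp
        port: 10050
        source: 192.168.10.5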
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: Setup hosts file
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-host:lxd-container:!unmanaged
  gather_facts: no
  serial: 4
  roles:
    - hostsfile

@ -0,0 +1,6 @@
---
- name: "Generate /etc/hosts"
  become: true
  template:
    src: hosts.j2
    dest: /etc/hosts
@ -0,0 +1,29 @@
# {{ ansible_managed }}

127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4

::1 ip6-localhost ip6-loopback localhost6 localhost6.localdomain6
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts

# Hosts within Ansible environment

{% set seen = [ ] %}
{% for group in ip %}
{% for network in ip[group] %}
{% if "address" in ip[group][network] %}
{% set hosts = [ ip[group][network] ] %}
{% else %}
{% set hosts = ip[group][network] %}
{% endif %}
{% for host in hosts %}
{% if host.address not in seen %}
{% do seen.append(host.address) %}
{{ host.address }} {{ host.hostnames | join(' ') }}
{% endif %}
{% endfor %}
{% endfor %}
{% endfor %}
@ -0,0 +1,10 @@
---
- name: restart rsyslog
  service:
    name: rsyslog
    state: restarted

- name: restart ulogd2
  service:
    name: ulogd2
    state: restarted

@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: Setup logging
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-container:!unmanaged
  gather_facts: no
  serial: 4
  roles:
    - logging
@ -0,0 +1,41 @@
---
- name: "Install packages"
  apt:
    name:
      - rsyslog
      - ulogd2
    state: present

- name: "Configure rsyslog"
  template:
    src: rsyslog.conf.j2
    dest: /etc/rsyslog.conf
  notify:
    - restart rsyslog

# The previous task created a monolithic syslog configuration file, so the
# files in /etc/rsyslog.d are no longer used. To prevent confusion, the
# files in that folder are deleted.
- name: "Find no longer used rsyslog.d/* configuration files"
  find:
    paths: /etc/rsyslog.d
    patterns: "*"
  register: rsyslog_files

- name: "Delete rsyslog.d/* configuration files"
  file:
    path: "{{ item.path }}"
    state: absent
  with_items: "{{ rsyslog_files.files }}"

- name: "Configure ulogd2 (used for iptables firewall logging)"
  template:
    src: ulogd.conf.j2
    dest: /etc/ulogd.conf
  notify:
    - restart ulogd2

- name: "Configure log rotation for the voiceplatform log file"
  template:
    src: logrotate.conf.j2
    dest: /etc/logrotate.d/voiceplatform
@ -0,0 +1,11 @@
/var/log/voiceplatform.log
{
    rotate 7
    daily
    missingok
    notifempty
    compress
    postrotate
        restart rsyslog >/dev/null 2>&1 || true
    endscript
}
@ -0,0 +1,60 @@
# {{ ansible_managed }}

# ----------------------------------------------------------------------
# General configuration
# ----------------------------------------------------------------------

$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
$RepeatedMsgReduction on
$FileOwner syslog
$FileGroup adm
$FileCreateMode 0640
$DirCreateMode 0755
$Umask 0022
$PrivDropToUser syslog
$PrivDropToGroup syslog
$WorkDirectory /var/spool/rsyslog

# ----------------------------------------------------------------------
# Listening port configuration
# ----------------------------------------------------------------------

# Accept syslog over unix sockets.
$ModLoad imuxsock

# Sockets for chrooted applications.
$AddUnixListenSocket /var/spool/postfix/dev/log

# Accept syslog on localhost over UDP (for chrooted applications that use
# UDP to circumvent possible chroot unix socket access issues).
$ModLoad imudp
$UDPServerAddress 127.0.0.1
$UDPServerRun 514

# ----------------------------------------------------------------------
# Log routing configuration
# ----------------------------------------------------------------------

# Suppress some messages caused by an issue in the systemd-shim package.
# The issue is harmless, so these messages are dropped here to keep them
# from tainting our error logging counters.
:msg, contains, "pam_systemd(sshd:session): Failed to create session: No such file or directory" stop
:msg, contains, "pam_systemd(su:session): Failed to create session: No such file or directory" stop

# Send cloud-init messages to a dedicated logfile.
:syslogtag, isequal, "[CLOUDINIT]" {
    /var/log/cloud-init.log
    stop
}

# A custom log format for the Voice Platform logging.
$template VoicePlatformLogFmt,"%timegenerated% %HOSTNAME% [%syslogpriority-text%] %syslogtag%%msg:::drop-last-lf%\n"

# Some standard log targets.
auth,authpriv.*                        /var/log/auth.log
local0.*                               -/var/log/firewall.log
local1.*                               -/var/log/voiceplatform.log;VoicePlatformLogFmt
*.*;auth,authpriv,local0,local1.none   -/var/log/syslog
kern.*                                 -/var/log/kern.log
mail.*                                 -/var/log/mail.log
*.emerg                                :omusrmsg:*
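To verify the local1 routing and the custom VoicePlatformLogFmt template on a node, something along these lines can be used (the tag and message are illustrative):

    logger -p local1.info -t demo-app "hello from the voice platform"
    tail -n 1 /var/log/voiceplatform.log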
@ -0,0 +1,28 @@
# {{ ansible_managed }}
#
# This is the configuration for ulogd2, which is used to log
# firewalling messages. Our ferm configuration uses this daemon
# as its log target (NFLOG group 1).
#
# The logged firewalling messages are passed on to syslog,
# using facility LOCAL0. The syslog server will then decide what
# to do with the messages.

[global]
logfile="syslog"
loglevel=3

plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_inppkt_NFLOG.so"
plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_IFINDEX.so"
plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_IP2STR.so"
plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_filter_PRINTPKT.so"
plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_output_SYSLOG.so"
plugin="/usr/lib/x86_64-linux-gnu/ulogd/ulogd_raw2packet_BASE.so"

stack=nflog:NFLOG,base:BASE,ifindex:IFINDEX,ip2str:IP2STR,print:PRINTPKT,syslog:SYSLOG

[nflog]
group=1

[syslog]
facility=LOG_LOCAL0
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: LXD setup for both hosts and containers
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-host:lxd-container:!unmanaged
  gather_facts: no
  serial: 4
  roles:
    - lxd_common

@ -0,0 +1,16 @@
---
- name: Install script for LXD network tuning
  template:
    src: lxd_tune_network.sh
    dest: /root/lxd_tune_network.sh
    owner: root
    group: root
    mode: 0750

- name: Install cron for periodic LXD network tuning
  template:
    src: lxd_tune_network.cron
    dest: /etc/cron.d/lxd_tune_network
    owner: root
    group: root
    mode: 0644
@ -0,0 +1,2 @@
# {{ ansible_managed }}
0 * * * * root /root/lxd_tune_network.sh

@ -0,0 +1,14 @@
#!/bin/bash
#
# {{ ansible_managed }}
#
# Tweak network interface txqueuelen as recommended for LXD:
# https://github.com/lxc/lxd/blob/master/doc/production-setup.md
#

INTERFACES=$(cat /proc/net/dev | grep : | cut -d: -f1 | sed -e 's/ //g' | grep -v ^lo$)

for IFACE in $INTERFACES; do
    ip link set $IFACE txqueuelen 10000
done
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: LXD container setup
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-container:!unmanaged
  gather_facts: no
  roles:
    - lxd_container
  serial: 3
@ -0,0 +1,30 @@
---
- name: "Copy ssh key installation script to the LXD host"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  copy:
    src: /root/.ssh/install_ansible_ssh_key
    dest: /root/.ssh/install_ansible_ssh_key
    owner: root
    group: root
    mode: 0755
  # when: vars.group != "ansible" ... no idea why I did this. Old method maybe.

- name: "Install ssh key installation script on the LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc file push /root/.ssh/install_ansible_ssh_key
    {{ inventory_hostname }}/root/.ssh/install_ansible_ssh_key

- name: "Execute ssh key installation script on the LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} /root/.ssh/install_ansible_ssh_key

- name: "Add the LXD container host key to the ansible known hosts"
  local_action: >-
    command /etc/ansible/bin/add_ssh_known_host
    {{ vars.network[segment.mgmt.network].address }}
    /root/.ssh/id_ansible@{{ software.environment }}

- name: "Test if ansible can now use the ssh connection"
  ping:
@ -0,0 +1,17 @@
---
- name: "Create LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  lxd_container:
    name: "{{ inventory_hostname }}"
    state: started
    config:
      user.network-config: "{{ lookup('template', 'cloud-init-network-config.j2') }}"
    source:
      type: image
      mode: pull
      server: "{{ vars.lxd_image_server }}"
      protocol: simplestreams
      alias: "{{ vars.lxd_image_name }}"
    profiles:
      - "{{ vars.lxd_profile }}"
    wait_for_ipv4_addresses: True
@ -0,0 +1,66 @@
---
- include: bootstrap-other.yml
  when: >
    inventory_hostname not in lxd_status

- name: "Set interface MAC addresses"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc config set
    {{ hostvars[inventory_hostname].lxd_host | quote }}:{{ inventory_hostname | quote }}
    volatile.{{ item.interface | quote }}.hwaddr {{ item.mac_address | quote }}
  with_items: "{{ hostvars[inventory_hostname].network.values() | list }}"
  when: '"mac_address" in item'

- name: "Set LXD custom configuration parameters"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc config set
    {{ hostvars[inventory_hostname].lxd_host | quote }}:{{ inventory_hostname | quote }}
    {{ item.name | quote }} {{ item.value | quote }}
  with_items:
    - name: boot.autostart.priority
      value: "{{ hostvars[inventory_hostname].lxd_boot_priority | default(0) }}"

- name: "Stop created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  lxd_container:
    name: "{{ inventory_hostname }}"
    state: stopped
  when: inventory_hostname not in lxd_status

- name: "Start created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  lxd_container:
    name: "{{ inventory_hostname }}"
    state: started
    wait_for_ipv4_addresses: True
  when: inventory_hostname not in lxd_status

- name: "Update packages on created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: lxc exec {{ inventory_hostname }} -- apt-get update
  when: inventory_hostname not in lxd_status

- name: "Upgrade packages on created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: lxc exec {{ inventory_hostname }} -- apt-get upgrade -y
  when: inventory_hostname not in lxd_status

- name: "Autoremove packages on created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: lxc exec {{ inventory_hostname }} -- apt-get autoremove -y
  when: inventory_hostname not in lxd_status

- name: "Autoclean packages on created LXD container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: lxc exec {{ inventory_hostname }} -- apt-get autoclean -y
  when: inventory_hostname not in lxd_status

- name: "Disable hostname management from cloud-init"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} --
    sed -i -e 's/^\\s*preserve_hostname\\s*:.*/preserve_hostname: true/' /etc/cloud/cloud.cfg
  tags: [ hostname ]
  when: hostvars[inventory_hostname].distribution_codename != "trusty"
@ -0,0 +1,35 @@
---
# Not using the hostname module on purpose, because at this point Python
# has not yet been installed on the container.
#
- name: "Disable hostname management from cloud-init"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} --
    sed -i -e 's/^\\s*preserve_hostname\\s*:.*/preserve_hostname: true/' /etc/cloud/cloud.cfg
  tags: [ hostname ]
  when: hostvars[inventory_hostname].distribution_codename != "trusty"

- name: "Set container hostname"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }}
    hostnamectl set-hostname {{ hostvars[inventory_hostname].hostname | quote }}
  when: hostvars[inventory_hostname].distribution_codename != "trusty"
  tags: [ hostname ]

- name: "Set container hostname (Ubuntu 14.04)"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} -- bash -c
    "echo {{ hostvars[inventory_hostname].hostname | quote }} > /etc/hostname"
  when: hostvars[inventory_hostname].distribution_codename == "trusty"
  tags: [ hostname ]

- name: "Activate container hostname (Ubuntu 14.04)"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }}
    hostname {{ hostvars[inventory_hostname].hostname | quote }}
  when: hostvars[inventory_hostname].distribution_codename == "trusty"
  tags: [ hostname ]
@ -0,0 +1,12 @@
---
- include: bootstrap.yml
- include: python.yml
  tags: [ "python" ]
- include: sshd_install.yml
  tags: [ "ssh" ]
- include: ansible_ssh_key.yml
  tags: [ "ssh" ]
- include: sshd_config.yml
  tags: [ "ssh" ]
- include: pam_config.yml
  tags: [ "ssh" ]
@ -0,0 +1,21 @@
---
# A few updates to disable PAM modules that slow down the
# SSH login process.

- name: Disable 'motd noupdate' in PAM for improving SSH login speed
  lineinfile:
    name: /etc/pam.d/sshd
    regexp: '^session.*pam_motd.*noupdate'
    state: absent

- name: Disable 'motd dynamic' in PAM for improving SSH login speed
  lineinfile:
    name: /etc/pam.d/sshd
    regexp: '^session.*pam_motd.*dynamic'
    state: absent

- name: Disable 'mail' in PAM for improving SSH login speed
  lineinfile:
    name: /etc/pam.d/sshd
    regexp: '^session.*pam_mail'
    state: absent
@ -0,0 +1,16 @@
---
- name: "Check if Python is installed"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} --
    dpkg -s {{ hostvars[inventory_hostname].python_package }}
  register: python_install_check
  failed_when: python_install_check.rc not in [0, 1]
  changed_when: False

- name: "Install Python in container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} --
    apt-get install -y {{ hostvars[inventory_hostname].python_package }}
  when: python_install_check.rc == 1
@ -0,0 +1,13 @@
---
- name: "Configure sshd"
  template:
    src: sshd_config.j2
    dest: /etc/ssh/sshd_config
    owner: root
    group: root
    mode: 0644

- name: "Restart sshd"
  service:
    name: ssh
    state: restarted

@ -0,0 +1,11 @@
---
- name: "Install sshd in container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} --
    apt-get install -y openssh-server

- name: "Start sshd in container"
  delegate_to: "{{ hostvars[inventory_hostname].lxd_host }}"
  shell: >-
    lxc exec {{ inventory_hostname }} -- systemctl restart ssh
@ -0,0 +1,19 @@
version: 1
config:
{% for network in vars.network.values() %}
  - type: "physical"
    name: "{{ network.interface }}"
    subnets:
      - type: "static"
        address: "{{ network.address }}"
        netmask: "{{ network.netmask }}"
{% if "gateway" in network %}
        gateway: "{{ network.gateway }}"
{% endif %}
{% if "dns" in network %}
        dns_nameservers:
{% for dns_item in network.dns %}
          - "{{ dns_item }}"
{% endfor %}
{% endif %}
{% endfor %}
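Both this cloud-init template and the interfaces template that follows iterate over the host's network variable. A minimal sketch of the expected shape (the interface name and addresses are illustrative, not part of this commit):

    network:
      mgmt:
        interface: eth0
        address: 192.168.10.21
        netmask: 255.255.255.0
        gateway: 192.168.10.1
        dns:
          - 192.168.10.53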
@ -0,0 +1,15 @@
auto lo
iface lo inet loopback
{% for network in vars.network.values() %}

auto {{ network.interface }}
iface {{ network.interface }} inet static
    address {{ network.address }}
    netmask {{ network.netmask }}
{% if "gateway" in network %}
    gateway {{ network.gateway }}
{% endif %}
{% if "dns" in network %}
    dns-nameservers {{ network.dns | join(" ") }}
{% endif %}
{% endfor %}
@ -0,0 +1,27 @@
# {{ ansible_managed }}

AcceptEnv LANG LC_*
Subsystem sftp /usr/lib/openssh/sftp-server

# Some settings for security.
UsePrivilegeSeparation yes
PermitRootLogin without-password
PermitEmptyPasswords no
AllowAgentForwarding no
AllowTcpForwarding no
X11Forwarding no
PermitTunnel no

# On containers, we only expect public key-based sessions from ansible.
PubkeyAuthentication yes
PasswordAuthentication no
ChallengeResponseAuthentication no
KerberosAuthentication no
GSSAPIAuthentication no
UsePAM yes

# Improve the time that it takes to login.
PrintMotd no
PrintLastLog no
UseDns no
Banner none
@ -0,0 +1,6 @@
galaxy_info:
  author: Maurice Makaay
  description: LXD host setup
  company: XS4ALL
  license: proprietary
  min_ansible_version: 1.2

@ -0,0 +1,6 @@
---
- hosts: lxd-host:!unmanaged
  gather_facts: no
  serial: 4
  roles:
    - lxd_host
@ -0,0 +1,34 @@
---
- name: Install .bash_aliases for root user
  lineinfile:
    path: /root/.bash_aliases
    create: yes
    owner: root
    group: root
    mode: 0644
    regexp: voice_platform_aliases
    line: if [ -e /etc/voice_platform_aliases ]; then . /etc/voice_platform_aliases; fi

- name: Install generate_voice_platform_aliases.sh script
  template:
    src: generate_voice_platform_aliases.sh.j2
    dest: /root/generate_voice_platform_aliases.sh
    owner: root
    group: root
    mode: 0755

- name: Run generate_voice_platform_aliases.sh script
  shell: /root/generate_voice_platform_aliases.sh
  # Ignore errors. This script is also run periodically from cron, so if it
  # doesn't work right away, that is not a real problem (this can happen,
  # for example, when not all physical hosts have booted yet).
  failed_when: False

- name: Install cron for updating the /etc/voice_platform_aliases file
  template:
    src: generate_voice_platform_aliases.cron.j2
    dest: /etc/cron.d/generate_voice_platform_aliases
    owner: root
    group: root
    mode: 0644
@ -0,0 +1,5 @@
---
- import_tasks: python.yml
- import_tasks: tune_system.yml
- import_tasks: bash_aliases.yml
- import_tasks: profiles.yml
@ -0,0 +1,8 @@
---
- name: Create LXD profiles
  lxd_profile:
    name: "{{ item.name }}"
    config: "{{ item.config }}"
    description: "{{ item.description }}"
    devices: "{{ item.devices }}"
  with_items: "{{ lxd.profiles.values() | list }}"