import collections
from collections import defaultdict
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.vault import get_file_vault_secret
from ansible.parsing.dataloader import DataLoader
from dynamic_inventory.lxd_status import list_lxd_containers
from ipaddress import ip_network


group_types = {
    "ansible": "singular",
    "lxd-host": "cluster",
    "lxd-container": "cluster",
    "galera": "cluster",
    "galera-donor": "cluster",
    "galera-primary": "singular",
    "galera-service": "cluster",
}

network_segments = [
    "mgmt",
    "public",
    "hostname",
]


class InventoryException(Exception):
    """Raised in case there are problems with the dynamic inventory."""


class Inventory(object):
    def __init__(self, config):
        self.config = config
        self._process_nodes()

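    # A hypothetical sketch (not taken from the real config files) of the kind
    # of mapping this class expects in config.nodes: a "<group>-shared" entry
    # provides default properties for all nodes in <group>, and a group may map
    # to either a list of node definitions or a single node definition.
    #
    #   {
    #       "galera-shared": {"type": "db-server"},
    #       "galera": [{"name": "db1.example.com"}, {"name": "db2.example.com"}],
    #       "ansible": {"name": "ctl1.example.com", "ansible_connection": "local"},
    #   }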
    def _process_nodes(self):
        self.groups = defaultdict(list)
        self.nodes = {}
        self.shared = defaultdict(dict)
        for group, data in self.config.nodes.items():
            if group.endswith("-shared"):
                # Strip the "-shared" suffix to find the group that these
                # shared properties apply to.
                for_group = group[0:-7]
                self._add_shared(for_group, data)
            elif isinstance(data, collections.abc.Sequence):
                for node in data:
                    self._add_node(group, node)
            else:
                self._add_node(group, data)
        self._apply_shared()
        self._apply_node_type()
        self._create_galera_primary_group()
        self._create_galera_role_groups()
        self._validate_groups()
        self._enrich_network_data()
        self._enrich_nodes()
        self._create_all_group()
        self._create_lxd_container_group()
        self._list_unmanaged_hosts_in_inventory()
        self._clear_managed_property_from_all_nodes()
        self._validate_network()

    def _add_shared(self, group, data):
        self.shared[group] = data

    def _add_node(self, group, node):
        self._enrich_with_hostname(node)
        if node["name"] in self.nodes:
            raise InventoryException("Duplicate node name: %s" % node["name"])
        self.groups[group].append(node["name"])
        node["group"] = group
        self.nodes[node["name"]] = node

    def _enrich_with_hostname(self, node):
        """We allow the name in the nodes.yml file to be an fqdn. We will here
        adopt this fqdn as the node's hostname. The node's name will be set
        to the bare hostname, without the domain."""
        node["hostname"] = node["name"]
        node["name"] = node["name"].split('.')[0]

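    # A hypothetical example of the split performed by _enrich_with_hostname:
    # a node defined with name "db1.example.com" keeps that fqdn as its
    # "hostname", while its "name" becomes the bare "db1".
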
    def _apply_shared(self):
        for group, shared in self.shared.items():
            if group in self.groups:
                for node_name in self.groups[group]:
                    node = self.nodes[node_name]
                    for key, val in shared.items():
                        if key not in node:
                            node[key] = val
        if "all" in self.shared:
            for node_name in self.nodes:
                node = self.nodes[node_name]
                for key, val in self.shared["all"].items():
                    if key not in node:
                        node[key] = val

    def _apply_node_type(self):
        for node_name in self.nodes:
            node = self.nodes[node_name]
            if "type" not in node:
                continue
            if node["type"] not in self.config.node_types:
                raise InventoryException(
                    "Unknown system type '%s' used for node '%s'" %
                    (node["type"], node["name"]))
            for key, val in self.config.node_types[node["type"]].items():
                if key not in node:
                    node[key] = val

    def _validate_groups(self):
        for name, group_type in group_types.items():
            if group_type == "singular" and len(self.groups[name]) > 1:
                raise InventoryException(
                    "Multiple nodes defined for singular group '%s'" % name)

    def _create_galera_primary_group(self):
        """The 'galera-primary' group is used by plays that create databases
        on the Galera cluster. The group simply makes sure that only one
        of the cluster nodes is in this group, making it a feasible target
        for such tasks. A primary galera node can be explicitly configured
        by use of the 'is_primary' property in the nodes.yml file. If not
        explicitly configured, the first defined Galera node will be
        used by default."""
        primary = next((node["name"] for node in self.nodes.values() if
                        node["group"] == 'galera' and
                        "is_primary" in node and
                        node["is_primary"] is True), None)
        if primary is None:
            primary = self.groups["galera"][0]
        for node in self.groups["galera"]:
            self.nodes[node]["is_primary"] = node == primary
        self.groups["galera-primary"] = [primary]

    def _create_galera_role_groups(self):
        """The 'galera-donor' and 'galera-service' groups are used to differentiate
        between galera nodes that are preferred donors for the galera cluster and
        galera nodes that are included in haproxy configurations.
        This is used to make sure that nodes that are used as donors won't affect
        the cluster performance."""
        self.groups['galera-service'] = []
        self.groups['galera-donor'] = []
        for node in self.groups["galera"]:
            role = "service"
            if "galera_role" in self.nodes[node]:
                role = self.nodes[node]["galera_role"]
            if role == "service":
                self.groups["galera-service"].append(node)
            elif role == "donor":
                self.groups["galera-donor"].append(node)
            else:
                raise InventoryException(
                    "Illegal galera role '%s' used for node '%s'" % (role, node))

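    # Hypothetical nodes.yml fragment (for illustration only) showing how the
    # galera groups above are driven by node properties:
    #
    #   galera:
    #     - name: db1.example.com
    #       is_primary: true
    #     - name: db2.example.com
    #     - name: db3.example.com
    #       galera_role: donor
    #
    # This would yield galera-primary = [db1], galera-service = [db1, db2]
    # and galera-donor = [db3].
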
    def _create_all_group(self):
        self.groups["all"] = {
            "children": [group for group in self.groups.keys()],
            "vars": self._create_shared_vars()
        }

    def _create_lxd_container_group(self):
        self.groups["lxd-container"] = [
            host for host in self.nodes
            if "lxd_image_name" in self.nodes[host]
        ]

    def _create_shared_vars(self):
        segments = self._create_network_segment_data()
        mgmt_network_name = segments["mgmt"]["network"]
        lxd_hosts = self._get_lxd_host_ip_addresses(mgmt_network_name)
        return {
            "credentials": self.config.credentials,
            "software": self.config.software,
            "segment": segments,
            "ip": self._create_ip_address_data(segments),
            "lxd": self.config.lxd,
            "lxd_status": list_lxd_containers(lxd_hosts),
        }

    def _get_lxd_host_ip_addresses(self, network_name):
        def _get_ip(node_name):
            node = self.nodes[node_name]
            if "network" not in node:
                raise InventoryException(
                    "Missing network definition for lxd host '%s'" %
                    node_name)
            if network_name not in node["network"]:
                raise InventoryException(
                    "Missing '%s' network definition for lxd host '%s'" %
                    (network_name, node_name))
            if "address" not in node["network"][network_name]:
                raise InventoryException(
                    "Missing address in '%s' network definition for lxd host '%s'" %
                    (network_name, node_name))
            return node["network"][network_name]["address"]
        return [
            _get_ip(h)
            for h in self.groups["lxd-host"]
        ]

    def _create_ip_address_data(self, segments):
        ip = {}
        for group, group_nodes in self.groups.items():
            ip[group] = defaultdict(list)
            for node_name in group_nodes:
                node = self.nodes[node_name]
                if "network" in node:
                    for network, config in node["network"].items():
                        ip[group][network].append({
                            "name": node_name,
                            "hostnames": self._get_hostnames_for_network(node, network, segments),
                            "address": config["address"]
                        })
            if group_types[group] == "singular":
                for network, ips in ip[group].items():
                    ip[group][network] = ips[0]
        return ip

    def _get_hostnames_for_network(self, node, network, segments):
        hostnames = []
        if segments["hostname"]["network"] == network:
            hostnames.append(node["hostname"])
            short_name = node["hostname"].split(".")[0]
            if node["hostname"] != short_name:
                hostnames.append(short_name)
        hostnames.append("%s.%s" % (node["name"], network))
        return hostnames

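    # Illustration of _get_hostnames_for_network with hypothetical values: for
    # a node named "web1" with hostname "web1.example.com", on the network that
    # carries the "hostname" segment this returns
    # ["web1.example.com", "web1", "web1.<network>"]; on any other network it
    # returns just ["web1.<network>"].
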
    def _create_network_segment_data(self):
        data = {}
        for name, config in self.config.networks.items():
            if "segments" not in config:
                continue
            for segment in config["segments"]:
                if segment not in network_segments:
                    raise InventoryException(
                        "Unknown network segment '%s' used for network '%s'" %
                        (segment, name))
                data[segment] = {
                    "network": name,
                    "interface": config["interface"]
                }
        return data

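    # A hypothetical network.yml-style fragment (illustration only) and the
    # segment data _create_network_segment_data derives from it:
    #
    #   mgmt-net:
    #     interface: eth0
    #     segments: [mgmt, hostname]
    #
    # would yield {"mgmt": {"network": "mgmt-net", "interface": "eth0"},
    #              "hostname": {"network": "mgmt-net", "interface": "eth0"}}.
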
    def _enrich_network_data(self):
        for name, config in self.config.networks.items():
            net = ip_network("%s/%s" % (config['network'], config['netmask']))
            self.config.networks[name]['network_cidr'] = str(net)
            if "segments" not in config:
                config["segments"] = []

    def _enrich_nodes(self):
        for node in self.nodes.values():
            self._enrich_and_check_ansible_connection(node)
            self._enrich_with_network_data(node)
            self._enrich_webservice_data(node)

    def _enrich_and_check_ansible_connection(self, node):
        if "ansible_connection" not in node:
            if not self._is_managed(node):
                node["ansible_connection"] = False
            else:
                raise InventoryException(
                    "Node '%s' does not have an ansible_connection defined" % node["name"])

        if node["ansible_connection"] == "local":
            self._clear_if_exists(node, "ansible_host")
            return

        # Ansible connection already fully specified.
        if "ansible_host" in node:
            return

        # Ansible connection using ssh, but the ansible_host is not yet set.
        if node["ansible_connection"] == "ssh":
            mgmt_net = next((
                n for n, c in self.config.networks.items()
                if "mgmt" in c["segments"]))
            if mgmt_net not in node["network"]:
                raise InventoryException(
                    "Node '%s' does not have the '%s' management network defined" %
                    (node["name"], mgmt_net))
            node["ansible_host"] = node["network"][mgmt_net]["address"]
            return

        # Ansible connection using lxd, based on the configured lxd_host.
        if node["ansible_connection"] == "lxd":
            if "lxd_profile" not in node:
                raise InventoryException(
                    "Node '%s' uses lxd, but 'lxd_profile' is not configured" %
                    node["name"])
            if "ansible_host" not in node:
                node["ansible_host"] = "%s:%s" % (node["lxd_host"], node["name"])

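    # Connection resolution in _enrich_and_check_ansible_connection, shown with
    # hypothetical node definitions:
    #
    #   {"name": "ctl1", "ansible_connection": "local"}  -> ansible_host removed
    #   {"name": "web1", "ansible_connection": "ssh"}    -> ansible_host taken from
    #       the node's address on the network carrying the "mgmt" segment
    #   {"name": "db1", "ansible_connection": "lxd",
    #    "lxd_host": "host1", "lxd_profile": "default"}  -> ansible_host "host1:db1"
    #
    # Unmanaged nodes (managed: false) get ansible_connection = False.
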
    def _enrich_with_network_data(self, node):
        """Enrich all network configuration blocks in the nodes with
        network configuration data from the network.yml config file.
        Properties that are not defined in the node config are filled
        with properties from the network config."""
        if "network" not in node:
            return
        for network_name, node_config in node["network"].items():
            if network_name not in self.config.networks:
                raise InventoryException(
                    "Node '%s' uses network '%s', but that network is not defined" %
                    (node["name"], network_name))
            for key, value in self.config.networks[network_name].items():
                if key != "segments" and key not in node_config:
                    node_config[key] = value

    def _enrich_webservice_data(self, node):
        if "webservice" not in node:
            return
        listen = []
        certs = set()
        for network_name, config in node["webservice"].items():
            if network_name not in node["network"]:
                raise InventoryException(
                    "Illegal webservice listen definition: " +
                    "network '%s' is not defined for host" % network_name)
            config["network"] = network_name
            config["address"] = node["network"][network_name]["address"]
            if "http_port" not in config:
                config["http_port"] = 80
            if "https_port" not in config:
                config["https_port"] = 443
            if "http" not in config:
                config["http"] = False
            if "https" not in config:
                config["https"] = False
            if "force_https" not in config:
                config["force_https"] = False
            if "use_keepalived_vip" not in config:
                config["use_keepalived_vip"] = False
            if not config["http"] and not config["https"]:
                raise InventoryException(
                    "Invalid webservice config, because both http and https " +
                    "are disabled " +
                    "on network '%s' for host '%s'" % (network_name, node["name"]))
            if config["force_https"] and not config["https"]:
                raise InventoryException(
                    "Invalid option 'force_https', because option 'https' is " +
                    "not enabled for the webservice " +
                    "on network '%s' for host '%s'" % (network_name, node["name"]))
            if config["https"] and "cert" not in config:
                raise InventoryException(
                    "Missing option 'cert' for the webservice " +
                    "on network '%s' for host '%s'" % (network_name, node["name"]))
            listen.append(config)

            # When keepalived is in use and the webservice definition requests it,
            # the virtual IP-address of keepalived is assigned as a listen address
            # for the webservice.
            if config["use_keepalived_vip"]:
                config_vip = config.copy()
                if ("keepalived" not in node or
                        "virtual_ipaddress" not in node["keepalived"]):
                    raise InventoryException(
                        "use_keepalived_vip enabled for webservice, but no keepalived " +
                        "virtual IP-address defined for host '%s'" % node["name"])
                config_vip["address"] = node["keepalived"]["virtual_ipaddress"]
                listen.append(config_vip)

        redirect_to_https = []
        for l in listen:
            if l["force_https"]:
                redirect_to_https.append({
                    "network": l["network"],
                    "address": l["address"],
                    "port": l["http_port"],
                    "https_port": l["https_port"]
                })
        service = []
        for l in listen:
            if l["http"] and not l["force_https"]:
                service.append({
                    "network": l["network"],
                    "address": l["address"],
                    "port": l["http_port"],
                    "https": False
                })
            if l["https"]:
                service.append({
                    "network": l["network"],
                    "address": l["address"],
                    "port": "%s ssl" % l["https_port"],
                    "cert": l["cert"],
                    "https": True
                })
                certs.add(l["cert"])
        node["webservice"] = {
            "redirect_to_https": redirect_to_https,
            "service": service
        }

        # Register special groups for the certificates that are used,
        # so we can use that group in our playbooks to decide whether
        # or not the current host needs the certificate setup.
        # The group name is "requires_<cert>".
        for cert in certs:
            group_name = "requires_%s" % cert
            if group_name not in self.groups:
                group_types[group_name] = "cluster"
                self.groups[group_name] = [node["name"]]
            else:
                self.groups[group_name].append(node["name"])

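    # Hypothetical nodes.yml fragment (illustration only) for the webservice
    # enrichment above:
    #
    #   webservice:
    #     public:
    #       https: true
    #       force_https: true
    #       cert: example_com
    #
    # With an address on the "public" network this produces one entry in
    # "redirect_to_https" (port 80 -> 443) and one https entry in "service"
    # (port "443 ssl"), and registers the node in group "requires_example_com".
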
    def _list_unmanaged_hosts_in_inventory(self):
        """Unmanaged hosts are marked in the inventory by adding them
        to the group "unmanaged"."""
        unmanaged = [
            node["name"] for node in self.nodes.values()
            if not self._is_managed(node)]
        self.groups["unmanaged"] = unmanaged

    def _is_managed(self, node):
        return "managed" not in node or node["managed"]

    def _clear_managed_property_from_all_nodes(self):
        """The 'managed' property is only used for building the inventory data.
        It has no use beyond that. Therefore we delete the property from
        all nodes."""
        for node in self.nodes.values():
            self._clear_if_exists(node, "managed")

    def _validate_network(self):
        ip_addresses = set()
        mac_addresses = set()
        for node in self.nodes.values():
            # Nodes without a network definition are skipped.
            for network in node.get("network", {}):
                config = node["network"][network]

                # Note: the duplicate IP address check below is disabled by the
                # 'False and' guard; addresses are still collected.
                if False and config["address"] in ip_addresses:
                    raise InventoryException(
                        "IP address %s of node %s is used by multiple hosts"
                        % (config["address"], node["name"]))
                ip_addresses.add(config["address"])

                if "mac_address" in config:
                    if config["mac_address"] in mac_addresses:
                        raise InventoryException(
                            "MAC address %s of node %s is used by multiple hosts"
                            % (config["mac_address"], node["name"]))
                    mac_addresses.add(config["mac_address"])

    def _clear_if_exists(self, node, key):
        try:
            del node[key]
        except KeyError:
            pass