Page MenuHomeDevCentral

D2790.id10304.diff
No OneTemporary

D2790.id10304.diff

diff --git a/utils/netbox/pillarize.py b/utils/netbox/pillarize.py
new file mode 100755
--- /dev/null
+++ b/utils/netbox/pillarize.py
@@ -0,0 +1,571 @@
+#!/usr/bin/env python3
+
+# -------------------------------------------------------------
+# NetBox — Pillar information for Salt
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Project: Nasqueron
+# License: BSD-2-Clause
+# Dependencies: PyYAML, pynetbox
+# -------------------------------------------------------------
+
+
+import logging
+import os
+import sys
+
+import pynetbox
+import yaml
+
+
+VRF_RD_DRAKE = "nasqueron.drake"
+
+
+# -------------------------------------------------------------
+# Get NetBox config and credentials
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def get_netbox_config_from_salt():
    """Read NetBox API settings from the Salt master ext_pillar configuration.

    Returns a (found, config) tuple; config maps "server" and "token".
    """
    config_path = "/usr/local/etc/salt/master.d/netbox.conf"

    if not os.path.exists(config_path):
        return False, None

    with open(config_path) as fd:
        netbox_section = yaml.safe_load(fd)["ext_pillar"][0]["netbox"]

    return True, {
        "server": netbox_section["api_url"].replace("/api/", ""),
        "token": netbox_section["api_token"],
    }
+
+
def get_netbox_config_from_config_dir():
    """Read NetBox API settings from ~/.config/netbox/auth.yaml.

    Returns a (found, config) tuple.
    """
    home = os.environ.get("HOME")
    if home is None:
        return False, None

    config_path = os.path.join(home, ".config", "netbox", "auth.yaml")
    if not os.path.exists(config_path):
        return False, None

    with open(config_path) as fd:
        return True, yaml.safe_load(fd)
+
+
def get_netbox_config():
    """Locate NetBox credentials, trying the Salt master config then the user config.

    Raises RuntimeError when no source provides a configuration.
    """
    for method in (get_netbox_config_from_salt, get_netbox_config_from_config_dir):
        found, config = method()
        if found:
            return config

    raise RuntimeError("Can't find NetBox config")
+
+
+# -------------------------------------------------------------
+# Service container
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def init_app(node):
    """Prepare a services container for the application.

    The container holds the node name, the NetBox configuration,
    and a connected pynetbox client.
    """
    config = get_netbox_config()

    app = {"node": node, "config": config}
    app["netbox"] = connect_to_netbox(config)
    return app
+
+
def connect_to_netbox(config):
    """Open a pynetbox API client from a server/token configuration mapping."""
    server = config["server"]
    token = config["token"]
    return pynetbox.api(server, token=token)
+
+
+# -------------------------------------------------------------
+# Build pillar
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def build_pillar(app):
    """Assemble the full pillar dictionary for the requested node."""
    nb = app["netbox"]

    return {
        "etc_hosts": build_etc_hosts(nb),
        "node": build_node_pillar(nb, app["node"]),
    }
+
+
+# -------------------------------------------------------------
+# Pillar data :: etc_hosts
+# Entries for /etc/hosts
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def build_etc_hosts(nb):
    """Build /etc/hosts entries for every relevant address in the Drake VRF."""
    entries = []
    for ip in nb.ipam.ip_addresses.filter(vrf=VRF_RD_DRAKE):
        if is_relevant_address(ip):
            entries.append(compile_etc_host(ip))

    return entries
+
+
def is_relevant_address(ip):
    """An address is published when it has a DNS name and is active or deprecated."""
    if not ip.dns_name:
        return False

    return ip.status.value in ["active", "deprecated"]
+
+
def compile_etc_host(ip):
    """Format one /etc/hosts line: address, short host name, FQDN."""
    fields = [clean_ip(ip.address), get_short_dns_name(ip.dns_name), ip.dns_name]
    return " ".join(fields)
+
+
+# -------------------------------------------------------------
+# Pillar data :: node
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def build_node_pillar(nb, node):
    """Resolve the node as a dedicated device or a VM, then build its pillar.

    Raises RuntimeError when the node exists in neither inventory.
    """
    lookups = [
        (nb.dcim.devices, build_dedicated_node_pillar),
        (nb.virtualization.virtual_machines, build_vm_node_pillar),
    ]

    for endpoint, builder in lookups:
        record = endpoint.get(name=node)
        if record is not None:
            return builder(nb, record)

    raise RuntimeError("Can't find pillar data for the node. Please add it to NetBox.")
+
+
def build_dedicated_node_pillar(nb, device):
    """Build the node pillar for a dedicated (DCIM) device."""
    node = device.local_context_data or {}

    device.primary_ip.full_details()
    node["hostname"] = device.primary_ip.dns_name

    # GRE interfaces aren't included in node pillar, but in network one
    physical_interfaces = {}
    for interface in nb.dcim.interfaces.filter(device=device.name):
        if not interface.enabled or interface.type.label == "Virtual":
            continue
        key = compute_interface_name(interface)
        physical_interfaces[key] = build_dcim_interface(nb, interface)

    node["network"] = {
        **build_network_options(nb, device),
        "interfaces": physical_interfaces,
    }

    return node
+
+
def build_vm_node_pillar(nb, device):
    """Build the node pillar for a virtual machine."""
    node = device.local_context_data or {}

    device.primary_ip.full_details()
    node["hostname"] = device.primary_ip.dns_name

    # GRE interfaces aren't included in node pillar, but in network one
    vm_interfaces = {}
    for interface in nb.virtualization.interfaces.filter(virtual_machine=device.name):
        if not interface.enabled or interface.custom_fields["virt_if_virtual"]:
            continue
        key = compute_interface_name(interface)
        vm_interfaces[key] = build_vm_interface(nb, interface)

    node["network"] = {
        **build_network_options(nb, device),
        "interfaces": vm_interfaces,
    }

    return node
+
+
def build_network_options(nb, node):
    """Extract network options from the node's config context, normalizing defaults."""
    options = node.config_context.get("network", {})
    options.setdefault("ipv6_tunnel", False)

    public_ipv4 = find_canonical_public_ipv4(node)
    if public_ipv4 is not None:
        options["canonical_public_ipv4"] = public_ipv4

    return options
+
+
def find_canonical_public_ipv4(node):
    """Return the node's primary IPv4 when it sits outside the Drake VRF, else None."""
    ip = node.primary_ip
    if not ip:
        return None

    if ip.family.value != 4 or is_in_drake_vrf(ip):
        return None

    return clean_ip(ip.address)
+
+
def is_in_drake_vrf(ip):
    """True when the address' VRF route distinguisher is the Drake one.

    Addresses without a VRF (vrf is None) are treated as outside the VRF.
    """
    vrf = getattr(ip, "vrf", None)
    return getattr(vrf, "rd", None) == VRF_RD_DRAKE
+
+
def build_dcim_interface(nb, interface):
    """Build pillar data for a physical (DCIM) interface: addresses, FHRP, flags."""
    result = {"device": interface.name}

    uuid = interface.custom_fields["if_uuid"]
    if uuid:
        result["uuid"] = uuid

    ip_addresses = [
        ip
        for ip in nb.ipam.ip_addresses.all()
        if is_assigned_to_dcim_interface(ip, interface.id)
    ]
    by_family = get_ip_addresses_by_family(ip_addresses)

    for family, key in (("IPv4", "ipv4"), ("IPv6", "ipv6")):
        if by_family[family]:
            result[key] = build_inet_result(by_family, family, "default_gateways", interface)

    fhrp = build_fhrp(nb, interface, "dcim.interface")
    if not fhrp:
        # Fallback for OVH public CARP (no NetBox assignment possible)
        fhrp = build_public_carp_from_custom_fields(nb, interface)
    if fhrp:
        result["fhrp"] = fhrp

    flags = resolve_network_flags(result)
    if flags:
        result["flags"] = flags

    return result
+
+
def build_vm_interface(nb, interface):
    """Build pillar data for a virtual machine interface: addresses, FHRP, flags."""
    result = {"device": interface.name}

    uuid = interface.custom_fields["if_uuid_virt"]
    if uuid:
        result["uuid"] = uuid

    ip_addresses = [
        ip
        for ip in nb.ipam.ip_addresses.all()
        if is_assigned_to_vminterface(ip, interface.id)
    ]
    by_family = get_ip_addresses_by_family(ip_addresses)

    for family, key in (("IPv4", "ipv4"), ("IPv6", "ipv6")):
        if by_family[family]:
            result[key] = build_inet_result(by_family, family, "default_gateways_virt", interface)

    fhrp = build_fhrp(nb, interface, "virtualization.vminterface")
    if not fhrp:
        # Fallback for OVH public CARP (no NetBox assignment possible)
        fhrp = build_public_carp_from_custom_fields(nb, interface)
    if fhrp:
        result["fhrp"] = fhrp

    flags = resolve_network_flags(result)
    if flags:
        result["flags"] = flags

    return result
+
+
def resolve_network_flags(result):
    """Hoist per-family "flags" lists up to the interface level.

    Each dict subsection of the result (e.g. "ipv4", "ipv6") may carry a
    "flags" list; collect them all into one list and strip them from the
    subsections, so callers can store a single merged flag list.

    Bug fix: the previous version evaluated `"flags" in result[subsection]`
    on every value, so a *string* value containing the substring "flags"
    (e.g. a device name) matched and then crashed on the item deletion.
    Only dict subsections are inspected now.
    """
    flags = []

    for subsection in result.values():
        # Only mappings can carry a "flags" entry; strings would match by
        # substring and the "fhrp" list never holds a bare "flags" key.
        if isinstance(subsection, dict) and "flags" in subsection:
            flags += subsection.pop("flags")

    return flags
+
+
def get_ip_addresses_by_family(ip_addresses):
    """Split IP address records into IPv4 and IPv6 buckets.

    Both keys are always present, even when a bucket stays empty.
    """
    buckets = {"IPv4": [], "IPv6": []}

    for ip in ip_addresses:
        buckets[ip.family.label].append(ip)

    return buckets
+
+
def build_inet_result(ip_addresses_by_family, family, gw_field, interface):
    """Build the pillar entry for one address family of an interface.

    The first address of the family is the primary one; any further
    addresses become aliases. The gateway is picked from the interface
    custom field named by gw_field, keeping the first address matching
    the family.
    """
    addresses = ip_addresses_by_family[family]
    primary_ip = addresses[0]
    primary_address = clean_ip(primary_ip.address)

    result = {"address": primary_address}
    flags = []

    if family == "IPv4":
        result["netmask"] = ipv4_to_netmask(primary_ip.address)
        if is_ovh_ipfo(primary_ip):
            flags.append("ipv4_ovh_failover")
    else:
        prefix = get_prefix_len(primary_ip.address)

        # OVH + IPv6 /64 => netmask /56
        try:
            if primary_ip.tenant and primary_ip.tenant.name == "OVH" and prefix == 64:
                prefix = 56
                flags.append("hello_ipv6_ovh")
        except AttributeError:
            pass

        result["prefix"] = prefix

    if len(addresses) > 1:
        aliases = []
        for ip in addresses[1:]:
            alias = clean_ip(ip.address)
            if alias != primary_address:
                aliases.append(alias)
        result["aliases"] = aliases

    # The custom field may be absent (KeyError) or null (TypeError on iteration).
    try:
        for candidate in interface.custom_fields[gw_field]:
            gw = candidate["address"]
            if is_from_family(gw, family):
                result["gateway"] = clean_ip(gw)
                break
    except (KeyError, TypeError):
        pass

    if flags:
        result["flags"] = flags

    return result
+
+
+# -------------------------------------------------------------
+# Helper functions to use NetBox API
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def is_assigned_to_interface(ip, object_type, interface_id):
    """True when the IP address is assigned to the given interface.

    object_type is a NetBox content type such as "dcim.interface" or
    "virtualization.vminterface". (Parameter renamed from `type`, which
    shadowed the builtin; all in-file callers pass it positionally.)
    """
    return ip.assigned_object_type == object_type and ip.assigned_object_id == interface_id
+
def is_assigned_to_vminterface(ip, interface_id):
    """True when the IP address is assigned to the given VM interface."""
    return is_assigned_to_interface(
        ip, "virtualization.vminterface", interface_id
    )
+
def is_assigned_to_dcim_interface(ip, interface_id):
    """True when the IP address is assigned to the given physical interface."""
    return is_assigned_to_interface(
        ip, "dcim.interface", interface_id
    )
+
def filter_ip_addresses(nb, interface_type, interface_id):
    """Return every IP address assigned to the given interface.

    interface_type is a NetBox content type such as "dcim.interface" or
    "virtualization.vminterface".

    Bug fix: the previous version ignored `interface_type` and always
    matched VM interfaces; it now filters on the requested content type.
    """
    return [
        ip
        for ip in nb.ipam.ip_addresses.all()
        if is_assigned_to_interface(ip, interface_type, interface_id)
    ]
+
+
def compute_interface_name(interface):
    """Prefer a slug of the interface description; fall back to the raw name."""
    description = interface.description
    if not description:
        return interface.name

    return description.strip().lower().replace(" ", "_")
+
+
def is_ovh_ipfo(ip):
    """True for OVH failover addresses, tagged with both OVH and IPFO."""
    tags = {tag.name for tag in ip.tags}

    return tags >= {"IPFO", "OVH"}
+
+
+# -------------------------------------------------------------
+# Helper functions to manipulate IP and networks
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
def clean_ip(ip):
    """Strip the prefix length from a CIDR address, e.g. "10.0.0.1/24" -> "10.0.0.1".

    Bug fix: when the address had no "/", str.find returned -1 and the
    slice silently dropped the last character; bare addresses are now
    returned unchanged.
    """
    return ip.partition("/")[0]
+
+
def get_short_dns_name(hostname):
    """Return the host part of an FQDN, e.g. "db.example.org" -> "db".

    Bug fix: a hostname without any dot used to lose its last character
    (str.find returned -1); it is now returned unchanged.
    """
    return hostname.partition(".")[0]
+
+
def is_from_family(ip, family):
    """Heuristically match an address string against an IP family label."""
    if family == "IPv4":
        return "." in ip
    if family == "IPv6":
        return ":" in ip
    return False
+
+
def get_prefix_len(ip):
    """Return the prefix length of a CIDR address, e.g. "10.0.0.1/24" -> 24."""
    separator = ip.find("/")
    return int(ip[separator + 1:])
+
+
def ipv4_to_netmask(ip):
    """Convert a CIDR IPv4 address into a dotted-quad netmask."""
    prefix = get_prefix_len(ip)
    return cidr_to_netmask(prefix)
+
+
def cidr_to_netmask(cidr):
    """Compute the dotted-quad netmask for a CIDR prefix length."""
    mask = (0xFFFFFFFF >> (32 - cidr)) << (32 - cidr)
    octets = [(mask >> shift) & 0xFF for shift in (24, 16, 8, 0)]
    return ".".join(str(octet) for octet in octets)
+
+
+
+# -------------------------------------------------------------
+# Helper functions to manipulate the configuration CARP
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
def get_fhrp_group(nb, group_pk=None, group_id=None, name=None, protocol=None):
    """
    Return an FHRP group object from NetBox, or None when nothing matches.
    - group_pk: primary key (from /ipam/fhrp-groups/<pk>/)
    - group_id: protocol group_id field (e.g. VHID for CARP)
    Falls back to a filtered search when the primary key lookup fails.
    """
    if group_pk is not None:
        group = nb.ipam.fhrp_groups.get(group_pk)
        if group is not None:
            return group

    # Filter fallback
    filters = {
        key: value
        for key, value in (
            ("group_id", group_id),
            ("name", name),
            ("protocol", protocol),
        )
        if value is not None
    }

    if not filters:
        return None

    matches = list(nb.ipam.fhrp_groups.filter(**filters))
    if matches:
        return matches[0]

    return None
+
+
def extract_fhrp_vip(g):
    """Extract the virtual IP of an FHRP group record.

    Tries the ip_addresses list first (entries may be dicts or records),
    then falls back to a single ip_address attribute. Returns None when
    no address is found.
    """
    ip_list = getattr(g, "ip_addresses", None)
    if ip_list:
        first = ip_list[0]
        if isinstance(first, dict):
            addr = first.get("address")
        else:
            addr = getattr(first, "address", None)
        if addr:
            return clean_ip(addr)

    # Fallback: some NetBox versions expose a single ip_address attribute
    addr = getattr(getattr(g, "ip_address", None), "address", None)
    if addr:
        return clean_ip(addr)

    return None
+
+
def build_public_carp_from_custom_fields(nb, interface):
    """Build a CARP FHRP entry from interface custom fields.

    Used as a fallback when no fhrp_group_assignments exist in NetBox,
    e.g. for OVH public CARP addresses. Returns a list with at most one
    group entry.
    """
    custom_fields = getattr(interface, "custom_fields", {}) or {}

    vhid = custom_fields.get("carp_public_vhid")
    if not vhid:
        return []

    priority = custom_fields.get("carp_public_priority")

    # Fetch group details by VHID only (no NetBox internal ID)
    group_record = get_fhrp_group(nb, group_id=vhid, protocol="carp")

    entry = {
        "id": int(vhid),  # <- VHID
        "protocol": "carp",
        "vip": extract_fhrp_vip(group_record) if group_record else None,
    }

    if priority is not None:
        entry["advskew"] = max(0, 255 - int(priority))

    return [entry]
+
+
def build_fhrp(nb, interface, iface_type):
    """
    Build FHRP configuration from NetBox fhrp_group_assignments
    (works when NetBox can model members: same subnet, etc.).
    Returns a list of group entries, empty when the endpoint is missing.
    """
    try:
        assignments = nb.ipam.fhrp_group_assignments.filter(
            interface_type=iface_type,
            interface_id=interface.id,
        )
    except AttributeError:
        return []

    groups = []

    for assignment in assignments:
        group = getattr(assignment, "group", None)
        if not group:
            continue

        # Try to get full details (might fail depending on pynetbox/netbox version)
        try:
            group.full_details()
        except Exception:
            pass

        protocol = getattr(group, "protocol", None)
        protocol = getattr(protocol, "value", protocol)

        entry = {
            "id": getattr(group, "group_id", None),
            "protocol": protocol,
            "vip": extract_fhrp_vip(group),
        }

        priority = getattr(assignment, "priority", None)
        if priority is not None:
            entry["advskew"] = max(0, 255 - int(priority))

        groups.append(entry)

    return groups
+
+
+# -------------------------------------------------------------
+# Application entry-point
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
if __name__ == "__main__":
    # Honor LOGLEVEL from the environment, defaulting to WARNING.
    logging.basicConfig(level=os.environ.get("LOGLEVEL", "WARNING").upper())

    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <node name>", file=sys.stderr)
        sys.exit(127)

    node_name = sys.argv[1]
    pillar = build_pillar(init_app(node_name))
    print(yaml.dump(pillar))

File Metadata

Mime Type
text/plain
Expires
Sun, Mar 1, 11:00 (12 h, 21 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
3481654
Default Alt Text
D2790.id10304.diff (15 KB)

Event Timeline