
diff --git a/_modules/convert.py b/_modules/convert.py
index cb5ac2d..0e52f52 100644
--- a/_modules/convert.py
+++ b/_modules/convert.py
@@ -1,89 +1,98 @@
# -*- coding: utf-8 -*-
# -------------------------------------------------------------
# Salt — Convert execution module
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Created: 2018-09-08
# Description: Functions related to data format conversions
# License: BSD-2-Clause
# -------------------------------------------------------------
import json
import salt.serializers.yaml
# -------------------------------------------------------------
# JSON
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def to_json_from_pillar_key(key):
"""
A function to output a pillar key as JSON.
CLI Example::
salt-call --local convert.to_json_from_pillar_key nodes
"""
data = __pillar__.get(key, {})
return to_json(data)
def to_json(data):
"""
A function to convert data to JSON.
CLI Example::
salt-call --local convert.to_json "Hello world"
"""
return json.dumps(data, indent=4, sort_keys=True)
# -------------------------------------------------------------
# YAML
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _to_dictionary(data, root=None):
if root is not None:
return {root: _to_dictionary(data)}
if type(data) is list:
dictionary = {}
for item in data:
dictionary.update(_to_dictionary(item))
return dictionary
if type(data) is tuple and len(data) == 2:
return dict({data})
return dict(data)
def to_yaml_dictionary(data, root=None):
"""
A function to convert data to a YAML dictionary.
CLI Example::
salt '*' convert.to_yaml_dictionary '[{"a": "bar"}, {"b": "foo"}]'
That example will return:
```
a: bar
b: foo
```
"""
return salt.serializers.yaml.serialize(
_to_dictionary(data, root), default_flow_style=False
)
def to_flags(data, enable_prefix="enable-", separator=" "):
"""
A function to convert a list of flags into a string that enables them.
"""
return separator.join([enable_prefix + item for item in data])
+
+
+# -------------------------------------------------------------
+# Lists and dictionaries
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
+def to_list(data):
+    """A function to convert data, e.g. dictionary keys, to a list."""
+    return list(data)
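
Note: the following standalone sketch (not part of the module) illustrates how the _to_dictionary helper above folds a list of single-entry dictionaries, or a (key, value) tuple, into one dictionary before YAML serialization; the root argument wraps the result under a single key.

# Standalone illustration of the _to_dictionary merge logic.
def to_dictionary(data, root=None):
    if root is not None:
        return {root: to_dictionary(data)}
    if isinstance(data, list):
        merged = {}
        for item in data:
            merged.update(to_dictionary(item))
        return merged
    if isinstance(data, tuple) and len(data) == 2:
        return dict([data])
    return dict(data)


assert to_dictionary([{"a": "bar"}, {"b": "foo"}]) == {"a": "bar", "b": "foo"}
assert to_dictionary([("a", "bar")], root="vars") == {"vars": {"a": "bar"}}
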
diff --git a/_modules/credentials.py b/_modules/credentials.py
index 067792f..b693413 100644
--- a/_modules/credentials.py
+++ b/_modules/credentials.py
@@ -1,262 +1,322 @@
# -*- coding: utf-8 -*-
# -------------------------------------------------------------
# Salt — Credentials
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Description: Credentials-related execution module methods
# License: BSD-2-Clause
# -------------------------------------------------------------
+import ipaddress
import os
from salt.utils.files import fopen
VAULT_PREFIX = "ops/secrets/"
# -------------------------------------------------------------
# Configuration
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _are_credentials_hidden():
return "CONFIG_PUBLISHER" in os.environ or "state.show_sls" in os.environ.get(
"SUDO_COMMAND", ""
)
# -------------------------------------------------------------
# HOF utilities
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _filter_discard_empty_string_values(haystack):
if type(haystack) is dict:
return {k: v for k, v in haystack.items() if v != ""}
if type(haystack) is list:
return [v for v in haystack if v != ""]
raise ValueError("Argument isn't a list or a dict: " + str(type(haystack)))
def _join_document_fragments(fragments):
filtered = _filter_discard_empty_string_values(fragments)
return "\n\n".join(filtered)
# -------------------------------------------------------------
# Fetch credentials from Vault
#
# Method signatures are compatible with the Zemke-Rhyne module.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def _get_default_secret_path():
return VAULT_PREFIX
def read_secret(key, prefix=None):
if _are_credentials_hidden():
return "credential for " + key
if prefix is None:
prefix = _get_default_secret_path()
return __salt__["vault.read_secret"](f"{prefix}/{key}")
def get_password(key, prefix=None):
"""
A function to fetch a credential password from Vault
CLI Example:
salt docker-001 credentials.get_password nasqueron.foo.bar
:param key: The key in ops/secrets namespace
:param prefix: the prefix path for that key, by default "ops/secrets/"
:return: The password
"""
return read_secret(key, prefix)["password"]
def get_username(key, prefix=None):
"""
A function to fetch the username associated with a credential
from Vault
CLI Example:
salt docker-001 credentials.get_username nasqueron.foo.bar
:param key: The key in ops/secrets namespace
:param prefix: the prefix path for that key, by default "ops/secrets/"
:return: The username
"""
return read_secret(key, prefix)["username"]
def get_token(key, prefix=None):
"""
A function to fetch a token from Vault
CLI Example:
salt docker-001 credentials.get_token nasqueron.foo.bar
:param key: The key in ops/secrets namespace
:param prefix: the prefix path for that key, by default "ops/secrets/"
:return: The token
For Vault, this is actually an alias of the get_password method.
"""
return get_password(key, prefix)
def get_dsn(host, key, prefix=None):
if _are_credentials_hidden():
return "credential for " + key
secret = read_secret(key, prefix)
return f"{secret['username']}:{secret['password']}@{host}"
+# -------------------------------------------------------------
+# Helpers for IPv6 DUID credentials
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+
+def get_duid_credential_paths(node):
+ return {
+ key: _get_duid_path(interface)
+ for key, interface in _get_duid_interfaces(node).items()
+ }
+
+
+def get_duid_credentials():
+    """
+    A function to fetch the DUID secrets for the current node,
+    keyed by interface name.
+    """
+    node = __grains__["id"]
+
+    return {
+        key: _read_duid_secret(interface)
+        for key, interface in _get_duid_interfaces(node).items()
+    }
+
+
+def _get_duid_interfaces(node):
+ return {
+ key: interface
+ for key, interface in __pillar__["nodes"][node]["network"]["interfaces"].items()
+ if _is_duid_interface(interface)
+ }
+
+
+def _is_duid_interface(interface):
+ return (
+ "ipv6" in interface
+ and "flags" in interface
+ and "ipv6_dhcp_duid" in interface["flags"]
+ )
+
+
+def _read_duid_secret(interface):
+ path = _get_duid_path(interface)
+
+ return __salt__["vault.read_secret"](path)["password"]
+
+
+def _get_duid_path(interface):
+ address = interface["ipv6"]["address"]
+ prefixlen = interface["ipv6"]["prefix"]
+ prefix = _get_prefix(address, prefixlen)
+
+ return f"ops/secrets/network/DUID/{prefix}"
+
+
+def _get_prefix(address, prefixlen):
+ ip = ipaddress.IPv6Network((address, prefixlen), strict=False)
+ return str(ip.network_address)
+
+
# -------------------------------------------------------------
# Helpers for Sentry credentials
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def get_sentry_dsn(args):
if _are_credentials_hidden():
return "credential for " + args["credential"]
host = __pillar__["sentry_realms"][args["realm"]]["hostname"]
key = get_username(args["credential"])
return f"https://{key}@{host}/{args['project_id']}"
# -------------------------------------------------------------
# Build Vault policies
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class VaultSaltRolePolicy:
def __init__(self, role):
self.role = role
def build_policy(self):
return _join_document_fragments(
[
self.build_read_secrets_policy(),
self.import_extra_policies(),
]
)
#
# Secrets from pillar entry vault_secrets_by_role
#
def build_read_secrets_policy(self):
vault_paths = __pillar__["vault_secrets_by_role"].get(self.role, [])
return _join_document_fragments(
[_get_read_rule(vault_path) for vault_path in vault_paths]
)
#
# Import policies from pillar entry vault_extra_policies_by_role
#
def import_extra_policies(self):
extra_policies = __pillar__["vault_extra_policies_by_role"].get(self.role, [])
return _join_document_fragments(
[self.import_policy(policy) for policy in extra_policies]
)
@staticmethod
def import_policy(policy):
policy_file = f"{__pillar__['vault_policies_source']}/{policy}.hcl"
if policy_file.startswith("salt://"):
policy_file = __salt__["cp.cache_file"](policy_file)
with fopen(policy_file) as fd:
return fd.read()
def _get_read_rule(vault_path):
resolved_vault_path = _resolve_vault_path(vault_path)
return f"""path \"{resolved_vault_path}\" {{
capabilities = [ \"read\" ]
}}"""
def _resolve_vault_path(vault_path):
for pillar_path, mount_path in __pillar__.get("vault_mount_paths", {}).items():
if vault_path.startswith(pillar_path):
start_position = len(pillar_path)
return mount_path + vault_path[start_position:]
return vault_path
def _compile_roles_policies():
return {
role: VaultSaltRolePolicy(role).build_policy() for role in _get_relevant_roles()
}
def _get_relevant_roles():
return {
role
for pillar_entry in [
"vault_extra_policies_by_role",
"vault_secrets_by_role",
]
for role in __pillar__[pillar_entry].keys()
}
def _build_node_policy(node, roles_policies):
rules = [
roles_policies[role]
for role in __salt__["node.get"]("roles", node)
if role in roles_policies
]
cluster = __salt__["node.get"]("dbserver:cluster", node)
if cluster is not None:
dbserver_rules_paths = __pillar__["vault_secrets_by_dbserver_cluster"].get(
cluster, []
)
rules.append(
_join_document_fragments(
[_get_read_rule(vault_path) for vault_path in dbserver_rules_paths]
)
)
+    for vault_path in get_duid_credential_paths(node).values():
+ rules.append(_get_read_rule(vault_path))
+
policy = _join_document_fragments(rules)
if not policy:
policy = "# This policy is intentionally left blank."
policy = policy.replace("%%node%%", node)
return policy
def build_policies_by_node():
roles_policies = _compile_roles_policies()
policies = {
node: _build_node_policy(node, roles_policies)
for node in __pillar__["nodes"].keys()
}
return policies
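
Note: a minimal sketch of how the DUID helpers above derive the Vault path from an interface's IPv6 address and prefix length; the concrete address reuses windriver's entry from pillar/nodes/nodes.sls.

import ipaddress


# Reduce the interface address to its network prefix, as _get_prefix does,
# then build the Vault path used by _get_duid_path.
def duid_vault_path(address, prefixlen):
    network = ipaddress.IPv6Network((address, prefixlen), strict=False)
    return f"ops/secrets/network/DUID/{network.network_address}"


assert duid_vault_path("2001:bc8:2e84:700::da7a:7001", 56) == (
    "ops/secrets/network/DUID/2001:bc8:2e84:700::"
)
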
diff --git a/pillar/nodes/nodes.sls b/pillar/nodes/nodes.sls
index fc98cd6..f32c54a 100644
--- a/pillar/nodes/nodes.sls
+++ b/pillar/nodes/nodes.sls
@@ -1,310 +1,312 @@
# -------------------------------------------------------------
# Salt — Nodes
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Created: 2017-10-20
# License: Trivial work, not eligible to copyright
# -------------------------------------------------------------
nodes_aliases:
netmasks:
intranought: &intranought_netmask 255.255.255.240
nodes:
##
## Forest: Nasqueron
## Semantic field: https://devcentral.nasqueron.org/P27
##
cloudhugger:
forest: nasqueron-infra
hostname: cloudhugger.nasqueron.org
roles:
- opensearch
network:
ipv6_native: True
ipv6_tunnel: False
canonical_public_ipv4: 188.165.200.229
interfaces:
eno1:
device: eno1
ipv4:
address: 188.165.200.229
netmask: 255.255.255.0
gateway: 188.165.200.254
ipv6:
address: fe80::ec4:7aff:fe6a:36e8
prefix: 64
gateway: fe80::ee30:91ff:fee0:df80
complector:
forest: nasqueron-infra
hostname: complector.nasqueron.org
roles:
- vault
- salt-primary
zfs:
pool: zroot
network:
ipv6_tunnel: False
interfaces:
intranought:
device: vmx0
ipv4:
address: 172.27.27.7
netmask: *intranought_netmask
gateway: 172.27.27.1
db-A-001:
forest: nasqueron-infra
hostname: db-A-001.nasqueron.drake
roles:
- dbserver-pgsql
zfs:
pool: arcology
dbserver:
cluster: A
network:
ipv6_tunnel: False
interfaces:
intranought:
device: vmx0
ipv4:
address: 172.27.27.8
netmask: *intranought_netmask
gateway: 172.27.27.1
db-B-001:
forest: nasqueron-infra
hostname: db-B-001.nasqueron.drake
roles:
- dbserver-mysql
zfs:
pool: arcology
dbserver:
cluster: B
network:
ipv6_tunnel: False
interfaces:
intranought:
device: vmx0
ipv4:
address: 172.27.27.9
netmask: *intranought_netmask
gateway: 172.27.27.1
dwellers:
forest: nasqueron-dev-docker
hostname: dwellers.nasqueron.org
roles:
- paas-lxc
- paas-docker
- paas-docker-dev
- mastodon
flags:
install_docker_devel_tools: True
network:
ipv6_tunnel: True
canonical_public_ipv4: 51.255.124.11
interfaces:
public:
device: ens192
uuid: 6e05ebea-f2fd-4ca1-a21f-78a778664d8c
ipv4:
address: 51.255.124.11
netmask: *intranought_netmask
gateway: 51.210.99.254
intranought:
device: ens224
uuid: 8e8ca793-b2eb-46d8-9266-125aba6d06c4
ipv4:
address: 172.27.27.4
netmask: *intranought_netmask
gateway: 172.27.27.1
docker-002:
forest: nasqueron-infra
hostname: docker-002.nasqueron.org
roles:
- paas-docker
- paas-docker-prod
network:
ipv6_tunnel: True
canonical_public_ipv4: 51.255.124.9
interfaces:
public:
device: ens192
uuid: d55e0fec-f90b-3014-a458-9067ff8f2520
ipv4:
address: 51.255.124.10
netmask: *intranought_netmask
gateway: 51.210.99.254
intranought:
device: ens224
uuid: 57c04bcc-929b-3177-a2e3-88f84f210721
ipv4:
address: 172.27.27.5
netmask: *intranought_netmask
gateway: 172.27.27.1
router-001:
forest: nasqueron-infra
hostname: router-001.nasqueron.org
roles:
- router
network:
ipv6_tunnel: False
ipv6_native: True
canonical_public_ipv4: 51.255.124.8
interfaces:
public:
device: vmx0
ipv4:
address: 51.255.124.8
netmask: *intranought_netmask
gateway: 51.210.99.254
ipv6:
address: 2001:41d0:303:d971::6a7e
gateway: 2001:41d0:303:d9ff:ff:ff:ff:ff
prefix: 64
flags:
- ipv4_ovh_failover
intranought:
device: vmx1
ipv4:
address: 172.27.27.1
netmask: *intranought_netmask
web-001:
forest: nasqueron-infra
hostname: web-001.nasqueron.org
roles:
- webserver-alkane
- saas-mediawiki
- saas-wordpress
network:
ipv6_tunnel: False
ipv6_native: True
canonical_public_ipv4: 51.255.124.10
interfaces:
intranought:
device: vmx0
ipv4:
address: 172.27.27.10
netmask: *intranought_netmask
gateway: 172.27.27.1
public:
device: vmx1
ipv4:
address: 51.255.124.10
netmask: 255.255.255.255
gateway: 51.210.99.254
ipv6:
address: 2001:41d0:303:d971::517e:c0de
gateway: 2001:41d0:303:d9ff:ff:ff:ff:ff
prefix: 64
fixes:
hello_ipv6_ovh: True
ysul:
forest: nasqueron-dev
hostname: ysul.nasqueron.org
roles:
- devserver
- dbserver-mysql
- viperserv
- webserver-legacy
zfs:
pool: arcology
network:
ipv6_tunnel: True
ipv6_gateway: 2001:470:1f12:9e1::1
canonical_public_ipv4: 212.83.187.132
interfaces:
igb0:
device: igb0
ipv4:
address: 163.172.49.16
netmask: 255.255.255.0
gateway: 163.172.49.1
aliases:
- 212.83.187.132
windriver:
forest: nasqueron-dev
hostname: windriver.nasqueron.org
roles:
- devserver
- dbserver-mysql
- webserver-legacy
zfs:
pool: arcology
network:
ipv6_native: True
ipv6_tunnel: False
canonical_public_ipv4: 51.159.18.59
interfaces:
igb0:
device: igb0
ipv4:
address: 51.159.18.59
netmask: 255.255.255.0
gateway: 51.159.18.1
ipv6:
- address: 2001:0bc8:6005:0005:aa1e:84ff:fef3:5d9c
- gateway: fe80::a293:51ff:feb7:5073
- prefix: 128
+ address: 2001:bc8:2e84:700::da7a:7001
+ gateway: fe80::2616:9dff:fe9c:c521
+ prefix: 56
+ flags:
+ - ipv6_dhcp_duid
##
## Forest: Eglide
## Semantic field: ? (P27 used for "Eglide" too)
##
## This forest is intended to separate credentials
## between Eglide and Nasqueron servers.
##
eglide:
forest: eglide
hostname: eglide.org
roles:
- shellserver
network:
ipv6_tunnel: True
canonical_public_ipv4: 51.159.150.221
interfaces:
ens2:
device: ens2
ipv4:
address: 51.159.150.221
gateway: ""
flags:
# This interface is configured by cloud-init
- skip_interface_configuration
fixes:
rsyslog_xconsole: True
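
Note: per _is_duid_interface in _modules/credentials.py, an interface like windriver's igb0 above is selected because it carries both an ipv6 block and the ipv6_dhcp_duid flag; a minimal sketch of the pillar shape that check expects:

# Shape of the pillar data _is_duid_interface() matches (values from windriver above).
interface = {
    "device": "igb0",
    "ipv6": {
        "address": "2001:bc8:2e84:700::da7a:7001",
        "gateway": "fe80::2616:9dff:fe9c:c521",
        "prefix": 56,
    },
    "flags": ["ipv6_dhcp_duid"],
}

is_duid = "ipv6" in interface and "flags" in interface and "ipv6_dhcp_duid" in interface["flags"]
assert is_duid
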
diff --git a/pillar/top.sls b/pillar/top.sls
index 3560484..9661028 100644
--- a/pillar/top.sls
+++ b/pillar/top.sls
@@ -1,70 +1,72 @@
# -------------------------------------------------------------
# Salt configuration for Nasqueron servers
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Created: 2016-04-10
# License: Trivial work, not eligible to copyright
# -------------------------------------------------------------
base:
'*':
- core.users
- core.groups
- core.network
- certificates.certificates
- nodes.nodes
- nodes.forests
- hotfixes.roles
- services.monitoring-reporting
- services.table
- webserver.sites
+ - credentials.vault
+
cloudhugger:
- opensearch.software
- opensearch.clusters
complector:
- credentials.vault
# To provision services
- saas.rabbitmq
docker-002:
- notifications.config
- paas.docker
- saas.jenkins
- saas.phpbb
db-A-001:
- dbserver.cluster-A
db-B-001:
- dbserver.cluster-B
dwellers:
- paas.docker
- saas.jenkins
eglide:
- shellserver.quassel
ysul:
- devserver.repos
- saas.mediawiki
- viperserv.bots
- viperserv.fantoir
- webserver.labs
- webserver.wwwroot51
web-001:
- saas.mediawiki
- saas.wordpress
- webserver.credentials
windriver:
- devserver.datacubes
- devserver.ports
- devserver.repos
- webserver.labs
- webserver.credentials
- webserver.wwwroot51
diff --git a/roles/core/network/dhclient6.sls b/roles/core/network/dhclient6.sls
new file mode 100644
index 0000000..95b39bc
--- /dev/null
+++ b/roles/core/network/dhclient6.sls
@@ -0,0 +1,49 @@
+# -------------------------------------------------------------
+# Salt — Network
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Project: Nasqueron
+# License: Trivial work, not eligible to copyright
+# -------------------------------------------------------------
+
+{% from "map.jinja" import dirs with context %}
+
+# -------------------------------------------------------------
+# DHCPv6 client
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+{% set duid_credentials = salt["credentials.get_duid_credentials"]() %}
+
+{% if duid_credentials %}
+
+ {% if grains["os"] == "FreeBSD" %}
+
+ ipv6_dhcp:
+ pkg.installed:
+ - pkgs:
+ - isc-dhcp44-client
+
+ /usr/local/etc/rc.d/dhclient6:
+ file.managed:
+ - source: salt://roles/core/network/files/FreeBSD/dhclient6.service
+ - mode: 755
+
+ /etc/rc.conf.d/dhclient6:
+ file.managed:
+ - source: salt://roles/core/network/files/FreeBSD/dhclient6.rc
+ - mode: 644
+ - template: jinja
+ - context:
+ interface: {{ salt["convert.to_list"](duid_credentials)[0] }}
+
+ {% endif %}
+
+ {{ dirs.etc }}/dhclient6.conf:
+ file.managed:
+ - source: salt://roles/core/network/files/dhclient6.conf
+ - mode: 400
+ - show_changes: False
+ - template: jinja
+ - context:
+ credentials: {{ duid_credentials }}
+
+{% endif %}
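
Note: convert.to_list over the credentials mapping yields its keys, so the rc.conf.d context above receives the first DUID-managed interface name. A minimal sketch, with a hypothetical DUID value:

# Hypothetical return value of credentials.get_duid_credentials()
duid_credentials = {"igb0": "00:03:00:01:aa:bb:cc:dd:ee:ff"}

# convert.to_list(duid_credentials)[0] picks the interface name.
assert list(duid_credentials)[0] == "igb0"
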
diff --git a/roles/core/network/files/FreeBSD/dhclient6.rc b/roles/core/network/files/FreeBSD/dhclient6.rc
new file mode 100644
index 0000000..9251c31
--- /dev/null
+++ b/roles/core/network/files/FreeBSD/dhclient6.rc
@@ -0,0 +1,17 @@
+# -------------------------------------------------------------
+# Network — rc configuration
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Project: Nasqueron
+# License: Trivial work, not eligible to copyright
+# Source file: roles/core/network/files/FreeBSD/dhclient6.rc
+# -------------------------------------------------------------
+#
+# <auto-generated>
+# This file is managed by our rOPS SaltStack repository.
+#
+# Changes to this file may cause incorrect behavior
+# and will be lost if the state is redeployed.
+# </auto-generated>
+
+dhclient6_enable=YES
+dhclient6_interface={{ interface }}
diff --git a/roles/core/network/files/FreeBSD/dhclient6.service b/roles/core/network/files/FreeBSD/dhclient6.service
new file mode 100755
index 0000000..4f55678
--- /dev/null
+++ b/roles/core/network/files/FreeBSD/dhclient6.service
@@ -0,0 +1,45 @@
+#!/bin/sh
+
+# PROVIDE: dhclient6
+# KEYWORD: shutdown
+
+# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
+# to enable this service:
+#
+# dhclient6_enable (bool): Set it to YES to enable dhclient6.
+# Default is "NO".
+# dhclient6_interface (str): The interface to run the DHCPv6 client on.
+# No default value. Mandatory.
+# dhclient6_config (path): The path to the configuration file.
+# Default is "/usr/local/etc/dhclient6.conf".
+
+. /etc/rc.subr
+
+name=dhclient6
+desc="Dynamic Host Configuration Protocol (DHCP) client"
+rcvar=dhclient6_enable
+
+load_rc_config $name
+
+: ${dhclient6_enable:="NO"}
+: ${dhclient6_interface:=""}
+: ${dhclient6_config:="/usr/local/etc/dhclient6.conf"}
+
+pidfile="/var/run/dhclient6/${name}.${dhclient6_interface}.pid"
+procname="/usr/local/sbin/dhclient"
+command="$procname"
+command_args="-cf ${dhclient6_config} -6 -P -v ${dhclient6_interface}"
+start_precmd="dhclient6_prestart"
+
+dhclient6_prestart()
+{
+ # /var/run/dhclient6 is not guaranteed to exist,
+ # e.g. if /var/run is a tmpfs
+ install -d -o root -g wheel -m 755 ${pidfile%/*}
+}
+
+if [ -z "$dhclient6_interface" ] ; then
+ err 1 "$0: no interface specified"
+fi
+
+run_rc_command "$1"
diff --git a/roles/core/network/files/dhclient6.conf b/roles/core/network/files/dhclient6.conf
new file mode 100644
index 0000000..49223a4
--- /dev/null
+++ b/roles/core/network/files/dhclient6.conf
@@ -0,0 +1,19 @@
+# -------------------------------------------------------------
+# IPv6 :: DHCP configuration for ISC dhclient
+# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+# Project: Nasqueron
+# License: Trivial work, not eligible to copyright
+# Source file: roles/core/network/files/dhclient6.conf
+# -------------------------------------------------------------
+#
+# <auto-generated>
+# This file is managed by our rOPS SaltStack repository.
+#
+# Changes to this file may cause incorrect behavior
+# and will be lost if the state is redeployed.
+# </auto-generated>
+{% for interface, duid in credentials.items() %}
+interface "{{ interface }}" {
+ send dhcp6.client-id {{ duid }};
+}
+{% endfor %}
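
Note: with a single DUID-managed interface, the template above renders to something like the following (interface name and DUID value hypothetical):

interface "igb0" {
send dhcp6.client-id 00:03:00:01:aa:bb:cc:dd:ee:ff;
}
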
diff --git a/roles/core/network/init.sls b/roles/core/network/init.sls
index 5d6bce9..1667c84 100644
--- a/roles/core/network/init.sls
+++ b/roles/core/network/init.sls
@@ -1,20 +1,21 @@
# -------------------------------------------------------------
# Salt — Network
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Created: 2020-09-20
# License: Trivial work, not eligible to copyright
# -------------------------------------------------------------
include:
- .ipv4
- .ipv6
+ - .dhclient6
- .gre
- .routes
# Drake can be configured as:
#
# - ipv4 (e.g. IntraNought network cards on ESXi hypervisor VMs)
# - gre (e.g. isolated servers needing a tunnel)
#
# Both are needed for servers with router role.
diff --git a/roles/core/network/ipv6.sls b/roles/core/network/ipv6.sls
index 7fe617d..5ed87d2 100644
--- a/roles/core/network/ipv6.sls
+++ b/roles/core/network/ipv6.sls
@@ -1,112 +1,116 @@
# -------------------------------------------------------------
# Salt — Network
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Project: Nasqueron
# Created: 2016-06-15
# License: Trivial work, not eligible to copyright
# -------------------------------------------------------------
# -------------------------------------------------------------
# Table of contents
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# :: Native IPv6
# :: 4to6 tunnel
# :: Routes
#
# -------------------------------------------------------------
{% from "map.jinja" import dirs with context %}
{% set network = salt['node.get']('network') %}
# -------------------------------------------------------------
# Native IPv6
+#
+# Flags:
+#
+# - On Online.net, we need to send a DHCPv6 request
+# with the assigned DUID.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{% if salt['node.has']('network:ipv6_native') %}
{% for interface_name, interface in network["interfaces"].items() %}
{% if "ipv6" in interface %}
{% if grains['os'] == 'FreeBSD' %}
-
/etc/rc.conf.d/netif/ipv6_{{ interface['device'] }}:
file.managed:
- source: salt://roles/core/network/files/FreeBSD/netif_ipv6.rc
- makedirs: True
- template: jinja
- context:
interface: {{ interface['device'] }}
ipv6_address: {{ interface['ipv6']['address'] }}
ipv6_prefix: {{ interface['ipv6']['prefix'] | default(64) }}
has_native_ipv6: True
{% if "gateway" in interface["ipv6"] %}
/etc/rc.conf.d/routing/ipv6:
file.managed:
- source: salt://roles/core/network/files/FreeBSD/routing_ipv6.rc
- makedirs: True
- template: jinja
- context:
interface: {{ interface['device'] }}
ipv6_address: {{ interface['ipv6']['address'] }}
ipv6_prefix: {{ interface['ipv6']['prefix'] | default(64) }}
ipv6_gateway: {{ interface['ipv6']['gateway'] }}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
# -------------------------------------------------------------
# 4to6 tunnel
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{% if salt['node.has']('network:ipv6_tunnel') %}
network_ipv6:
file.managed:
- name : {{ dirs.sbin }}/ipv6-setup-tunnel
- source: salt://roles/core/network/files/ipv6-tunnels/{{ grains['id'] }}.sh.jinja
- template: jinja
- mode: 755
{% endif %}
# -------------------------------------------------------------
# Routes - legacy configuration for ipv6_gateway
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{% if "ipv6_gateway" in network %}
{% if grains['os'] == 'FreeBSD' %}
/etc/rc.conf.d/routing/ipv6:
file.managed:
- source: salt://roles/core/network/files/FreeBSD/routing_ipv6.rc
- makedirs: True
- template: jinja
- context:
ipv6_gateway: {{ network["ipv6_gateway"] }}
{% endif %}
{% endif %}
# -------------------------------------------------------------
# Routes - IPv6 fix for OVH
#
# The OVH network doesn't announce an IPv6 route for a VM at first.
# Once the VM reaches another network, the route is announced
# for a while, then dropped.
#
# To work around that behavior, we regularly ping an external
# site so that packets reach the OVH router and the route keeps
# being announced.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
{% if salt['node.has']('fixes:hello_ipv6_ovh') %}
/usr/local/etc/cron.d/hello-ipv6:
file.managed:
- source: salt://roles/core/network/files/FreeBSD/hello-ipv6.cron
- makedirs: True
{% endif %}
