diff --git a/defaults/main.yml b/defaults/main.yml index 44a1beb..d66da5e 100644 --- a/defaults/main.yml +++ b/defaults/main.yml @@ -1,3 +1,4 @@ +--- nebula_version: 1.8.0 nebula_network_name: "My Nebula Mesh Network" nebula_network_cidr: 24 @@ -9,7 +10,28 @@ nebula_node_lighthouse_in_hosts_file: true nebula_node_use_lighthouse_as_relay: true nebula_install_check_cron: true +# Multi-lighthouse configuration +# Each lighthouse needs: hostname, internal_ip, public_hostname, public_port +# The FIRST lighthouse in the list is the "primary" lighthouse: +# - It stores the CA key and signs all certificates +# - All other lighthouses fetch their certs from it +# +# Example: +# nebula_lighthouses: +# - hostname: lighthouse1 +# internal_ip: 192.168.77.1 +# public_hostname: lh1.example.com +# public_port: 4242 +# is_relay: true +# - hostname: lighthouse2 +# internal_ip: 192.168.77.2 +# public_hostname: lh2.example.com +# public_port: 4242 +# is_relay: true +nebula_lighthouses: [] +# Legacy single-lighthouse variables (still supported for backwards compatibility) +# If nebula_lighthouses is empty, these are used to build a single-lighthouse setup. 
nebula_lighthouse_hostname: lighthouse nebula_lighthouse_internal_ip_addr: 192.168.77.1 nebula_lighthouse_public_hostname: my-nebula-server.com @@ -47,4 +69,3 @@ nebula_sshd_enabled: false nebula_sshd_listen: "127.0.0.1:2222" nebula_sshd_authorized_users: [] nebula_sshd_trusted_cas: [] - diff --git a/example_playbook_and_inventory.yml b/example_playbook_and_inventory.yml new file mode 100644 index 0000000..fa7e807 --- /dev/null +++ b/example_playbook_and_inventory.yml @@ -0,0 +1,69 @@ +# ============================================================= +# Beispiel-Inventory: inventory +# ============================================================= +[nebula_lighthouse] +lighthouse1.example.com +lighthouse2.example.com + +[servers] +web01.example.com nebula_internal_ip_addr=10.43.0.10 +docker01.example.com nebula_internal_ip_addr=10.43.0.11 +db01.example.com nebula_internal_ip_addr=10.43.0.12 + + +# ============================================================= +# Beispiel-Playbook: nebula.yml +# ============================================================= +--- +- name: Deploy Nebula (multi-lighthouse) + hosts: all + gather_facts: true + remote_user: ansible + become: true + vars: + nebula_version: 1.8.0 + nebula_network_name: "My Company Nebula" + nebula_network_cidr: 16 + + # --- Multi-Lighthouse-Konfiguration --- + # Der ERSTE Eintrag ist der Primary (hostet CA-Schlüssel). + # Alle weiteren sind Secondaries. 
+ nebula_lighthouses: + - hostname: lighthouse1 + internal_ip: 10.43.0.1 + public_hostname: lh1.example.com + public_port: 4242 + is_relay: true + - hostname: lighthouse2 + internal_ip: 10.43.0.2 + public_hostname: lh2.example.com + public_port: 4242 + is_relay: true + + nebula_firewall_block_action: reject + nebula_inbound_rules: + - { port: "any", proto: "icmp", host: "any" } + - { port: 22, proto: "tcp", host: "any" } + nebula_outbound_rules: + - { port: "any", proto: "any", host: "any" } + + roles: + - role: nebula + + +# ============================================================= +# WICHTIG: Hostname im Inventory muss dem hostname-Feld in +# nebula_lighthouses entsprechen! +# +# lighthouse1.example.com → hostname: lighthouse1 +# lighthouse2.example.com → hostname: lighthouse2 +# +# Die Rolle sucht den passenden Eintrag per: +# selectattr('hostname', 'equalto', inventory_hostname) +# +# Falls du FQDNs als hostname-Feld nutzen willst: +# - hostname: lighthouse1.example.com +# ... +# und im Inventory ebenfalls: +# lighthouse1.example.com +# ============================================================= diff --git a/tasks/lighthouse_primary.yml b/tasks/lighthouse_primary.yml new file mode 100644 index 0000000..aed610b --- /dev/null +++ b/tasks/lighthouse_primary.yml @@ -0,0 +1,61 @@ +--- +# Runs only on groups['nebula_lighthouse'][0] +# This host owns the CA key and signs all certs. 
+ +- name: Ensure CA cert/key exists + command: + chdir: /opt/nebula + cmd: >- + ./nebula-cert ca + -name "{{ nebula_network_name }}" + -duration "{{ nebula_ca_cert_duration }}" + creates: /opt/nebula/ca.crt + +- name: Ensure primary lighthouse cert/key exists + command: + chdir: /opt/nebula + cmd: >- + ./nebula-cert sign + -name "{{ _nebula_primary_lighthouse.hostname }}" + -ip "{{ _nebula_primary_lighthouse.internal_ip }}/{{ nebula_network_cidr }}" + -duration "{{ nebula_client_cert_duration }}" + creates: "/opt/nebula/{{ _nebula_primary_lighthouse.hostname }}.crt" + +- name: Ensure cert/key exists for each secondary lighthouse + command: + chdir: /opt/nebula + cmd: >- + ./nebula-cert sign + -name "{{ item.hostname }}" + -ip "{{ item.internal_ip }}/{{ nebula_network_cidr }}" + -duration "{{ nebula_client_cert_duration }}" + creates: "/opt/nebula/{{ item.hostname }}.crt" + loop: "{{ _nebula_lighthouses_computed[1:] }}" + when: _nebula_lighthouses_computed | length > 1 + +- name: Ensure primary lighthouse is configured + template: + src: lighthouse_config.yml.j2 + dest: /opt/nebula/config.yml + owner: root + group: root + mode: '0400' + notify: restart nebula + vars: + _lh: "{{ _nebula_primary_lighthouse }}" + +- name: Ensure primary lighthouse service exists + template: + src: lighthouse.service.j2 + dest: /etc/systemd/system/lighthouse.service + owner: root + group: root + mode: '0644' + +- name: Ensure primary lighthouse service is enabled and running + systemd: + name: lighthouse + daemon_reload: true + enabled: true + masked: false + state: started diff --git a/tasks/lighthouse_secondary.yml b/tasks/lighthouse_secondary.yml new file mode 100644 index 0000000..5c32420 --- /dev/null +++ b/tasks/lighthouse_secondary.yml @@ -0,0 +1,61 @@ +--- +# Runs on all nebula_lighthouse hosts except the primary ([0]). +# Fetches cert + key from the primary lighthouse and deploys config. 
+ +- name: Determine this lighthouse's config entry + set_fact: + _this_lh: >- + {{ + _nebula_lighthouses_computed + | selectattr('hostname', 'equalto', inventory_hostname) + | list + | first + }} + +- name: Read cert/key/ca from primary lighthouse for this secondary + slurp: + src: "/opt/nebula/{{ item }}" + register: _lh_secondary_files + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" + loop: + - "{{ _this_lh.hostname }}.crt" + - "{{ _this_lh.hostname }}.key" + - ca.crt + +- name: Ensure cert, key, CA files are present on this secondary lighthouse + copy: + dest: "/opt/nebula/{{ item['item'] }}" + content: "{{ item['content'] | b64decode }}" + owner: root + group: root + mode: '0600' + loop: "{{ _lh_secondary_files.results }}" + loop_control: + label: "{{ item['item'] }}" + +- name: Ensure secondary lighthouse is configured + template: + src: lighthouse_config.yml.j2 + dest: /opt/nebula/config.yml + owner: root + group: root + mode: '0400' + notify: restart nebula + vars: + _lh: "{{ _this_lh }}" + +- name: Ensure secondary lighthouse service exists + template: + src: lighthouse.service.j2 + dest: /etc/systemd/system/lighthouse.service + owner: root + group: root + mode: '0644' + +- name: Ensure secondary lighthouse service is enabled and running + systemd: + name: lighthouse + daemon_reload: true + enabled: true + masked: false + state: started diff --git a/tasks/main.yml b/tasks/main.yml index 29b982f..ead0161 100644 --- a/tasks/main.yml +++ b/tasks/main.yml @@ -3,19 +3,24 @@ block: - name: Uninstall Nebula (clean install) include_tasks: uninstall.yml - when: nebula_clean_install|bool + when: nebula_clean_install | bool - name: Preflight checks include_tasks: preflight.yml - - name: Install Nebula on all hosts + - name: Install Nebula on all hosts include_tasks: nebula.yml - - - name: Deploy Lighthouse - include_tasks: lighthouse.yml - when: inventory_hostname in groups['nebula_lighthouse'] - + + - name: Deploy Primary Lighthouse (CA + cert signing) + 
include_tasks: lighthouse_primary.yml + when: inventory_hostname == groups['nebula_lighthouse'][0] + + - name: Deploy Secondary Lighthouses + include_tasks: lighthouse_secondary.yml + when: + - inventory_hostname in groups['nebula_lighthouse'] + - inventory_hostname != groups['nebula_lighthouse'][0] + - name: Deploy Nebula Node include_tasks: node.yml - when: inventory_hostname not in groups['nebula_lighthouse'] - when: inventory_hostname in groups['nebula_lighthouse'] or nebula_internal_ip_addr is defined + when: inventory_hostname not in groups['nebula_lighthouse'] and nebula_internal_ip_addr is defined diff --git a/tasks/node.yml b/tasks/node.yml index 9748dbd..a74767c 100644 --- a/tasks/node.yml +++ b/tasks/node.yml @@ -1,12 +1,13 @@ -- name: Check if node certificate exists on lighthouse +--- +- name: Check if node certificate exists on primary lighthouse stat: path: /opt/nebula/{{ inventory_hostname }}.crt - delegate_to: "{{ groups.nebula_lighthouse[0] }}" + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" register: cert_stat - name: Get information about existing certificate (if it exists) command: "/opt/nebula/nebula-cert print -json -path /opt/nebula/{{ inventory_hostname }}.crt" - delegate_to: "{{ groups.nebula_lighthouse[0] }}" + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" changed_when: false when: cert_stat.stat.exists register: current_cert_json @@ -24,62 +25,79 @@ msg: "IP Address in Cert: {{ current_cert_ip }}, Expected IP Address: {{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" when: cert_stat.stat.exists -- name: Delete invalid node certificate and key from lighthouse (wrong IP address) +- name: Delete invalid node certificate and key from primary lighthouse (wrong IP address) file: path: "/opt/nebula/{{ item }}" state: absent - delegate_to: "{{ groups.nebula_lighthouse[0] }}" + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" with_items: - "{{ inventory_hostname }}.crt" - "{{ inventory_hostname }}.key" when: - 
cert_stat.stat.exists - - current_cert_ip != nebula_internal_ip_addr|string + '/' + nebula_network_cidr|string + - current_cert_ip != nebula_internal_ip_addr | string + '/' + nebula_network_cidr | string -- name: Ensure a cert/key exists for each node on lighthouse +- name: Ensure a cert/key exists for this node on primary lighthouse command: chdir: /opt/nebula - cmd: ./nebula-cert sign -name "{{ inventory_hostname }}" -ip "{{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}" - delegate_to: "{{ groups.nebula_lighthouse[0] }}" - when: not cert_stat.stat.exists or current_cert_ip != nebula_internal_ip_addr|string + '/' + nebula_network_cidr|string + cmd: >- + ./nebula-cert sign + -name "{{ inventory_hostname }}" + -ip "{{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" + -duration "{{ nebula_client_cert_duration }}" + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" + when: >- + not cert_stat.stat.exists + or current_cert_ip != nebula_internal_ip_addr | string + '/' + nebula_network_cidr | string -- name: Ensure lighthouse has hosts file entry for node +- name: Ensure primary lighthouse has hosts file entry for node lineinfile: path: /etc/hosts line: "{{ nebula_internal_ip_addr }} {{ inventory_hostname }}.neb" - delegate_to: "{{ groups.nebula_lighthouse[0] }}" + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" when: nebula_lighthouse_build_hosts_file -- name: Ensure node has hosts file entry for lighthouse +- name: Ensure all lighthouses have hosts file entry for node lineinfile: path: /etc/hosts - line: "{{ nebula_lighthouse_internal_ip_addr }} {{ nebula_lighthouse_hostname }}.neb" + line: "{{ nebula_internal_ip_addr }} {{ inventory_hostname }}.neb" + delegate_to: "{{ item }}" + loop: "{{ groups['nebula_lighthouse'][1:] }}" + when: + - nebula_lighthouse_build_hosts_file + - groups['nebula_lighthouse'] | length > 1 + +- name: Ensure node has hosts file entries for all lighthouses + lineinfile: + path: 
/etc/hosts + line: "{{ item.internal_ip }} {{ item.hostname }}.neb" + loop: "{{ _nebula_lighthouses_computed }}" when: nebula_node_lighthouse_in_hosts_file -- name: Read cert/key from lighthouse +- name: Read cert/key/ca from primary lighthouse slurp: src: "/opt/nebula/{{ item }}" register: lighthouse_files - delegate_to: "{{ groups.nebula_lighthouse[0] }}" - with_items: + delegate_to: "{{ groups['nebula_lighthouse'][0] }}" + with_items: - "{{ inventory_hostname }}.crt" - "{{ inventory_hostname }}.key" - ca.crt -- name: Ensure Cert, Key, CA files exist +- name: Ensure cert, key, CA files exist on node copy: dest: "/opt/nebula/{{ item['item'] }}" content: "{{ item['content'] | b64decode }}" owner: root group: root - mode: 0600 + mode: '0600' loop: "{{ lighthouse_files.results }}" loop_control: label: "{{ item['item'] }}" - name: Ensure Nebula is configured template: - src: node_config.yml.j2 + src: node_config.yml.j2 dest: /opt/nebula/config.yml owner: root group: root @@ -95,7 +113,7 @@ mode: '0644' - name: Ensure Nebula service is enabled and running - systemd: + systemd: name: nebula daemon_reload: yes enabled: yes @@ -109,12 +127,11 @@ owner: root group: root mode: '0755' - when: nebula_install_check_cron|bool + when: nebula_install_check_cron | bool - name: Ensure nebula-check is scheduled via cron cron: name: "nebula-check" minute: "{{ nebula_check_cron_minute | default('*/5') }}" job: "/opt/nebula/nebula-check.sh" - when: nebula_install_check_cron|bool - + when: nebula_install_check_cron | bool diff --git a/templates/lighthouse_config.yml.j2 b/templates/lighthouse_config.yml.j2 index 0751fed..27f19ee 100644 --- a/templates/lighthouse_config.yml.j2 +++ b/templates/lighthouse_config.yml.j2 @@ -1,87 +1,56 @@ pki: - # every node needs a copy of ca.crt, .key, - # and .crt ca: /opt/nebula/ca.crt - cert: /opt/nebula/{{ nebula_lighthouse_hostname }}.crt - key: /opt/nebula/{{ nebula_lighthouse_hostname }}.key + cert: /opt/nebula/{{ _lh.hostname }}.crt + key: 
/opt/nebula/{{ _lh.hostname }}.key static_host_map: - # how to find one or more lighthouse nodes - # you do NOT need every node to be listed here! - # Similar to "trackers" for torrents - # - # format "": [":[port] or :[port]"] - # - "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] +{% for lh in _nebula_lighthouses_computed %} + "{{ lh.internal_ip }}": ["{{ lh.public_hostname }}:{{ lh.public_port }}"] +{% endfor %} lighthouse: interval: 60 - - # if you're a lighthouse, say you're a lighthouse - # am_lighthouse: true - hosts: - # If you're a lighthouse, this section should be EMPTY - # or commented out. If you're NOT a lighthouse, list - # lighthouse nodes here, one per line, in the following - # format: - # - # - "192.168.77.1" -{% if nebula_lighthouse_remote_allow_list|length > 0 %} - # remote_allow_list controls IP ranges that this node will consider when handshaking +{% if nebula_lighthouse_remote_allow_list | length > 0 %} remote_allow_list: - {% for cidr, allow in nebula_lighthouse_remote_allow_list.items() %} +{% for cidr, allow in nebula_lighthouse_remote_allow_list.items() %} '{{ cidr }}': {{ allow | lower }} - {% endfor %} +{% endfor %} {% endif %} -{% if nebula_lighthouse_local_allow_list|length > 0 %} - # local_allow_list filters which local IP addresses we advertise to the lighthouses +{% if nebula_lighthouse_local_allow_list | length > 0 %} local_allow_list: - {% if nebula_lighthouse_local_allow_list.interfaces is defined %} +{% if nebula_lighthouse_local_allow_list.interfaces is defined %} interfaces: - {% for interface, allow in nebula_lighthouse_local_allow_list.interfaces.items() %} +{% for interface, allow in nebula_lighthouse_local_allow_list.interfaces.items() %} '{{ interface }}': {{ allow | lower }} - {% endfor %} - {% endif %} - {% for key, value in nebula_lighthouse_local_allow_list.items() %} - {% if key != 'interfaces' %} +{% endfor %} +{% endif %} +{% for key, 
value in nebula_lighthouse_local_allow_list.items() %} +{% if key != 'interfaces' %} '{{ key }}': {{ value | lower }} - {% endif %} - {% endfor %} +{% endif %} +{% endfor %} {% endif %} - {% if nebula_lighthouse_extra_config|length > 0 %} - {{- nebula_lighthouse_extra_config | to_nice_yaml | indent(2) }} - {% endif %} +{% if nebula_lighthouse_extra_config | length > 0 %} +{{- nebula_lighthouse_extra_config | to_nice_yaml | indent(2) }} +{% endif %} listen: - # 0.0.0.0 means "all interfaces," which is probably what you want - # host: 0.0.0.0 - port: {{ nebula_lighthouse_public_port }} + port: {{ _lh.public_port }} -# "punchy" basically means "send frequent keepalive packets" -# so that your router won't expire and close your NAT tunnels. -# punchy: true - -# "punch_back" allows the other node to try punching out to you, -# if you're having trouble punching out to it. Useful for stubborn -# networks with symmetric NAT, etc. -# punch_back: true relay: - am_relay: {{ nebula_lighthouse_is_relay }} + am_relay: {{ _lh.is_relay | default(true) }} use_relays: false tun: - # sensible defaults. don't monkey with these unless - # you're CERTAIN you know what you're doing. - # dev: neb0 drop_local_broadcast: false drop_multicast: false @@ -102,15 +71,26 @@ stats: interval: {{ nebula_metrics_prometheus_interval }} {% endif %} -# you NEED this firewall section. -# -# Nebula has its own firewall in addition to anything -# your system has in place, and it's all default deny. -# -# So if you don't specify some rules here, you'll drop -# all traffic, and curse and wonder why you can't ping -# one node from another. 
-# +{% if nebula_sshd_enabled %} +sshd: + enabled: {{ nebula_sshd_enabled }} + listen: {{ nebula_sshd_listen }} + host_key: /opt/nebula/ssh_host_ed25519_key +{% if nebula_sshd_authorized_users %} + authorized_users: +{% for sshd_user in nebula_sshd_authorized_users %} + - user: {{ sshd_user.user }} + keys: +{% for ssh_key in sshd_user.get('keys', []) %} + - "{{ ssh_key }}" +{% endfor %} +{% for file_key in nebula_sshd_key_registry.get(sshd_user.user, []) %} + - "{{ file_key }}" +{% endfor %} +{% endfor %} +{% endif %} +{% endif %} + firewall: outbound_action: {{ nebula_firewall_block_action }} inbound_action: {{ nebula_firewall_block_action }} @@ -120,10 +100,6 @@ firewall: default_timeout: 10m max_connections: 100000 -# since everything is default deny, all rules you -# actually SPECIFY here are allow rules. -# - outbound: {% for rule in nebula_outbound_rules %} - port: {{ rule.port }} diff --git a/templates/node_config.yml.j2 b/templates/node_config.yml.j2 index 2456738..d6b7d28 100644 --- a/templates/node_config.yml.j2 +++ b/templates/node_config.yml.j2 @@ -1,61 +1,40 @@ pki: - # every node needs a copy of the CA certificate, - # and its own certificate and key, ONLY. - # ca: /opt/nebula/ca.crt cert: /opt/nebula/{{ inventory_hostname }}.crt key: /opt/nebula/{{ inventory_hostname }}.key static_host_map: - # how to find one or more lighthouse nodes - # you do NOT need every node to be listed here! - # - # format "Nebula IP": ["public IP or hostname:port"] - # - "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] +{% for lh in _nebula_lighthouses_computed %} + "{{ lh.internal_ip }}": ["{{ lh.public_hostname }}:{{ lh.public_port }}"] +{% endfor %} lighthouse: interval: 60 - - # if you're a lighthouse, say you're a lighthouse - # am_lighthouse: false - hosts: - # If you're a lighthouse, this section should be EMPTY - # or commented out. 
If you're NOT a lighthouse, list - # lighthouse nodes here, one per line, in the following - # format: - # - - "{{ nebula_lighthouse_internal_ip_addr }}" +{% for lh in _nebula_lighthouses_computed %} + - "{{ lh.internal_ip }}" +{% endfor %} listen: - # 0.0.0.0 means "all interfaces," which is probably what you want - # host: 0.0.0.0 port: 4242 -# "punchy" basically means "send frequent keepalive packets" -# so that your router won't expire and close your NAT tunnels. -# punchy: true relay: am_relay: false use_relays: {{ nebula_node_use_lighthouse_as_relay }} relays: - - {{ nebula_lighthouse_internal_ip_addr }} +{% for lh in _nebula_lighthouses_computed %} +{% if lh.is_relay | default(true) %} + - {{ lh.internal_ip }} +{% endif %} +{% endfor %} -# "punch_back" allows the other node to try punching out to you, -# if you're having trouble punching out to it. Useful for stubborn -# networks with symmetric NAT, etc. -# punch_back: true tun: - # sensible defaults. don't monkey with these unless - # you're CERTAIN you know what you're doing. - # dev: neb0 drop_local_broadcast: false drop_multicast: false @@ -96,15 +75,6 @@ sshd: {% endif %} {% endif %} -# you NEED this firewall section. -# -# Nebula has its own firewall in addition to anything -# your system has in place, and it's all default deny. -# -# So if you don't specify some rules here, you'll drop -# all traffic, and curse and wonder why you can't ping -# one node from another. -# firewall: outbound_action: {{ nebula_firewall_block_action }} inbound_action: {{ nebula_firewall_block_action }} @@ -114,10 +84,6 @@ firewall: default_timeout: 10m max_connections: 100000 -# since everything is default deny, all rules you -# actually SPECIFY here are allow rules. 
-# - outbound: {% for rule in nebula_outbound_rules %} - port: {{ rule.port }} diff --git a/vars/main.yml b/vars/main.yml index 10ebb4b..470be14 100644 --- a/vars/main.yml +++ b/vars/main.yml @@ -1,4 +1,26 @@ +--- +# Normalize: if nebula_lighthouses list is empty, build it from the legacy +# single-lighthouse variables so the rest of the role only ever deals with the list. +_nebula_lighthouses_computed: >- + {{
 + nebula_lighthouses + if nebula_lighthouses | length > 0 + else [ + { + 'hostname': nebula_lighthouse_hostname, + 'internal_ip': nebula_lighthouse_internal_ip_addr, + 'public_hostname': nebula_lighthouse_public_hostname, + 'public_port': nebula_lighthouse_public_port, + 'is_relay': nebula_lighthouse_is_relay + } + ] + }} + +# The primary lighthouse is always index [0] — it holds the CA key. +_nebula_primary_lighthouse: "{{ _nebula_lighthouses_computed[0] }}" + +# CPU architectures map (unchanged from original) nebula_architectures: - x86_64: "amd64" - armv7l: "arm-7" - aarch64: "arm64" + x86_64: amd64 + aarch64: arm64 + armv7l: arm-7