From 390d6042abbc30d5e737321a9e4574804f6faa71 Mon Sep 17 00:00:00 2001 From: root Date: Thu, 26 Aug 2021 03:13:18 +0000 Subject: [PATCH] make it a role --- .gitignore | 2 + LICENSE.md | 21 +++++++ README.md | 59 ++++++++++++++++++ defaults/main.yml | 15 +++++ handlers/main.yml | 9 +++ tasks/lighthouse.yml | 37 ++++++++++++ tasks/main.yml | 11 ++++ tasks/nebula.yml | 25 ++++++++ tasks/node.yml | 77 ++++++++++++++++++++++++ templates/lighthouse.service.j2 | 16 +++++ templates/lighthouse_config.yml.j2 | 96 ++++++++++++++++++++++++++++++ templates/nebula-check.sh.j2 | 7 +++ templates/node.service.j2 | 16 +++++ templates/node_config.yml.j2 | 96 ++++++++++++++++++++++++++++++ 14 files changed, 487 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE.md create mode 100644 README.md create mode 100644 defaults/main.yml create mode 100644 handlers/main.yml create mode 100644 tasks/lighthouse.yml create mode 100644 tasks/main.yml create mode 100644 tasks/nebula.yml create mode 100644 tasks/node.yml create mode 100644 templates/lighthouse.service.j2 create mode 100644 templates/lighthouse_config.yml.j2 create mode 100644 templates/nebula-check.sh.j2 create mode 100644 templates/node.service.j2 create mode 100644 templates/node_config.yml.j2 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..52a85bb --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +# ssh keys +ansible_* diff --git a/LICENSE.md b/LICENSE.md new file mode 100644 index 0000000..eb2a51f --- /dev/null +++ b/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Andrew Paglusch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do 
so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..f0862c7 --- /dev/null +++ b/README.md @@ -0,0 +1,59 @@ +# Ansible Role for Nebula + +Quickly and easily deploy the [Nebula Overlay VPN](https://github.com/slackhq/nebula) software onto all of your hosts. Just add your servers to your `inventory` file and override the role's default variables (see `defaults/main.yml`) in your playbook's `vars`. + +This role is meant to get you up and running with Nebula quickly with sane defaults. + +# What Is Nebula + +> Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world. 
+ +You can read more about Nebula [on the official repo](https://github.com/slackhq/nebula) + +# Example Playbook +``` +--- +- name: Deploy Nebula + hosts: all + gather_facts: yes + user: ansible + become: yes + vars: + nebula_version: 1.4.0 + nebula_network_name: "Company Nebula Mgmt Net" + nebula_network_cidr: 16 + + nebula_lighthouse_hostname: lighthouse + nebula_lighthouse_internal_ip_addr: 10.43.0.1 + nebula_lighthouse_public_hostname: lighthouse.company.com + nebula_lighthouse_public_port: 4242 + + nebula_default_inbound_rules: + - { port: 22, proto: "tcp", host: "any" } + - { port: "any", proto: "icmp", host: "any" } + nebula_default_outbound_rules: + - { port: 22, proto: "tcp", host: "any" } + - { port: "any", proto: "icmp", host: "any" } + - { port: 4505, proto: "tcp", host: "10.43.0.1/32" } + - { port: 4506, proto: "tcp", host: "10.43.0.1/32" } + roles: + - role: nebula +``` + +# Example Inventory +``` +[nebula_lighthouse] +lighthouse01.company.com + +[servers] +web01.company.com nebula_internal_ip_addr=10.43.0.2 +docker01.company.com nebula_internal_ip_addr=10.43.0.3 +zabbix01.company.com nebula_internal_ip_addr=10.43.0.4 +backup01.company.com nebula_internal_ip_addr=10.43.0.5 +pbx01.company.com nebula_internal_ip_addr=10.43.0.6 +``` + +# Running the Playbook +``` +ansible-playbook -i inventory nebula.yml +``` diff --git a/defaults/main.yml b/defaults/main.yml new file mode 100644 index 0000000..fe6c579 --- /dev/null +++ b/defaults/main.yml @@ -0,0 +1,15 @@ +nebula_version: 1.2.0 +nebula_network_name: "My Nebula Mesh Network" +nebula_network_cidr: 24 +nebula_ca_cert_duration: "87600h0m0s" #10 years +nebula_client_cert_duration: "43800h0m0s" #5 years + +nebula_lighthouse_hostname: lighthouse +nebula_lighthouse_internal_ip_addr: 192.168.77.1 +nebula_lighthouse_public_hostname: my-nebula-server.com +nebula_lighthouse_public_port: 4242 + +nebula_default_inbound_rules: + - { port: "any", proto: "any", host: "any" } +nebula_default_outbound_rules: + - { port: 
"any", proto: "any", host: "any" } diff --git a/handlers/main.yml b/handlers/main.yml new file mode 100644 index 0000000..3d7da35 --- /dev/null +++ b/handlers/main.yml @@ -0,0 +1,9 @@ +- name: Restart Nebula + systemd: + name: nebula + state: restarted + +- name: Restart Lighthouse + systemd: + name: lighthouse + state: restarted diff --git a/tasks/lighthouse.yml b/tasks/lighthouse.yml new file mode 100644 index 0000000..ea00590 --- /dev/null +++ b/tasks/lighthouse.yml @@ -0,0 +1,37 @@ +--- +- name: Ensure CA cert/key exists + command: + chdir: /opt/nebula + cmd: ./nebula-cert ca -name "{{ nebula_network_name }}" -duration "{{ nebula_ca_cert_duration }}" + creates: /opt/nebula/ca.crt + +- name: Ensure lighthouse cert/key exists + command: + chdir: /opt/nebula + cmd: ./nebula-cert sign -name "{{ nebula_lighthouse_hostname }}" -ip "{{ nebula_lighthouse_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}" + creates: "/opt/nebula/{{ nebula_lighthouse_hostname }}.crt" + +- name: Ensure lighthouse is configured + template: + src: lighthouse_config.yml.j2 + dest: /opt/nebula/config.yml + owner: root + group: root + mode: '0400' + notify: Restart Lighthouse + +- name: Ensure lighthouse service exists + template: + src: lighthouse.service.j2 + dest: /etc/systemd/system/lighthouse.service + owner: root + group: root + mode: '0644' + +- name: Ensure lighthouse service is enabled and running + systemd: + name: lighthouse + daemon_reload: yes + enabled: yes + masked: no + state: started diff --git a/tasks/main.yml b/tasks/main.yml new file mode 100644 index 0000000..0f7b6de --- /dev/null +++ b/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Install Nebula on all hosts + include_tasks: nebula.yml + +- name: Deploy Lighthouse + include_tasks: lighthouse.yml + when: inventory_hostname in groups['nebula_lighthouse'] + +- name: Deploy Nebula Node + include_tasks: node.yml + when: inventory_hostname not in groups['nebula_lighthouse'] diff --git a/tasks/nebula.yml 
b/tasks/nebula.yml new file mode 100644 index 0000000..362eeef --- /dev/null +++ b/tasks/nebula.yml @@ -0,0 +1,25 @@ +- name: Ensure /opt/nebula directory exists + file: + path: /opt/nebula + state: directory + mode: '0700' + owner: root + group: root + +# TODO: Detect cpu arch correctly +- name: Download & Extract Nebula + unarchive: + src: "https://github.com/slackhq/nebula/releases/download/v{{ nebula_version }}/nebula-linux-{{ cpu_arch | default('amd64') }}.tar.gz" + dest: "/opt/nebula" + remote_src: yes + creates: '/opt/nebula/nebula' + +- name: Ensure Nebula binaries permissions are correct + file: + path: "/opt/nebula/{{ item }}" + owner: root + group: root + mode: '0700' + with_items: + - nebula + - nebula-cert diff --git a/tasks/node.yml b/tasks/node.yml new file mode 100644 index 0000000..0b677b2 --- /dev/null +++ b/tasks/node.yml @@ -0,0 +1,77 @@ +- name: Ensure a cert/key exists for each node on lighthouse + command: + chdir: /opt/nebula + cmd: ./nebula-cert sign -name "{{ inventory_hostname }}" -ip "{{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}" + creates: "/opt/nebula/{{ inventory_hostname }}.crt" + delegate_to: "{{ groups.nebula_lighthouse[0] }}" + +- name: Ensure lighthouse has hosts file entry for node + lineinfile: + path: /etc/hosts + line: "{{ nebula_internal_ip_addr }} {{ inventory_hostname }}.neb" + delegate_to: "{{ groups.nebula_lighthouse[0] }}" + +- name: Ensure node has hosts file entry for lighthouse + lineinfile: + path: /etc/hosts + line: "{{ nebula_lighthouse_internal_ip_addr }} {{ nebula_lighthouse_hostname }}.neb {{ nebula_lighthouse_hostname }}" + +- name: Read cert/key from lighthouse + slurp: + src: "/opt/nebula/{{ item }}" + register: lighthouse_files + delegate_to: "{{ groups.nebula_lighthouse[0] }}" + with_items: + - "{{ inventory_hostname }}.crt" + - "{{ inventory_hostname }}.key" + - ca.crt + +- name: Ensure Cert, Key, CA files exist + copy: + dest: "/opt/nebula/{{ 
item['item'] }}" + content: "{{ item['content'] | b64decode }}" + owner: root + group: root + mode: '0600' + loop: "{{ lighthouse_files.results }}" + loop_control: + label: "{{ item['item'] }}" + +- name: Ensure Nebula is configured + template: + src: node_config.yml.j2 + dest: /opt/nebula/config.yml + owner: root + group: root + mode: '0400' + notify: Restart Nebula + +- name: Ensure Nebula service exists + template: + src: node.service.j2 + dest: /etc/systemd/system/nebula.service + owner: root + group: root + mode: '0644' + +- name: Ensure Nebula service is enabled and running + systemd: + name: nebula + daemon_reload: yes + enabled: yes + masked: no + state: started + +- name: Ensure nebula-check is present + template: + src: nebula-check.sh.j2 + dest: /opt/nebula/nebula-check.sh + owner: root + group: root + mode: '0755' + +- name: Ensure nebula-check is scheduled via cron + cron: + name: "nebula-check" + minute: "*/5" + job: "/opt/nebula/nebula-check.sh" diff --git a/templates/lighthouse.service.j2 b/templates/lighthouse.service.j2 new file mode 100644 index 0000000..52f1474 --- /dev/null +++ b/templates/lighthouse.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=Nebula Lighthouse +Wants=basic.target +After=basic.target network.target + +[Service] +SyslogIdentifier=nebula +StandardOutput=syslog +StandardError=syslog +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml +Restart=always +RestartSec=42s + +[Install] +WantedBy=multi-user.target diff --git a/templates/lighthouse_config.yml.j2 b/templates/lighthouse_config.yml.j2 new file mode 100644 index 0000000..7d4f7b0 --- /dev/null +++ b/templates/lighthouse_config.yml.j2 @@ -0,0 +1,96 @@ +pki: + # every node needs a copy of ca.crt, .key, + # and .crt + ca: /opt/nebula/ca.crt + cert: /opt/nebula/{{ nebula_lighthouse_hostname }}.crt + key: /opt/nebula/{{ nebula_lighthouse_hostname }}.key + +static_host_map: + # how to find one or more lighthouse nodes + # you do NOT need every node to be listed here! 
+ # Similar to "trackers" for torrents + # + # format "": [":[port] or :[port]"] + # + "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] + +lighthouse: + interval: 60 + + # if you're a lighthouse, say you're a lighthouse + # + am_lighthouse: true + + hosts: + # If you're a lighthouse, this section should be EMPTY + # or commented out. If you're NOT a lighthouse, list + # lighthouse nodes here, one per line, in the following + # format: + # + # - "192.168.77.1" + +listen: + # 0.0.0.0 means "all interfaces," which is probably what you want + # + host: 0.0.0.0 + port: {{ nebula_lighthouse_public_port }} + +# "punchy" basically means "send frequent keepalive packets" +# so that your router won't expire and close your NAT tunnels. +# +punchy: true + +# "punch_back" allows the other node to try punching out to you, +# if you're having trouble punching out to it. Useful for stubborn +# networks with symmetric NAT, etc. +# +punch_back: true + +tun: + # sensible defaults. don't monkey with these unless + # you're CERTAIN you know what you're doing. + # + dev: neb0 + drop_local_broadcast: false + drop_multicast: false + tx_queue: 500 + mtu: 1300 + routes: + +logging: + level: info + format: text + +# you NEED this firewall section. +# +# Nebula has its own firewall in addition to anything +# your system has in place, and it's all default deny. +# +# So if you don't specify some rules here, you'll drop +# all traffic, and curse and wonder why you can't ping +# one node from another. +# +firewall: + conntrack: + tcp_timeout: 120h + udp_timeout: 3m + default_timeout: 10m + max_connections: 100000 + +# since everything is default deny, all rules you +# actually SPECIFY here are allow rules. 
+# + + outbound: +{% for rule in nebula_default_outbound_rules %} + - port: {{ rule.port }} + proto: {{ rule.proto }} + host: {{ rule.host }} +{% endfor %} + + inbound: +{% for rule in nebula_default_inbound_rules %} + - port: {{ rule.port }} + proto: {{ rule.proto }} + host: {{ rule.host }} +{% endfor %} diff --git a/templates/nebula-check.sh.j2 b/templates/nebula-check.sh.j2 new file mode 100644 index 0000000..f841f68 --- /dev/null +++ b/templates/nebula-check.sh.j2 @@ -0,0 +1,7 @@ +#!/bin/bash +PATH=/usr/bin:/bin:/sbin:/usr/sbin +test="$(ping -q -w10 -W2 {{ nebula_lighthouse_internal_ip_addr }} 2>/dev/null | grep -oP '\d{1,3}(?=%)')" +if [ "$test" == "100" ]; then + logger -s -p user.emerg '!!! Unable to reach Nebula server. Bouncing tunnel neb0... !!!' + systemctl restart nebula.service +fi diff --git a/templates/node.service.j2 b/templates/node.service.j2 new file mode 100644 index 0000000..a28544a --- /dev/null +++ b/templates/node.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=nebula +Wants=basic.target +After=basic.target network.target + +[Service] +SyslogIdentifier=nebula +StandardOutput=syslog +StandardError=syslog +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml +Restart=always +RestartSec=42s + +[Install] +WantedBy=multi-user.target diff --git a/templates/node_config.yml.j2 b/templates/node_config.yml.j2 new file mode 100644 index 0000000..2e62e3c --- /dev/null +++ b/templates/node_config.yml.j2 @@ -0,0 +1,96 @@ +pki: + # every node needs a copy of the CA certificate, + # and its own certificate and key, ONLY. + # + ca: /opt/nebula/ca.crt + cert: /opt/nebula/{{ inventory_hostname }}.crt + key: /opt/nebula/{{ inventory_hostname }}.key + +static_host_map: + # how to find one or more lighthouse nodes + # you do NOT need every node to be listed here! 
+ # + # format "Nebula IP": ["public IP or hostname:port"] + # + "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"] + +lighthouse: + interval: 60 + + # if you're a lighthouse, say you're a lighthouse + # + am_lighthouse: false + + hosts: + # If you're a lighthouse, this section should be EMPTY + # or commented out. If you're NOT a lighthouse, list + # lighthouse nodes here, one per line, in the following + # format: + # + - "{{ nebula_lighthouse_internal_ip_addr }}" + +listen: + # 0.0.0.0 means "all interfaces," which is probably what you want + # + host: 0.0.0.0 + port: 4242 + +# "punchy" basically means "send frequent keepalive packets" +# so that your router won't expire and close your NAT tunnels. +# +punchy: true + +# "punch_back" allows the other node to try punching out to you, +# if you're having trouble punching out to it. Useful for stubborn +# networks with symmetric NAT, etc. +# +punch_back: true + +tun: + # sensible defaults. don't monkey with these unless + # you're CERTAIN you know what you're doing. + # + dev: neb0 + drop_local_broadcast: false + drop_multicast: false + tx_queue: 500 + mtu: 1300 + routes: + +logging: + level: info + format: text + +# you NEED this firewall section. +# +# Nebula has its own firewall in addition to anything +# your system has in place, and it's all default deny. +# +# So if you don't specify some rules here, you'll drop +# all traffic, and curse and wonder why you can't ping +# one node from another. +# +firewall: + conntrack: + tcp_timeout: 120h + udp_timeout: 3m + default_timeout: 10m + max_connections: 100000 + +# since everything is default deny, all rules you +# actually SPECIFY here are allow rules. 
+# + + outbound: +{% for rule in nebula_default_outbound_rules %} + - port: {{ rule.port }} + proto: {{ rule.proto }} + host: {{ rule.host }} +{% endfor %} + + inbound: +{% for rule in nebula_default_inbound_rules %} + - port: {{ rule.port }} + proto: {{ rule.proto }} + host: {{ rule.host }} +{% endfor %}