make it a role

This commit is contained in:
root
2021-08-26 03:13:18 +00:00
commit 390d6042ab
14 changed files with 487 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
# ssh keys
ansible_*

21
LICENSE.md Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 Andrew Paglusch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

59
README.md Normal file
View File

@@ -0,0 +1,59 @@
# Ansible Role for Nebula
Quickly and easily deploy the [Nebula Overlay VPN](https://github.com/slackhq/nebula) software onto all of your hosts. Just add your servers to the `inventory` file and then edit `nebula_vars.yml`.
This role is meant to get you up and running with Nebula quickly, with sane defaults.
# What Is Nebula
> Nebula is a scalable overlay networking tool with a focus on performance, simplicity and security. It lets you seamlessly connect computers anywhere in the world.
You can read more about Nebula [on the official repo](https://github.com/slackhq/nebula)
# Example Playbook
```
---
- name: Deploy Nebula
hosts: all
gather_facts: yes
user: ansible
become: yes
vars:
nebula_version: 1.4.0
nebula_network_name: "Company Nebula Mgmt Net"
nebula_network_cidr: 16
nebula_lighthouse_hostname: lighthouse
nebula_lighthouse_internal_ip_addr: 10.43.0.1
nebula_lighthouse_public_hostname: lighthouse.company.com
nebula_lighthouse_public_port: 4242
nebula_default_inbound_rules:
- { port: 22, proto: "tcp", host: "any" }
- { port: "any", proto: "icmp", host: "any" }
nebula_default_outbound_rules:
- { port: 22, proto: "tcp", host: "any" }
- { port: "any", proto: "icmp", host: "any" }
- { port: 4505, proto: "tcp", host: "10.43.0.1/32" }
- { port: 4506, proto: "tcp", host: "10.43.0.1/32" }
roles:
- role: nebula
```
# Example Inventory
```
[nebula_lighthouse]
lighthouse01.company.com
[servers]
web01.company.com nebula_internal_ip_addr=10.43.0.2
docker01.company.com nebula_internal_ip_addr=10.43.0.3
zabbix01.company.com nebula_internal_ip_addr=10.43.0.4
backup01.company.com nebula_internal_ip_addr=10.43.0.5
pbx01.company.com nebula_internal_ip_addr=10.43.0.6
```
# Running the Playbook
```
ansible-playbook -i inventory nebula.yml
```

15
defaults/main.yml Normal file
View File

@@ -0,0 +1,15 @@
---
# Defaults for the nebula role. Override any of these in your playbook,
# group_vars, or inventory.

# Nebula release to download from GitHub. Quoted so YAML tooling never
# tries to retype a version-looking scalar.
nebula_version: "1.2.0"

# Name embedded in the generated CA certificate.
nebula_network_name: "My Nebula Mesh Network"

# CIDR prefix length of the overlay network.
nebula_network_cidr: 24

# Certificate lifetimes, in Go duration syntax (passed to nebula-cert).
nebula_ca_cert_duration: "87600h0m0s"      # 10 years
nebula_client_cert_duration: "43800h0m0s"  # 5 years

# Lighthouse identity and how other nodes reach it.
nebula_lighthouse_hostname: lighthouse
nebula_lighthouse_internal_ip_addr: 192.168.77.1
nebula_lighthouse_public_hostname: my-nebula-server.com
nebula_lighthouse_public_port: 4242

# Default Nebula firewall rules: allow-all in both directions.
# Tighten these for production networks.
nebula_default_inbound_rules:
  - { port: "any", proto: "any", host: "any" }
nebula_default_outbound_rules:
  - { port: "any", proto: "any", host: "any" }

9
handlers/main.yml Normal file
View File

@@ -0,0 +1,9 @@
---
# Handlers notified by the nebula role's tasks.

# Notified when a node's /opt/nebula/config.yml changes (tasks/node.yml).
- name: Restart Nebula
  systemd:
    name: nebula
    state: restarted

# Notified when the lighthouse's config changes (tasks/lighthouse.yml).
- name: Restart Lighthouse
  systemd:
    name: lighthouse
    state: restarted

37
tasks/lighthouse.yml Normal file
View File

@@ -0,0 +1,37 @@
---
# Configure the lighthouse host: generate the network CA plus the
# lighthouse's own certificate, then install its config and a systemd unit.

# nebula-cert writes the CA cert/key into the chdir; `creates:` makes
# this idempotent (skipped once ca.crt exists).
- name: Ensure CA cert/key exists
  command:
    chdir: /opt/nebula
    cmd: ./nebula-cert ca -name "{{ nebula_network_name }}" -duration "{{ nebula_ca_cert_duration }}"
    creates: /opt/nebula/ca.crt

# Sign the lighthouse's own certificate against the CA generated above.
# Output filenames are derived from the -name argument.
- name: Ensure lighthouse cert/key exists
  command:
    chdir: /opt/nebula
    cmd: ./nebula-cert sign -name "{{ nebula_lighthouse_hostname }}" -ip "{{ nebula_lighthouse_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}"
    creates: "/opt/nebula/{{ nebula_lighthouse_hostname }}.crt"

# Restrictive mode (0400) on the rendered config; restart on change.
- name: Ensure lighthouse is configured
  template:
    src: lighthouse_config.yml.j2
    dest: /opt/nebula/config.yml
    owner: root
    group: root
    mode: '0400'
  notify: Restart Lighthouse

- name: Ensure lighthouse service exists
  template:
    src: lighthouse.service.j2
    dest: /etc/systemd/system/lighthouse.service
    owner: root
    group: root
    mode: '0644'

# daemon_reload picks up the freshly installed unit file.
- name: Ensure lighthouse service is enabled and running
  systemd:
    name: lighthouse
    daemon_reload: yes
    enabled: yes
    masked: no
    state: started

11
tasks/main.yml Normal file
View File

@@ -0,0 +1,11 @@
---
# Entry point for the nebula role: install the binaries everywhere, then
# configure each host as either a lighthouse or a regular node based on
# membership in the `nebula_lighthouse` inventory group.

# `include:` is deprecated (removed in ansible-core 2.16); the dynamic
# `include_tasks:` keeps the same per-host `when:` semantics.
- name: Install Nebula on all hosts
  include_tasks: nebula.yml

- name: Deploy Lighthouse
  include_tasks: lighthouse.yml
  when: inventory_hostname in groups['nebula_lighthouse']

- name: Deploy Nebula Node
  include_tasks: node.yml
  when: inventory_hostname not in groups['nebula_lighthouse']

25
tasks/nebula.yml Normal file
View File

@@ -0,0 +1,25 @@
---
# Shared setup: download the Nebula binaries onto every host (lighthouse
# and regular nodes alike) under /opt/nebula.

- name: Ensure /opt/nebula directory exists
  file:
    path: /opt/nebula
    state: directory
    mode: '0700'
    owner: root
    group: root

# Pick the release artifact matching the host CPU. Setting `cpu_arch`
# explicitly still overrides detection (e.g. for mips builds). Detection
# reads ansible_architecture, so fact gathering must be enabled.
- name: Download & Extract Nebula
  unarchive:
    src: "https://github.com/slackhq/nebula/releases/download/v{{ nebula_version }}/nebula-linux-{{ cpu_arch | default(_nebula_arch_map.get(ansible_architecture, 'amd64')) }}.tar.gz"
    dest: "/opt/nebula"
    remote_src: yes
    creates: '/opt/nebula/nebula'
  vars:
    # Map kernel architecture names to Nebula release-artifact suffixes
    # (see the asset names on the slackhq/nebula releases page).
    _nebula_arch_map:
      x86_64: amd64
      aarch64: arm64
      armv7l: arm-7

- name: Ensure Nebula binaries permissions are correct
  file:
    path: "/opt/nebula/{{ item }}"
    owner: root
    group: root
    mode: '0700'
  with_items:
    - nebula
    - nebula-cert

77
tasks/node.yml Normal file
View File

@@ -0,0 +1,77 @@
---
# Configure a regular Nebula node: sign its certificate on the lighthouse,
# copy cert/key/CA down to the node, install config plus a systemd unit,
# and schedule a connectivity watchdog.

# Certificates are signed on the lighthouse, which holds the CA key;
# `creates:` keeps this idempotent across runs.
- name: Ensure a cert/key exists for each node on lighthouse
  command:
    chdir: /opt/nebula
    cmd: ./nebula-cert sign -name "{{ inventory_hostname }}" -ip "{{ nebula_internal_ip_addr }}/{{ nebula_network_cidr }}" -duration "{{ nebula_client_cert_duration }}"
    creates: "/opt/nebula/{{ inventory_hostname }}.crt"
  delegate_to: "{{ groups.nebula_lighthouse[0] }}"

- name: Ensure lighthouse has hosts file entry for node
  lineinfile:
    path: /etc/hosts
    line: "{{ nebula_internal_ip_addr }} {{ inventory_hostname }}.neb"
  delegate_to: "{{ groups.nebula_lighthouse[0] }}"

- name: Ensure node has hosts file entry for lighthouse
  lineinfile:
    path: /etc/hosts
    line: "{{ nebula_lighthouse_internal_ip_addr }} {{ nebula_lighthouse_hostname }}.neb {{ nebula_lighthouse_hostname }}"

# slurp base64-encodes file contents, letting us move the cert material
# between hosts without an intermediate file on the controller.
- name: Read cert/key from lighthouse
  slurp:
    src: "/opt/nebula/{{ item }}"
  register: lighthouse_files
  delegate_to: "{{ groups.nebula_lighthouse[0] }}"
  with_items:
    - "{{ inventory_hostname }}.crt"
    - "{{ inventory_hostname }}.key"
    - ca.crt

- name: Ensure Cert, Key, CA files exist
  copy:
    dest: "/opt/nebula/{{ item['item'] }}"
    content: "{{ item['content'] | b64decode }}"
    owner: root
    group: root
    # Quoted: a bare 0600 is parsed by YAML as the octal integer 384,
    # which sets the wrong permissions.
    mode: '0600'
  loop: "{{ lighthouse_files.results }}"
  loop_control:
    # Avoid logging base64 key material; show only the filename.
    label: "{{ item['item'] }}"

- name: Ensure Nebula is configured
  template:
    src: node_config.yml.j2
    dest: /opt/nebula/config.yml
    owner: root
    group: root
    mode: '0400'
  notify: Restart Nebula

- name: Ensure Nebula service exists
  template:
    src: node.service.j2
    dest: /etc/systemd/system/nebula.service
    owner: root
    group: root
    mode: '0644'

- name: Ensure Nebula service is enabled and running
  systemd:
    name: nebula
    daemon_reload: yes
    enabled: yes
    masked: no
    state: started

# Watchdog (see nebula-check.sh.j2): restarts the tunnel from cron if the
# lighthouse stops responding over the overlay.
- name: Ensure nebula-check is present
  template:
    src: nebula-check.sh.j2
    dest: /opt/nebula/nebula-check.sh
    owner: root
    group: root
    mode: '0755'

- name: Ensure nebula-check is scheduled via cron
  cron:
    name: "nebula-check"
    minute: "*/5"
    job: "/opt/nebula/nebula-check.sh"

View File

@@ -0,0 +1,16 @@
# systemd unit for the Nebula lighthouse, rendered by Ansible from
# lighthouse.service.j2 -- local edits will be overwritten.
[Unit]
Description=Nebula Lighthouse
Wants=basic.target
After=basic.target network.target
[Service]
# NOTE(review): identifier is "nebula" (not "lighthouse"), matching the
# node unit -- confirm this is intentional for shared log filtering.
SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
# Nebula reloads its config on SIGHUP.
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml
# Always restart on exit, after a 42s back-off.
Restart=always
RestartSec=42s
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,96 @@
pki:
  # every node needs a copy of ca.crt, <client-name>.key,
  # and <client-name>.crt
  #
  # Paths must match what tasks/lighthouse.yml generated: nebula-cert is
  # run with -name "{{ nebula_lighthouse_hostname }}", so the cert/key
  # filenames are derived from that variable (previously hard-coded
  # as "lighthouse", which broke non-default hostnames).
  ca: /opt/nebula/ca.crt
  cert: /opt/nebula/{{ nebula_lighthouse_hostname }}.crt
  key: /opt/nebula/{{ nebula_lighthouse_hostname }}.key

static_host_map:
  # how to find one or more lighthouse nodes
  # you do NOT need every node to be listed here!
  # Similar to "trackers" for torrents
  #
  # format "<internal-nebula-ip-addr>": ["<pub-ip-addr>:[port] or <hostname>:[port]"]
  #
  "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"]

lighthouse:
  interval: 60
  # if you're a lighthouse, say you're a lighthouse
  #
  am_lighthouse: true
  hosts:
    # If you're a lighthouse, this section should be EMPTY
    # or commented out. If you're NOT a lighthouse, list
    # lighthouse nodes here, one per line, in the following
    # format:
    #
    # - "192.168.77.1"

listen:
  # 0.0.0.0 means "all interfaces," which is probably what you want
  #
  host: 0.0.0.0
  port: {{ nebula_lighthouse_public_port }}

# "punchy" basically means "send frequent keepalive packets"
# so that your router won't expire and close your NAT tunnels.
#
punchy: true

# "punch_back" allows the other node to try punching out to you,
# if you're having trouble punching out to it. Useful for stubborn
# networks with symmetric NAT, etc.
#
punch_back: true

tun:
  # sensible defaults. don't monkey with these unless
  # you're CERTAIN you know what you're doing.
  #
  dev: neb0
  drop_local_broadcast: false
  drop_multicast: false
  tx_queue: 500
  mtu: 1300
  routes:

logging:
  level: info
  format: text

# you NEED this firewall section.
#
# Nebula has its own firewall in addition to anything
# your system has in place, and it's all default deny.
#
# So if you don't specify some rules here, you'll drop
# all traffic, and curse and wonder why you can't ping
# one node from another.
#
firewall:
  conntrack:
    tcp_timeout: 120h
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  # since everything is default deny, all rules you
  # actually SPECIFY here are allow rules.
  #
  outbound:
{% for rule in nebula_default_outbound_rules %}
    - port: {{ rule.port }}
      proto: {{ rule.proto }}
      host: {{ rule.host }}
{% endfor %}

  # FIX: this loop previously iterated nebula_default_outbound_rules,
  # silently ignoring any configured inbound rules.
  inbound:
{% for rule in nebula_default_inbound_rules %}
    - port: {{ rule.port }}
      proto: {{ rule.proto }}
      host: {{ rule.host }}
{% endfor %}

View File

@@ -0,0 +1,7 @@
#!/bin/bash
# Watchdog for the Nebula tunnel (run from cron): if the lighthouse is
# completely unreachable over the overlay, bounce the local nebula service.
PATH=/usr/bin:/bin:/sbin:/usr/sbin

# Quiet 10-second ping of the lighthouse's overlay IP; extract the
# packet-loss percentage from the summary line (empty if ping produced
# no output at all).
loss_pct="$(ping -q -w10 -W2 {{ nebula_lighthouse_internal_ip_addr }} 2>/dev/null | grep -oP '\d{1,3}(?=%)')"

# Anything short of total loss means the tunnel is alive -- do nothing.
[ "$loss_pct" == "100" ] || exit 0

logger -s -p user.emerg '!!! Unable to reach Nebula server. Bouncing tunnel neb0... !!!'
systemctl restart nebula.service

16
templates/node.service.j2 Normal file
View File

@@ -0,0 +1,16 @@
# systemd unit for a regular Nebula node, rendered by Ansible from
# node.service.j2 -- local edits will be overwritten.
[Unit]
Description=nebula
Wants=basic.target
After=basic.target network.target
[Service]
SyslogIdentifier=nebula
StandardOutput=syslog
StandardError=syslog
# Nebula reloads its config on SIGHUP.
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/opt/nebula/nebula -config /opt/nebula/config.yml
# Always restart on exit, after a 42s back-off.
Restart=always
RestartSec=42s
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,96 @@
pki:
  # every node needs a copy of the CA certificate,
  # and its own certificate and key, ONLY.
  #
  # Filenames match what tasks/node.yml signed on the lighthouse
  # (nebula-cert sign -name "{{ inventory_hostname }}").
  ca: /opt/nebula/ca.crt
  cert: /opt/nebula/{{ inventory_hostname }}.crt
  key: /opt/nebula/{{ inventory_hostname }}.key

static_host_map:
  # how to find one or more lighthouse nodes
  # you do NOT need every node to be listed here!
  #
  # format "Nebula IP": ["public IP or hostname:port"]
  #
  "{{ nebula_lighthouse_internal_ip_addr }}": ["{{ nebula_lighthouse_public_hostname }}:{{ nebula_lighthouse_public_port }}"]

lighthouse:
  interval: 60
  # if you're a lighthouse, say you're a lighthouse
  #
  am_lighthouse: false
  hosts:
    # If you're a lighthouse, this section should be EMPTY
    # or commented out. If you're NOT a lighthouse, list
    # lighthouse nodes here, one per line, in the following
    # format:
    #
    - "{{ nebula_lighthouse_internal_ip_addr }}"

listen:
  # 0.0.0.0 means "all interfaces," which is probably what you want
  #
  host: 0.0.0.0
  port: 4242

# "punchy" basically means "send frequent keepalive packets"
# so that your router won't expire and close your NAT tunnels.
#
punchy: true

# "punch_back" allows the other node to try punching out to you,
# if you're having trouble punching out to it. Useful for stubborn
# networks with symmetric NAT, etc.
#
punch_back: true

tun:
  # sensible defaults. don't monkey with these unless
  # you're CERTAIN you know what you're doing.
  #
  dev: neb0
  drop_local_broadcast: false
  drop_multicast: false
  tx_queue: 500
  mtu: 1300
  routes:

logging:
  level: info
  format: text

# you NEED this firewall section.
#
# Nebula has its own firewall in addition to anything
# your system has in place, and it's all default deny.
#
# So if you don't specify some rules here, you'll drop
# all traffic, and curse and wonder why you can't ping
# one node from another.
#
firewall:
  conntrack:
    tcp_timeout: 120h
    udp_timeout: 3m
    default_timeout: 10m
    max_connections: 100000

  # since everything is default deny, all rules you
  # actually SPECIFY here are allow rules.
  #
  outbound:
{% for rule in nebula_default_outbound_rules %}
    - port: {{ rule.port }}
      proto: {{ rule.proto }}
      host: {{ rule.host }}
{% endfor %}

  # FIX: this loop previously iterated nebula_default_outbound_rules,
  # silently ignoring any configured inbound rules.
  inbound:
{% for rule in nebula_default_inbound_rules %}
    - port: {{ rule.port }}
      proto: {{ rule.proto }}
      host: {{ rule.host }}
{% endfor %}