Add core infrastructure security and utility roles
- Add firewall role for UFW/iptables management
- Add fail2ban role for intrusion prevention with Docker-aware jails
- Add postfix role for mail relay capabilities
- Add backups role for automated infrastructure backups
  - systemd timer for scheduled backups
  - Backup scripts for Docker volumes and configurations
This commit is contained in:
parent
ac19b5918f
commit
78ad592664
11 changed files with 708 additions and 0 deletions
218
roles/backups/tasks/main.yml
Normal file
218
roles/backups/tasks/main.yml
Normal file
|
|
@@ -0,0 +1,218 @@
|
|||
---
|
||||
|
||||
- name: Set backup schedule defaults
|
||||
set_fact:
|
||||
infra_backup_oncalendar: "{{ INFRA_BACKUP_ONCALENDAR | default(lookup('env', 'INFRA_BACKUP_ONCALENDAR') | default('daily', true), true) }}"
|
||||
restic_keep_daily: "{{ RESTIC_KEEP_DAILY | default(lookup('env', 'RESTIC_KEEP_DAILY') | default(7, true), true) }}"
|
||||
restic_keep_weekly: "{{ RESTIC_KEEP_WEEKLY | default(lookup('env', 'RESTIC_KEEP_WEEKLY') | default(4, true), true) }}"
|
||||
restic_keep_monthly: "{{ RESTIC_KEEP_MONTHLY | default(lookup('env', 'RESTIC_KEEP_MONTHLY') | default(6, true), true) }}"
|
||||
|
||||
- name: Read restic repository
|
||||
set_fact:
|
||||
restic_repository: "{{ RESTIC_REPOSITORY | default(lookup('env', 'RESTIC_REPOSITORY') | default('', true), true) }}"
|
||||
|
||||
- name: Read restic password
|
||||
set_fact:
|
||||
restic_password: "{{ RESTIC_PASSWORD | default(lookup('env', 'RESTIC_PASSWORD') | default('', true), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Read restic AWS access key ID
|
||||
set_fact:
|
||||
restic_aws_access_key_id: >-
|
||||
{{
|
||||
(RESTIC_AWS_ACCESS_KEY_ID
|
||||
| default(
|
||||
lookup('env', 'RESTIC_AWS_ACCESS_KEY_ID')
|
||||
| default(
|
||||
(RESTIC_S3_ACCESS_KEY
|
||||
| default(
|
||||
lookup('env', 'RESTIC_S3_ACCESS_KEY')
|
||||
| default(
|
||||
(S3_ACCESS_KEY_ID
|
||||
| default(lookup('env', 'S3_ACCESS_KEY_ID') | default('', true), true)
|
||||
),
|
||||
true
|
||||
),
|
||||
true
|
||||
)
|
||||
),
|
||||
true
|
||||
),
|
||||
true
|
||||
)
|
||||
)
|
||||
}}
|
||||
no_log: true
|
||||
|
||||
- name: Read restic AWS secret access key
|
||||
set_fact:
|
||||
restic_aws_secret_access_key: >-
|
||||
{{
|
||||
(RESTIC_AWS_SECRET_ACCESS_KEY
|
||||
| default(
|
||||
lookup('env', 'RESTIC_AWS_SECRET_ACCESS_KEY')
|
||||
| default(
|
||||
(RESTIC_S3_SECRET_ACCESS_KEY
|
||||
| default(
|
||||
lookup('env', 'RESTIC_S3_SECRET_ACCESS_KEY')
|
||||
| default(
|
||||
(S3_SECRET_ACCESS_KEY
|
||||
| default(lookup('env', 'S3_SECRET_ACCESS_KEY') | default('', true), true)
|
||||
),
|
||||
true
|
||||
),
|
||||
true
|
||||
)
|
||||
),
|
||||
true
|
||||
),
|
||||
true
|
||||
)
|
||||
)
|
||||
}}
|
||||
no_log: true
|
||||
|
||||
- name: Read restic AWS default region
|
||||
set_fact:
|
||||
restic_aws_default_region: >-
|
||||
{{
|
||||
(RESTIC_AWS_DEFAULT_REGION
|
||||
| default(
|
||||
lookup('env', 'RESTIC_AWS_DEFAULT_REGION')
|
||||
| default(
|
||||
(S3_REGION
|
||||
| default(lookup('env', 'S3_REGION') | default('us-east-1', true), true)
|
||||
),
|
||||
true
|
||||
),
|
||||
true
|
||||
)
|
||||
)
|
||||
}}
|
||||
|
||||
- name: Fail if restic repository format is invalid
|
||||
assert:
|
||||
that:
|
||||
- (restic_repository | length == 0) or (restic_repository is match('^/')) or (restic_repository is match('^[a-zA-Z][a-zA-Z0-9+.-]*:'))
|
||||
fail_msg: >-
|
||||
RESTIC_REPOSITORY must be an absolute path (e.g. /var/backups/restic) or a backend URL with a scheme
|
||||
(e.g. s3:https://..., sftp:user@host:/path, rest:https://...).
|
||||
Got: {{ restic_repository }}
|
||||
|
||||
- name: Fail if S3 credentials are missing for S3 restic repository
|
||||
assert:
|
||||
that:
|
||||
- (restic_aws_access_key_id | default('') | length) > 0
|
||||
- (restic_aws_secret_access_key | default('') | length) > 0
|
||||
fail_msg: >-
|
||||
RESTIC_REPOSITORY uses the S3 backend but credentials are missing.
|
||||
Set RESTIC_AWS_ACCESS_KEY_ID and RESTIC_AWS_SECRET_ACCESS_KEY (recommended),
|
||||
or RESTIC_S3_ACCESS_KEY and RESTIC_S3_SECRET_ACCESS_KEY,
|
||||
or S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY.
|
||||
when:
|
||||
- (restic_repository | default('')) is match('^s3:')
|
||||
|
||||
- name: Fail if restic repository is missing
|
||||
fail:
|
||||
msg: "RESTIC_REPOSITORY is required"
|
||||
when: restic_repository | length == 0
|
||||
|
||||
- name: Fail if restic password is missing
|
||||
fail:
|
||||
msg: "RESTIC_PASSWORD is required"
|
||||
when: restic_password | length == 0
|
||||
|
||||
- name: Install restic
|
||||
apt:
|
||||
name: restic
|
||||
state: present
|
||||
update_cache: true
|
||||
|
||||
- name: Create backup directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0750"
|
||||
loop:
|
||||
- /var/lib/infra-backups
|
||||
- /var/lib/infra-backups/forgejo
|
||||
- /var/lib/infra-backups/.cache
|
||||
|
||||
- name: Write backup environment file
|
||||
template:
|
||||
src: backup.env.j2
|
||||
dest: /etc/infra-backup.env
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0600"
|
||||
|
||||
- name: Install backup script
|
||||
template:
|
||||
src: infra-backup.sh.j2
|
||||
dest: /usr/local/sbin/infra-backup
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0750"
|
||||
|
||||
- name: Install systemd service
|
||||
template:
|
||||
src: infra-backup.service.j2
|
||||
dest: /etc/systemd/system/infra-backup.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
|
||||
- name: Install systemd timer
|
||||
template:
|
||||
src: infra-backup.timer.j2
|
||||
dest: /etc/systemd/system/infra-backup.timer
|
||||
owner: root
|
||||
group: root
|
||||
mode: "0644"
|
||||
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
|
||||
- name: Check if restic repo is initialized
|
||||
command: restic snapshots
|
||||
environment:
|
||||
RESTIC_REPOSITORY: "{{ restic_repository }}"
|
||||
RESTIC_PASSWORD: "{{ restic_password }}"
|
||||
AWS_ACCESS_KEY_ID: "{{ restic_aws_access_key_id }}"
|
||||
AWS_SECRET_ACCESS_KEY: "{{ restic_aws_secret_access_key }}"
|
||||
AWS_DEFAULT_REGION: "{{ restic_aws_default_region }}"
|
||||
AWS_EC2_METADATA_DISABLED: "true"
|
||||
register: restic_snapshots
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
no_log: true
|
||||
|
||||
- name: Initialize restic repository if missing
|
||||
command: restic init
|
||||
environment:
|
||||
RESTIC_REPOSITORY: "{{ restic_repository }}"
|
||||
RESTIC_PASSWORD: "{{ restic_password }}"
|
||||
AWS_ACCESS_KEY_ID: "{{ restic_aws_access_key_id }}"
|
||||
AWS_SECRET_ACCESS_KEY: "{{ restic_aws_secret_access_key }}"
|
||||
AWS_DEFAULT_REGION: "{{ restic_aws_default_region }}"
|
||||
AWS_EC2_METADATA_DISABLED: "true"
|
||||
when: restic_snapshots.rc != 0
|
||||
register: restic_init
|
||||
changed_when: true
|
||||
failed_when: restic_init.rc != 0
|
||||
|
||||
- name: Fail with restic init error output
|
||||
fail:
|
||||
msg: "restic init failed (rc={{ restic_init.rc }}). stdout: {{ restic_init.stdout | default('') }}\n\nstderr: {{ restic_init.stderr | default('') }}"
|
||||
when:
|
||||
- restic_snapshots.rc != 0
|
||||
- restic_init.rc != 0
|
||||
|
||||
- name: Enable and start infra-backup timer
|
||||
systemd:
|
||||
name: infra-backup.timer
|
||||
enabled: true
|
||||
state: started
|
||||
10
roles/backups/templates/backup.env.j2
Normal file
10
roles/backups/templates/backup.env.j2
Normal file
|
|
@@ -0,0 +1,10 @@
|
|||
RESTIC_REPOSITORY={{ restic_repository }}
|
||||
RESTIC_PASSWORD={{ restic_password }}
|
||||
AWS_ACCESS_KEY_ID={{ restic_aws_access_key_id }}
|
||||
AWS_SECRET_ACCESS_KEY={{ restic_aws_secret_access_key }}
|
||||
AWS_DEFAULT_REGION={{ restic_aws_default_region }}
|
||||
AWS_EC2_METADATA_DISABLED=true
|
||||
|
||||
RESTIC_KEEP_DAILY={{ restic_keep_daily }}
|
||||
RESTIC_KEEP_WEEKLY={{ restic_keep_weekly }}
|
||||
RESTIC_KEEP_MONTHLY={{ restic_keep_monthly }}
|
||||
11
roles/backups/templates/infra-backup.service.j2
Normal file
11
roles/backups/templates/infra-backup.service.j2
Normal file
|
|
@@ -0,0 +1,11 @@
|
|||
[Unit]
|
||||
Description=Infra backup (restic)
|
||||
Wants=network-online.target
|
||||
After=network-online.target docker.service
|
||||
|
||||
[Service]
|
||||
Type=oneshot
|
||||
EnvironmentFile=/etc/infra-backup.env
|
||||
Environment=HOME=/var/lib/infra-backups
|
||||
Environment=XDG_CACHE_HOME=/var/lib/infra-backups/.cache
|
||||
ExecStart=/usr/local/sbin/infra-backup
|
||||
62
roles/backups/templates/infra-backup.sh.j2
Normal file
62
roles/backups/templates/infra-backup.sh.j2
Normal file
|
|
@@ -0,0 +1,62 @@
|
|||
#!/usr/bin/env bash
# Infra backup: dump Forgejo (when its container is running), then back up
# key host paths with restic and apply the retention policy.
# All progress is appended to /var/log/custom/infra-backup.log.

set -euo pipefail

# Load restic repository/credentials and retention settings written by Ansible.
if [ -f /etc/infra-backup.env ]; then
  set -a
  # shellcheck disable=SC1091
  . /etc/infra-backup.env
  set +a
fi

log_dir=/var/log/custom
log_file=${log_dir}/infra-backup.log

mkdir -p "${log_dir}"
mkdir -p /var/lib/infra-backups/forgejo

# UTC timestamp used to name this run's Forgejo dump.
ts=$(date -u +%Y%m%dT%H%M%SZ)

echo "[$(date -Is)] starting infra backup" >>"${log_file}"

forgejo_dump_path="/var/lib/infra-backups/forgejo/forgejo-dump-${ts}.zip"

if docker ps --format '{% raw %}{{.Names}}{% endraw %}' | grep -q '^forgejo-forgejo-1$'; then
  if docker exec --user 1000:1000 forgejo-forgejo-1 forgejo dump --file - --type zip --skip-log >"${forgejo_dump_path}" 2>>"${log_file}"; then
    echo "[$(date -Is)] forgejo dump ok: ${forgejo_dump_path}" >>"${log_file}"
  else
    # Remove the partial file so restic does not archive a corrupt dump.
    rm -f "${forgejo_dump_path}" || true
    echo "[$(date -Is)] forgejo dump failed" >>"${log_file}"
  fi
else
  echo "[$(date -Is)] forgejo container not found; skipping forgejo dump" >>"${log_file}"
fi

# Prune local staging dumps older than 7 days. Restic snapshots already keep
# history; retaining every timestamped dump here would grow the disk without
# bound and bloat every subsequent snapshot.
find /var/lib/infra-backups/forgejo -name 'forgejo-dump-*.zip' -mtime +7 -delete 2>>"${log_file}" || true

restic_args=(
  backup
  --tag infra
  --tag "${HOSTNAME}"
  /opt
  /var/lib/docker/volumes
  /var/lib/infra-backups
)

if restic "${restic_args[@]}" >>"${log_file}" 2>&1; then
  echo "[$(date -Is)] restic backup ok" >>"${log_file}"
else
  echo "[$(date -Is)] restic backup failed" >>"${log_file}"
  exit 1
fi

# Retention knobs come from /etc/infra-backup.env; fall back to the same
# defaults the Ansible role uses.
keep_daily=${RESTIC_KEEP_DAILY:-7}
keep_weekly=${RESTIC_KEEP_WEEKLY:-4}
keep_monthly=${RESTIC_KEEP_MONTHLY:-6}

if restic forget --keep-daily "${keep_daily}" --keep-weekly "${keep_weekly}" --keep-monthly "${keep_monthly}" --prune >>"${log_file}" 2>&1; then
  echo "[$(date -Is)] restic prune ok" >>"${log_file}"
else
  echo "[$(date -Is)] restic prune failed" >>"${log_file}"
  exit 1
fi

echo "[$(date -Is)] infra backup complete" >>"${log_file}"
|
||||
9
roles/backups/templates/infra-backup.timer.j2
Normal file
9
roles/backups/templates/infra-backup.timer.j2
Normal file
|
|
@@ -0,0 +1,9 @@
|
|||
[Unit]
|
||||
Description=Run infra backup on schedule
|
||||
|
||||
[Timer]
|
||||
OnCalendar={{ infra_backup_oncalendar }}
|
||||
Persistent=true
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
62
roles/fail2ban/tasks/main.yml
Normal file
62
roles/fail2ban/tasks/main.yml
Normal file
|
|
@@ -0,0 +1,62 @@
|
|||
---
|
||||
- name: Create fail2ban directory
|
||||
file:
|
||||
path: /opt/fail2ban
|
||||
state: directory
|
||||
|
||||
- name: Create fail2ban db directory
|
||||
file:
|
||||
path: /opt/fail2ban/db
|
||||
state: directory
|
||||
|
||||
- name: Copy fail2ban jail configuration
|
||||
template:
|
||||
src: jail.local.j2
|
||||
dest: /opt/fail2ban/jail.local
|
||||
|
||||
- name: Create fail2ban filter.d directory
|
||||
file:
|
||||
path: /opt/fail2ban/filter.d
|
||||
state: directory
|
||||
|
||||
- name: Copy filter definitions
|
||||
copy:
|
||||
dest: /opt/fail2ban/filter.d/{{ item.name }}
|
||||
content: "{{ item.content }}"
|
||||
loop:
|
||||
- name: traefik-auth.conf
|
||||
content: |
|
||||
[Definition]
|
||||
failregex = ^.*"ClientHost":"<HOST>".*"status":"401".*$
|
||||
ignoreregex =
|
||||
datepattern = {^.*"time":"%%Y-%%m-%%dT%%H:%%M:%%S".*$}
|
||||
|
||||
- name: forgejo-auth.conf
|
||||
content: |
|
||||
[Definition]
|
||||
failregex = ^.*authentication failure.*remote_address=<HOST>.*$
|
||||
^.*"level":"error".*"msg":"Authentication failed".*"remote_address":"<HOST>".*$
|
||||
ignoreregex =
|
||||
|
||||
- name: traefik-bad-request.conf
|
||||
content: |
|
||||
[Definition]
|
||||
failregex = ^.*"ClientHost":"<HOST>".*"status":"(400|403|404)".*"request":"(GET|POST) .*\\.(php|cgi|asp|aspx|jsp|env|git|svn|htaccess).*".*$
|
||||
^.*"ClientHost":"<HOST>".*"request":"(GET|POST) /(wp-|wordpress|xmlrpc|admin|wp-content|wp-includes|wp-json).*".*$
|
||||
ignoreregex =
|
||||
|
||||
- name: Copy Docker Compose file for fail2ban
|
||||
template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: /opt/fail2ban/docker-compose.yml
|
||||
|
||||
- name: Deploy fail2ban
|
||||
command: docker compose up -d
|
||||
args:
|
||||
chdir: /opt/fail2ban
|
||||
|
||||
- name: Ensure fail2ban is started
|
||||
command: docker compose start
|
||||
args:
|
||||
chdir: /opt/fail2ban
|
||||
changed_when: false
|
||||
19
roles/fail2ban/templates/docker-compose.yml.j2
Normal file
19
roles/fail2ban/templates/docker-compose.yml.j2
Normal file
|
|
@@ -0,0 +1,19 @@
|
|||
services:
  fail2ban:
    # NOTE(review): "latest" is unpinned; consider pinning a version tag.
    image: crazymax/fail2ban:latest
    environment:
      - TZ=UTC
      - F2B_DB_FILE=/data/fail2ban.sqlite3
      - F2B_LOG_LEVEL=INFO
    volumes:
      - ./jail.local:/etc/fail2ban/jail.local:ro
      - ./filter.d:/etc/fail2ban/filter.d:ro
      - ./db:/data
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    network_mode: host
    # NET_ADMIN + NET_RAW are what fail2ban needs to manage iptables from a
    # container; full "privileged" mode was redundant with these capabilities
    # and granted far more access than required, so it has been removed.
    cap_add:
      - NET_ADMIN
      - NET_RAW
    restart: unless-stopped
|
||||
42
roles/fail2ban/templates/jail.local.j2
Normal file
42
roles/fail2ban/templates/jail.local.j2
Normal file
|
|
@@ -0,0 +1,42 @@
|
|||
[DEFAULT]
bantime = 1h
findtime = 10m
maxretry = 5
# "systemd" would make every jail read the journal, which is NOT mounted into
# the fail2ban container (the compose file only mounts /var/log and the Docker
# json logs). "auto" lets each jail poll the logpath files it declares.
backend = auto
banaction = iptables-multiport
banaction_allports = iptables-allports
|
||||
|
||||
[sshd]
|
||||
enabled = true
|
||||
port = ssh
|
||||
filter = sshd
|
||||
logpath = /var/log/auth.log
|
||||
maxretry = 3
|
||||
bantime = 2h
|
||||
|
||||
[traefik-auth]
|
||||
enabled = true
|
||||
port = http,https
|
||||
filter = traefik-auth
|
||||
logpath = /var/lib/docker/containers/*/*-json.log
|
||||
maxretry = 5
|
||||
findtime = 5m
|
||||
bantime = 1h
|
||||
|
||||
[forgejo-auth]
|
||||
enabled = true
|
||||
port = http,https
|
||||
filter = forgejo-auth
|
||||
logpath = /var/lib/docker/containers/*/*-json.log
|
||||
maxretry = 5
|
||||
findtime = 5m
|
||||
bantime = 1h
|
||||
|
||||
[traefik-bad-request]
|
||||
enabled = true
|
||||
port = http,https
|
||||
filter = traefik-bad-request
|
||||
logpath = /var/lib/docker/containers/*/*-json.log
|
||||
maxretry = 10
|
||||
findtime = 2m
|
||||
bantime = 30m
|
||||
191
roles/firewall/tasks/main.yml
Normal file
191
roles/firewall/tasks/main.yml
Normal file
|
|
@@ -0,0 +1,191 @@
|
|||
---
|
||||
- name: Ensure iptables persistence packages are installed
|
||||
apt:
|
||||
name:
|
||||
- iptables
|
||||
- iptables-persistent
|
||||
state: present
|
||||
|
||||
- name: Define Cloudflare IP allowlist
|
||||
set_fact:
|
||||
cloudflare_ips:
|
||||
- 173.245.48.0/20
|
||||
- 103.21.244.0/22
|
||||
- 103.22.200.0/22
|
||||
- 103.31.4.0/22
|
||||
- 141.101.64.0/18
|
||||
- 108.162.192.0/18
|
||||
- 190.93.240.0/20
|
||||
- 188.114.96.0/20
|
||||
- 197.234.240.0/22
|
||||
- 198.41.128.0/17
|
||||
- 162.158.0.0/15
|
||||
- 104.16.0.0/13
|
||||
- 104.24.0.0/14
|
||||
- 172.64.0.0/13
|
||||
- 131.0.72.0/22
|
||||
- 2400:cb00::/32
|
||||
- 2606:4700::/32
|
||||
- 2803:f800::/32
|
||||
- 2405:b500::/32
|
||||
- 2405:8100::/32
|
||||
- 2a06:98c0::/29
|
||||
- 2c0f:f248::/32
|
||||
|
||||
- name: Remove legacy broad DOCKER-USER allow rules for port 80/443 (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
while iptables -C DOCKER-USER -p tcp -s {{ item }} --dport 80 -j ACCEPT 2>/dev/null; do
|
||||
iptables -D DOCKER-USER -p tcp -s {{ item }} --dport 80 -j ACCEPT
|
||||
done
|
||||
while iptables -C DOCKER-USER -p tcp -s {{ item }} --dport 443 -j ACCEPT 2>/dev/null; do
|
||||
iptables -D DOCKER-USER -p tcp -s {{ item }} --dport 443 -j ACCEPT
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
loop: "{{ cloudflare_ips }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: "':' not in item"
|
||||
|
||||
- name: Remove legacy broad DOCKER-USER drop rule for ports 80/443 (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
while iptables -C DOCKER-USER -p tcp -m multiport --dports 80,443 -j DROP 2>/dev/null; do
|
||||
iptables -D DOCKER-USER -p tcp -m multiport --dports 80,443 -j DROP
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Ensure DOCKER-USER chain has an early accept for established connections (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
iptables -C DOCKER-USER -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT 2>/dev/null || \
|
||||
iptables -I DOCKER-USER 1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Allowlist Cloudflare IPs to Docker-published port 80 (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
iptables -C DOCKER-USER -o "$iface" -p tcp -s {{ item }} --dport 80 -j ACCEPT 2>/dev/null || \
|
||||
iptables -I DOCKER-USER 2 -o "$iface" -p tcp -s {{ item }} --dport 80 -j ACCEPT
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
loop: "{{ cloudflare_ips }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: "':' not in item"
|
||||
|
||||
- name: Allowlist Cloudflare IPs to Docker-published port 443 (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
iptables -C DOCKER-USER -o "$iface" -p tcp -s {{ item }} --dport 443 -j ACCEPT 2>/dev/null || \
|
||||
iptables -I DOCKER-USER 2 -o "$iface" -p tcp -s {{ item }} --dport 443 -j ACCEPT
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
loop: "{{ cloudflare_ips }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: "':' not in item"
|
||||
|
||||
- name: Drop non-Cloudflare traffic to Docker-published ports 80/443 (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
iptables -C DOCKER-USER -o "$iface" -p tcp -m multiport --dports 80,443 -j DROP 2>/dev/null || \
|
||||
iptables -A DOCKER-USER -o "$iface" -p tcp -m multiport --dports 80,443 -j DROP
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Ensure DOCKER-USER chain ends with RETURN (IPv4)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
iptables -C DOCKER-USER -j RETURN 2>/dev/null || iptables -A DOCKER-USER -j RETURN
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Ensure DOCKER-USER chain has an early accept for established connections (IPv6)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
ip6tables -C DOCKER-USER -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT 2>/dev/null || \
|
||||
ip6tables -I DOCKER-USER 1 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Allowlist Cloudflare IPs to Docker-published port 80 (IPv6)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
ip6tables -C DOCKER-USER -o "$iface" -p tcp -s {{ item }} --dport 80 -j ACCEPT 2>/dev/null || \
|
||||
ip6tables -I DOCKER-USER 2 -o "$iface" -p tcp -s {{ item }} --dport 80 -j ACCEPT
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
loop: "{{ cloudflare_ips }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: "':' in item"
|
||||
|
||||
- name: Allowlist Cloudflare IPs to Docker-published port 443 (IPv6)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
ip6tables -C DOCKER-USER -o "$iface" -p tcp -s {{ item }} --dport 443 -j ACCEPT 2>/dev/null || \
|
||||
ip6tables -I DOCKER-USER 2 -o "$iface" -p tcp -s {{ item }} --dport 443 -j ACCEPT
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
loop: "{{ cloudflare_ips }}"
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
when: "':' in item"
|
||||
|
||||
- name: Drop non-Cloudflare traffic to Docker-published ports 80/443 (IPv6)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
for iface in docker0 br+; do
|
||||
ip6tables -C DOCKER-USER -o "$iface" -p tcp -m multiport --dports 80,443 -j DROP 2>/dev/null || \
|
||||
ip6tables -A DOCKER-USER -o "$iface" -p tcp -m multiport --dports 80,443 -j DROP
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Ensure DOCKER-USER chain ends with RETURN (IPv6)
|
||||
shell: |
|
||||
set -euo pipefail
|
||||
ip6tables -C DOCKER-USER -j RETURN 2>/dev/null || ip6tables -A DOCKER-USER -j RETURN
|
||||
args:
|
||||
executable: /bin/bash
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
- name: Persist iptables rules
|
||||
command: netfilter-persistent save
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
|
||||
# Ensure sudo does not stall/warn on hostname lookup: the machine's hostname
# must resolve locally (Debian convention maps it to 127.0.1.1 in /etc/hosts).
- name: Fix sudo hostname resolution (ensure hostname is in /etc/hosts)
  lineinfile:
    path: /etc/hosts
    # Single-quoted YAML keeps backslashes literal, so the previous
    # '^127\\.0\\.1\\.1\\s+' reached the regex engine as literal
    # backslash sequences and never matched an existing 127.0.1.1 line,
    # causing duplicate entries on every run. Single backslashes yield
    # the intended pattern ^127\.0\.1\.1\s+ and keep the task idempotent.
    regexp: '^127\.0\.1\.1\s+'
    line: "127.0.1.1 {{ ansible_facts['hostname'] }}"
    state: present
    create: true
|
||||
64
roles/postfix/tasks/main.yml
Normal file
64
roles/postfix/tasks/main.yml
Normal file
|
|
@@ -0,0 +1,64 @@
|
|||
---
|
||||
|
||||
- name: Read Authelia use Postfix
|
||||
set_fact:
|
||||
authelia_use_postfix: "{{ (AUTHELIA_USE_POSTFIX | default(lookup('env', 'AUTHELIA_USE_POSTFIX') | default('false', true), true)) | bool }}"
|
||||
no_log: true
|
||||
|
||||
- name: Configure Postfix (send-only relay)
|
||||
block:
|
||||
- name: Read Postfix relay host
  set_fact:
    # Use the same defaulting pattern as every other env-read in these roles:
    # the boolean second argument to default() treats a defined-but-empty
    # variable as unset, falling through to the environment lookup and then
    # to '' so the length check below behaves consistently.
    postfix_relayhost: "{{ POSTFIX_RELAYHOST | default(lookup('env', 'POSTFIX_RELAYHOST') | default('', true), true) }}"
  no_log: true
|
||||
|
||||
- name: Fail if Postfix relay host is missing
|
||||
fail:
|
||||
msg: "POSTFIX_RELAYHOST is required"
|
||||
when: postfix_relayhost | length == 0
|
||||
|
||||
- name: Read Postfix relay host username
|
||||
set_fact:
|
||||
postfix_relayhost_username: "{{ POSTFIX_RELAYHOST_USERNAME | default(lookup('env', 'POSTFIX_RELAYHOST_USERNAME') | default('', true), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Read Postfix relay host password
|
||||
set_fact:
|
||||
postfix_relayhost_password: "{{ POSTFIX_RELAYHOST_PASSWORD | default(lookup('env', 'POSTFIX_RELAYHOST_PASSWORD') | default('', true), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Fail if Postfix relay host username/password pairing is invalid
|
||||
fail:
|
||||
msg: "POSTFIX_RELAYHOST_USERNAME and POSTFIX_RELAYHOST_PASSWORD must both be set, or both be empty"
|
||||
when: (postfix_relayhost_username | length == 0) != (postfix_relayhost_password | length == 0)
|
||||
|
||||
- name: Read Postfix allowed sender domains
|
||||
set_fact:
|
||||
postfix_allowed_sender_domains: "{{ POSTFIX_ALLOWED_SENDER_DOMAINS | default(lookup('env', 'POSTFIX_ALLOWED_SENDER_DOMAINS') | default('', true), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Read Postfix allow empty sender domains
|
||||
set_fact:
|
||||
postfix_allow_empty_sender_domains: "{{ (POSTFIX_ALLOW_EMPTY_SENDER_DOMAINS | default(lookup('env', 'POSTFIX_ALLOW_EMPTY_SENDER_DOMAINS') | default('true', true), true)) | bool }}"
|
||||
no_log: true
|
||||
|
||||
- name: Read Postfix SMTP TLS security level
|
||||
set_fact:
|
||||
postfix_smtp_tls_security_level: "{{ POSTFIX_SMTP_TLS_SECURITY_LEVEL | default(lookup('env', 'POSTFIX_SMTP_TLS_SECURITY_LEVEL') | default('may', true), true) }}"
|
||||
no_log: true
|
||||
|
||||
- name: Create Postfix directory
|
||||
file:
|
||||
path: /opt/postfix
|
||||
state: directory
|
||||
|
||||
- name: Copy Docker Compose file for Postfix
|
||||
template:
|
||||
src: docker-compose.yml.j2
|
||||
dest: /opt/postfix/docker-compose.yml
|
||||
|
||||
- name: Deploy Postfix
|
||||
command: docker compose up -d
|
||||
args:
|
||||
chdir: /opt/postfix
|
||||
when: authelia_use_postfix
|
||||
20
roles/postfix/templates/docker-compose.yml.j2
Normal file
20
roles/postfix/templates/docker-compose.yml.j2
Normal file
|
|
@@ -0,0 +1,20 @@
|
|||
services:
|
||||
postfix:
|
||||
image: boky/postfix:latest
|
||||
environment:
|
||||
RELAYHOST: "{{ postfix_relayhost }}"
|
||||
{% if postfix_relayhost_username | length > 0 %}
|
||||
RELAYHOST_USERNAME: "{{ postfix_relayhost_username }}"
|
||||
RELAYHOST_PASSWORD: "{{ postfix_relayhost_password }}"
|
||||
{% endif %}
|
||||
POSTFIX_smtp_tls_security_level: "{{ postfix_smtp_tls_security_level }}"
|
||||
ALLOWED_SENDER_DOMAINS: "{{ postfix_allowed_sender_domains }}"
|
||||
ALLOW_EMPTY_SENDER_DOMAINS: "{{ postfix_allow_empty_sender_domains | ternary('true', 'false') }}"
|
||||
POSTFIX_mynetworks: "127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16"
|
||||
networks:
|
||||
- proxy
|
||||
restart: unless-stopped
|
||||
|
||||
networks:
|
||||
proxy:
|
||||
external: true
|
||||
Loading…
Reference in a new issue