Compare commits

1 commit: 7ff2c64ff5
.github/workflows/vagrant.yml (vendored, 5 changes)
@ -3,9 +3,8 @@ name: homelab-ci
on:
push:
branches:
- github_actions
# - main
# - testing
- main
- testing

jobs:
homelab-ci:
Makefile (10 changes)
@ -1,10 +0,0 @@
.PHONY: clean install

all: install

install:
vagrant up --no-destroy-on-error
sudo ./forward-ssh.sh

clean:
vagrant destroy -f && rm -rf .vagrant
README.md (69 changes)
@ -1,76 +1,41 @@
# Homelab
# Project Moxie

This project is my personal IT homelab initiative for self-hosting and
exploring Free and Open Source Software (FOSS) infrastructure. As a technology
enthusiast and professional, this project is primarily a practical tool for
hosting services. It serves as a playground for engaging with systems
technology in functional, intriguing, and gratifying ways. Self-hosting
empowers individuals to govern their digital space, ensuring that their online
environments reflect personal ethics rather than centralized entities' opaque
policies.

Built on Debian Stable, this project utilizes Ansible and Vagrant, providing
relatively easy-to-use reproducible ephemeral environments to test
infrastructure automation before pushing to live systems.
Project Moxie is a personal IT homelab project written in Ansible and executed by Jenkins. It is a growing collection of infrastructure as code (IaC) I write out of curiosity and for reference purposes, keeping a handful of beneficial projects managed and secured.

## Quick Start

To configure a local virtual machine for testing, follow these simple steps.

### Prerequisites

Vagrant and VirtualBox are used to develop Project Moxie. You will need to install these before continuing.

### Installation

1. Clone this repository
```
git clone https://git.krislamo.org/kris/homelab
```
Optionally clone from the GitHub mirror instead:
```
git clone https://github.com/krislamo/homelab
git clone https://github.com/krislamo/moxie
```
2. Set the `PLAYBOOK` environmental variable to a development playbook name in the `dev/` directory

To list available options in the `dev/` directory and choose a suitable PLAYBOOK, run:
```
ls dev/*.yml | xargs -n 1 basename -s .yml
```
Export the `PLAYBOOK` variable
The following `PLAYBOOK` names are available: `dockerbox`, `hypervisor`, `minecraft`, `bitwarden`, `nextcloud`, `nginx`

```
export PLAYBOOK=dockerbox
```
3. Clean up any previous provision and build the VM
3. Bring the Vagrant box up
```
make clean && make
vagrant up
```

## Vagrant Settings
The Vagrantfile configures the environment based on settings from `.vagrant.yml`,
with default values including:

- PLAYBOOK: `default`
  - Runs a `default` playbook that does nothing.
  - You can set this by an environmental variable with the same name.
- VAGRANT_BOX: `debian/bookworm64`
  - Current Debian Stable codename
- VAGRANT_CPUS: `2`
  - Threads or cores per node, depending on CPU architecture
- VAGRANT_MEM: `2048`
  - Specifies the amount of memory (in MB) allocated
- SSH_FORWARD: `false`
  - Enable this if you need to forward SSH agents to the Vagrant machine
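For reference, a minimal `.vagrant.yml` putting these settings together might look like the sketch below. It is an illustrative assumption based only on the defaults listed above (with `dockerbox` as an example `PLAYBOOK`), not a file copied from the repository, and it assumes the keys match the setting names:

```
# .vagrant.yml (sketch; keys assumed to match the setting names above)
PLAYBOOK: dockerbox
VAGRANT_BOX: debian/bookworm64
VAGRANT_CPUS: 2
VAGRANT_MEM: 2048
SSH_FORWARD: false
```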

## Copyright and License
Copyright (C) 2019-2023 Kris Lamoureux
#### Copyright and License
Copyright (C) 2020-2021 Kris Lamoureux

[![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0)

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, version 3 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 3 of the License.

You should have received a copy of the GNU General Public License along with
this program. If not, see <https://www.gnu.org/licenses/>.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
Vagrantfile (vendored, 1 change)
@ -43,6 +43,5 @@ Vagrant.configure("2") do |config|
ENV['ANSIBLE_ROLES_PATH'] = File.dirname(__FILE__) + "/roles"
ansible.compatibility_mode = "2.0"
ansible.playbook = "dev/" + PLAYBOOK + ".yml"
ansible.raw_arguments = ["--diff"]
end
end
@ -1,7 +1,6 @@
[defaults]
inventory = ./environments/development
interpreter_python = /usr/bin/python3
roles_path = ./roles

[connection]
pipelining = true
@ -1,8 +0,0 @@
- name: Install Docker Server
hosts: all
become: true
vars_files:
- host_vars/docker.yml
roles:
- base
- docker
@ -6,7 +6,8 @@
roles:
- base
- docker
- mariadb
- traefik
- nextcloud
- proxy
- jenkins
- prometheus
- nginx
@ -1,10 +0,0 @@
- name: Install Gitea Server
hosts: all
become: true
vars_files:
- host_vars/gitea.yml
roles:
- base
- docker
- mariadb
- gitea
@ -1,48 +0,0 @@
# base
allow_reboot: false
manage_network: false

# Import my GPG key for git signature verification
root_gpgkeys:
- name: kris@lamoureux.io
id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925

# docker
docker_users:
- vagrant

#docker_login_url: https://myregistry.example.com
#docker_login_user: myuser
#docker_login_pass: YOUR_PASSWD

docker_compose_env_nolog: false # dev only setting
docker_compose_deploy:
# Traefik
- name: traefik
url: https://github.com/krislamo/traefik
version: 31ee724feebc1d5f91cb17ffd6892c352537f194
enabled: true
accept_newhostkey: true # Consider verifying manually instead
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
ENABLE: true

# Traefik 2 (no other external compose to test currently)
- name: traefik2
url: https://github.com/krislamo/traefik
version: 31ee724feebc1d5f91cb17ffd6892c352537f194
enabled: true
accept_newhostkey: true # Consider verifying manually instead
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
ENABLE: true
VERSION: "2.10"
DOMAIN: traefik2.local.krislamo.org
NAME: traefik2
ROUTER: traefik2
NETWORK: traefik2
WEB_PORT: 127.0.0.1:8000:80
WEBSECURE_PORT: 127.0.0.1:4443:443
LOCAL_PORT: 127.0.0.1:8444:8443
@ -2,47 +2,44 @@
allow_reboot: false
manage_network: false

# Import my GPG key for git signature verification
root_gpgkeys:
- name: kris@lamoureux.io
id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925

# proxy
proxy:
servers:
- domain: cloud.local.krislamo.org
proxy_pass: http://127.0.0.1:8000

# docker
docker_official: true # docker's apt repos
docker_users:
- vagrant

docker_compose_env_nolog: false # dev only setting
docker_compose_deploy:
# Traefik
- name: traefik
url: https://github.com/krislamo/traefik
version: d62bd06b37ecf0993962b0449a9d708373f9e381
enabled: true
accept_newhostkey: true # Consider verifying manually instead
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
DASHBOARD: true
# Nextcloud
- name: nextcloud
url: https://github.com/krislamo/nextcloud
version: fe6d349749f178e91ae7ff726d557f48ebf84356
env:
DATA: ./data

# traefik
traefik:
ENABLE: true
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
traefik_web_entry: 0.0.0.0:80
traefik_websecure_entry: 0.0.0.0:443
#traefik_acme_email: realemail@example.com # Let's Encrypt settings
#traefik_production: true
#traefik_http_only: true # if behind reverse-proxy

# nextcloud
nextcloud:
DOMAIN: cloud.local.krislamo.org
DB_PASSWD: password
ADMIN_PASSWD: password
nextcloud_version: stable
nextcloud_admin: admin
nextcloud_pass: password
nextcloud_domain: cloud.local.krislamo.org

nextcloud_dbversion: latest
nextcloud_dbpass: password

# jenkins
jenkins_version: lts
jenkins_domain: jenkins.local.krislamo.org

# prometheus (includes grafana)
prom_version: latest
prom_domain: prom.local.krislamo.org
grafana_version: latest
grafana_domain: grafana.local.krislamo.org
prom_targets: "['10.0.2.15:9100']"

# nginx
nginx_domain: nginx.local.krislamo.org
nginx_name: staticsite
nginx_repo_url: https://git.krislamo.org/kris/example-website/
nginx_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
nginx_version: latest
@ -1,50 +0,0 @@
# base
allow_reboot: false
manage_network: false

users:
git:
uid: 1001
gid: 1001
home: true
system: true

# Import my GPG key for git signature verification
root_gpgkeys:
- name: kris@lamoureux.io
id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925

# docker
docker_official: true # docker's apt repos
docker_users:
- vagrant

docker_compose_env_nolog: false # dev only setting
docker_compose_deploy:
# Traefik
- name: traefik
url: https://github.com/krislamo/traefik
version: 398eb48d311db78b86abf783f903af4a1658d773
enabled: true
accept_newhostkey: true
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
ENABLE: true
# Gitea
- name: gitea
url: https://github.com/krislamo/gitea
version: b0ce66f6a1ab074172eed79eeeb36d7e9011ef8f
enabled: true
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
USER_UID: "{{ users.git.uid }}"
USER_GID: "{{ users.git.gid }}"
DB_PASSWD: "{{ gitea.DB_PASSWD }}"

# gitea
gitea:
DB_NAME: gitea
DB_USER: gitea
DB_PASSWD: password
@ -5,12 +5,7 @@ allow_reboot: false
manage_network: false

users:
jellyfin:
uid: 1001
gid: 1001
shell: /usr/sbin/nologin
home: false
system: true
- name: jellyfin

samba:
users:
@ -4,18 +4,6 @@ base_domain: local.krislamo.org
allow_reboot: false
manage_network: false

users:
git:
uid: 1001
gid: 1001
home: true
system: true

# Import my GPG key for git signature verification
root_gpgkeys:
- name: kris@lamoureux.io
id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925

# proxy
proxy:
#production: true
@ -27,49 +15,14 @@ proxy:
- "{{ base_domain }}"
servers:
- domain: "{{ bitwarden_domain }}"
proxy_pass: "http://127.0.0.1"
proxy_pass: "http://127.0.0.1:8080"
- domain: "{{ gitea_domain }}"
proxy_pass: "http://127.0.0.1"
proxy_pass: "http://127.0.0.1:3000"

# docker
docker_official: true # docker's apt repos
docker_users:
- vagrant

docker_compose_env_nolog: false # dev only setting
docker_compose_deploy:
# Traefik
- name: traefik
url: https://github.com/krislamo/traefik
version: e97db75e2e214582fac5f5e495687ab5cdf855ad
path: docker-compose.web.yml
enabled: true
accept_newhostkey: true
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
ENABLE: true
# Gitea
- name: gitea
url: https://github.com/krislamo/gitea
version: b0ce66f6a1ab074172eed79eeeb36d7e9011ef8f
enabled: true
trusted_keys:
- FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
ENTRYPOINT: web
ENABLE_TLS: false
USER_UID: "{{ users.git.uid }}"
USER_GID: "{{ users.git.gid }}"
DB_PASSWD: "{{ gitea.DB_PASSWD }}"

# gitea
gitea_domain: "git.{{ base_domain }}"
gitea:
DB_NAME: gitea
DB_USER: gitea
DB_PASSWD: password

# bitwarden
# Get Installation ID & Key at https://bitwarden.com/host/
bitwarden_domain: "vault.{{ base_domain }}"

@ -77,3 +30,8 @@ bitwarden_dbpass: password
bitwarden_install_id: 4ea840a3-532e-4cb6-a472-abd900728b23
bitwarden_install_key: 1yB3Z2gRI0KnnH90C6p
#bitwarden_prodution: true

# gitea
gitea_domain: "git.{{ base_domain }}"
gitea_version: 1
gitea_dbpass: password
@ -5,8 +5,8 @@
- host_vars/proxy.yml
roles:
- base
- mariadb
- proxy
- docker
- mariadb
- gitea
- bitwarden
@ -1,30 +1,7 @@
#!/bin/bash

# Finds the SSH private key under ./.vagrant and connects to
# the Vagrant box, port forwarding localhost ports: 8443, 443, 80, 22
#
# Download the latest script:
# https://git.krislamo.org/kris/homelab/raw/branch/main/forward-ssh.sh
#
# Copyright (C) 2023 Kris Lamoureux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# Root check
if [ "$EUID" -ne 0 ]; then
echo "[ERROR]: Please run this script as root"
exit 1
fi
# the Vagrant box, port forwarding localhost ports: 8443, 80, 443

# Clean environment
unset PRIVATE_KEY
@ -34,62 +11,34 @@ unset PKILL_ANSWER

# Function to create the SSH tunnel
function ssh_connect {
read -rp "Start a new vagrant SSH tunnel? [y/N] " PSTART_ANSWER
echo
case "$PSTART_ANSWER" in
[yY])
printf "[INFO]: Starting new vagrant SSH tunnel on PID "
sudo -u "$USER" ssh -fNT -i "$PRIVATE_KEY" \
-L 22:localhost:22 \
-L 80:"$HOST_IP":80 \
-L 443:"$HOST_IP":443 \
-L 8443:localhost:8443 \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
vagrant@"$HOST_IP" 2>/dev/null
sleep 2
pgrep -f "$MATCH_PATTERN"
;;
*)
echo "[INFO]: Declined to start a new vagrant SSH tunnel"
exit 0
;;
esac
printf "[INFO]: Starting new vagrant SSH tunnel on PID "
sudo ssh -fNT -i "$PRIVATE_KEY" \
-L 8443:localhost:8443 \
-L 80:localhost:80 \
-L 443:localhost:443 \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
vagrant@"$HOST_IP" 2>/dev/null
sleep 2
pgrep -f "$MATCH_PATTERN"
}

# Check for valid PRIVATE_KEY location
PRIVATE_KEY="$(find .vagrant -name "private_key" 2>/dev/null | sort)"

# Single vagrant machine or multiple
if [ "$(echo "$PRIVATE_KEY" | wc -l)" -gt 1 ]; then
while IFS= read -r KEYFILE; do
if ! ssh-keygen -l -f "$KEYFILE" &>/dev/null; then
echo "[ERROR]: The SSH key '$KEYFILE' is not valid. Are your virtual machines running?"
exit 1
fi
echo "[CHECK]: Valid key at $KEYFILE"
done < <(echo "$PRIVATE_KEY")
PRIVATE_KEY="$(echo "$PRIVATE_KEY" | grep -m1 "${1:-default}")"
elif ! ssh-keygen -l -f "$PRIVATE_KEY" &>/dev/null; then
PRIVATE_KEY="$(find .vagrant -name "private_key" 2>/dev/null)"
if ! ssh-keygen -l -f "$PRIVATE_KEY" &>/dev/null; then
echo "[ERROR]: The SSH key '$PRIVATE_KEY' is not valid. Is your virtual machine running?"
exit 1
else
echo "[CHECK]: Valid key at $PRIVATE_KEY"
fi
echo "[CHECK]: Valid key at $PRIVATE_KEY"

# Grab first IP or use whatever HOST_IP_FIELD is set to and check that the guest is up
HOST_IP="$(sudo -u "$SUDO_USER" vagrant ssh -c "hostname -I | cut -d' ' -f${HOST_IP_FIELD:-1}" "${1:-default}" 2>/dev/null)"
if [ -z "$HOST_IP" ]; then
echo "[ERROR]: Failed to find ${1:-default}'s IP"
exit 1
fi
HOST_IP="$(vagrant ssh -c "hostname -I | cut -d' ' -f${HOST_IP_FIELD:-1}" 2>/dev/null)"
HOST_IP="${HOST_IP::-1}" # trim

if ! ping -c 1 "$HOST_IP" &>/dev/null; then
echo "[ERROR]: Cannot ping the host IP '$HOST_IP'"
exit 1
fi
echo "[CHECK]: Host at $HOST_IP (${1:-default}) is up"
echo "[CHECK]: Host at $HOST_IP is up"

# Pattern for matching processes running
MATCH_PATTERN="ssh -fNT -i ${PRIVATE_KEY}.*vagrant@"

@ -107,7 +56,7 @@ else
case "$PKILL_ANSWER" in
[yY])
echo "[WARNING]: Killing old vagrant SSH tunnel(s): "
pgrep -f "$MATCH_PATTERN" | tee >(xargs kill -15)
pgrep -f "$MATCH_PATTERN" | tee >(xargs sudo kill -15)
echo
if [ "$(pgrep -afc "$MATCH_PATTERN")" -eq 0 ]; then
ssh_connect
@ -4,5 +4,4 @@
roles:
- base
- jenkins
- proxy
- docker
@ -2,7 +2,6 @@ allow_reboot: true
manage_firewall: true
manage_network: false
network_type: static
locale_default: en_US.UTF-8

packages:
- apache2-utils
@ -5,10 +5,6 @@
listen: reboot_host
when: allow_reboot

- name: Reconfigure locales
ansible.builtin.command: dpkg-reconfigure -f noninteractive locales
listen: reconfigure_locales

- name: Restart WireGuard
ansible.builtin.service:
name: wg-quick@wg0

@ -31,4 +27,4 @@
ansible.builtin.service:
name: smbd
state: restarted
listen: restart_samba
listen: restart_samba
@ -2,4 +2,4 @@
ansible.builtin.file:
path: "~/.ansible/tmp"
state: directory
mode: "700"
mode: 0700
@ -7,7 +7,7 @@
ansible.builtin.template:
src: ddclient.conf.j2
dest: /etc/ddclient.conf
mode: "600"
mode: 0600
register: ddclient_settings

- name: Start ddclient and enable on boot
@ -32,14 +32,14 @@
ansible.builtin.template:
src: fail2ban-ssh.conf.j2
dest: /etc/fail2ban/jail.d/sshd.conf
mode: "640"
mode: 0640
notify: restart_fail2ban

- name: Install Fail2ban IP allow list
ansible.builtin.template:
src: fail2ban-allowlist.conf.j2
dest: /etc/fail2ban/jail.d/allowlist.conf
mode: "640"
mode: 0640
when: fail2ban_ignoreip is defined
notify: restart_fail2ban
@ -11,10 +11,10 @@
ansible.builtin.template:
src: msmtprc.j2
dest: /root/.msmtprc
mode: "600"
mode: 0600

- name: Install /etc/aliases
ansible.builtin.copy:
dest: /etc/aliases
content: "root: {{ mail.rootalias }}"
mode: "644"
mode: 0644
@ -10,6 +10,6 @@
ansible.builtin.template:
src: "interface.j2"
dest: "/etc/network/interfaces.d/{{ item.name }}"
mode: "400"
mode: 0400
loop: "{{ interfaces }}"
notify: reboot_host
@ -3,15 +3,23 @@
name: samba
state: present

- name: Create nologin shell accounts for Samba
ansible.builtin.user:
name: "{{ item.name }}"
state: present
shell: /usr/sbin/nologin
createhome: false
system: yes
loop: "{{ samba.users }}"
when: item.manage_user is defined and item.manage_user is true

- name: Create Samba users
ansible.builtin.command: "smbpasswd -a {{ item.name }}"
ansible.builtin.shell: "smbpasswd -a {{ item.name }}"
args:
stdin: "{{ item.password }}\n{{ item.password }}"
loop: "{{ samba.users }}"
loop_control:
label: "{{ item.name }}"
register: samba_users
changed_when: "'Added user' in samba_users.stdout"
changed_when: "'User added' in samba_users.stdout"

- name: Ensure share directories exist
ansible.builtin.file:

@ -19,14 +27,13 @@
owner: "{{ item.owner }}"
group: "{{ item.group }}"
state: directory
mode: "755"
mode: 0755
loop: "{{ samba.shares }}"

- name: Configure Samba shares
ansible.builtin.template:
src: smb.conf.j2
dest: /etc/samba/smb.conf
mode: "644"
notify: restart_samba

- name: Start smbd and enable on boot
@ -4,88 +4,20 @@
state: present
update_cache: true

- name: Install GPG
ansible.builtin.apt:
name: gpg
state: present

- name: Check for existing GPG keys
ansible.builtin.command: "gpg --list-keys {{ item.id }} 2>/dev/null"
register: gpg_check
loop: "{{ root_gpgkeys }}"
failed_when: false
changed_when: false
when: root_gpgkeys is defined

- name: Import GPG keys
ansible.builtin.command:
"gpg --keyserver {{ item.item.server | default('keys.openpgp.org') }} --recv-key {{ item.item.id }}"
register: gpg_check_import
loop: "{{ gpg_check.results }}"
loop_control:
label: "{{ item.item }}"
changed_when: false
when: root_gpgkeys is defined and item.rc != 0

- name: Check GPG key imports
ansible.builtin.fail:
msg: "{{ item.stderr }}"
loop: "{{ gpg_check_import.results }}"
loop_control:
label: "{{ item.item.item }}"
when: root_gpgkeys is defined and (not item.skipped | default(false)) and ('imported' not in item.stderr)

- name: Install NTPsec
ansible.builtin.apt:
name: ntpsec
state: present

- name: Install locales
ansible.builtin.apt:
name: locales
state: present

- name: Generate locale
community.general.locale_gen:
name: "{{ locale_default }}"
state: present
notify: reconfigure_locales

- name: Set the default locale
ansible.builtin.lineinfile:
path: /etc/default/locale
regexp: "^LANG="
line: "LANG={{ locale_default }}"

- name: Manage root authorized_keys
ansible.builtin.template:
src: authorized_keys.j2
dest: /root/.ssh/authorized_keys
mode: "400"
mode: 0400
when: authorized_keys is defined

- name: Create system user groups
ansible.builtin.group:
name: "{{ item.key }}"
gid: "{{ item.value.gid }}"
state: present
loop: "{{ users | dict2items }}"
loop_control:
label: "{{ item.key }}"
when: users is defined

- name: Create system users
ansible.builtin.user:
name: "{{ item.key }}"
name: "{{ item.name }}"
state: present
uid: "{{ item.value.uid }}"
group: "{{ item.value.gid }}"
shell: "{{ item.value.shell | default('/bin/bash') }}"
create_home: "{{ item.value.home | default(false) }}"
system: "{{ item.value.system | default(false) }}"
loop: "{{ users | dict2items }}"
loop_control:
label: "{{ item.key }}"
shell: "{{ item.shell | default('/bin/bash') }}"
create_home: "{{ item.home | default(false) }}"
loop: "{{ users }}"
when: users is defined

- name: Set authorized_keys for system users

@ -93,9 +25,7 @@
user: "{{ item.key }}"
key: "{{ item.value.key }}"
state: present
loop: "{{ users | dict2items }}"
loop_control:
label: "{{ item.key }}"
loop: "{{ users }}"
when: users is defined and item.value.key is defined

- name: Manage filesystem mounts
@ -18,33 +18,11 @@
src: /etc/wireguard/privatekey
register: wgkey

- name: Check if WireGuard preshared key file exists
ansible.builtin.stat:
path: /etc/wireguard/presharedkey-{{ item.name }}
loop: "{{ wireguard.peers }}"
loop_control:
label: "{{ item.name }}"
register: presharedkey_files

- name: Grab WireGuard preshared key for configuration
ansible.builtin.slurp:
src: /etc/wireguard/presharedkey-{{ item.item.name }}
register: wgshared
loop: "{{ presharedkey_files.results }}"
loop_control:
label: "{{ item.item.name }}"
when: item.stat.exists

- name: Grab WireGuard private key for configuration
ansible.builtin.slurp:
src: /etc/wireguard/privatekey
register: wgkey

- name: Install WireGuard configuration
ansible.builtin.template:
src: wireguard.j2
dest: /etc/wireguard/wg0.conf
mode: "400"
mode: 0400
notify: restart_wireguard

- name: Start WireGuard interface
@ -1,6 +1,4 @@
# {{ ansible_managed }}

[Interface] # {{ ansible_hostname }}
[Interface]
PrivateKey = {{ wgkey['content'] | b64decode | trim }}
Address = {{ wireguard.address }}
{% if wireguard.listenport is defined %}

@ -8,26 +6,8 @@ ListenPort = {{ wireguard.listenport }}
{% endif %}

{% for peer in wireguard.peers %}
{% if peer.name is defined %}
[Peer] # {{ peer.name }}
{% else %}
[Peer]
{% endif %}
PublicKey = {{ peer.publickey }}
{% if peer.presharedkey is defined %}
PresharedKey = {{ peer.presharedkey }}
{% else %}
{% set preshared_key = (
wgshared.results
| selectattr('item.item.name', 'equalto', peer.name)
| first
).content
| default(none)
%}
{% if preshared_key is not none %}
PresharedKey = {{ preshared_key | b64decode | trim }}
{% endif %}
{% endif %}
{% if peer.endpoint is defined %}
Endpoint = {{ peer.endpoint }}
{% endif %}
@ -5,12 +5,7 @@
listen: rebuild_bitwarden

- name: Rebuild Bitwarden
ansible.builtin.command: "{{ bitwarden_root }}/bitwarden.sh rebuild"
listen: rebuild_bitwarden

- name: Reload systemd manager configuration
ansible.builtin.systemd:
daemon_reload: true
ansible.builtin.shell: "{{ bitwarden_root }}/bitwarden.sh rebuild"
listen: rebuild_bitwarden

- name: Start Bitwarden after rebuild

@ -19,10 +14,3 @@
state: started
enabled: true
listen: rebuild_bitwarden

- name: Create Bitwarden's initial log file
ansible.builtin.file:
path: "{{ bitwarden_logs_identity }}/{{ bitwarden_logs_identity_date }}.txt"
state: touch
mode: "644"
listen: touch_bitwarden
@ -7,7 +7,6 @@
ansible.builtin.file:
path: "{{ bitwarden_root }}"
state: directory
mode: "755"

- name: Download Bitwarden script
ansible.builtin.get_url:

@ -23,23 +22,22 @@
mode: u+x

- name: Run Bitwarden installation script
ansible.builtin.command: "{{ bitwarden_root }}/bw_wrapper"
ansible.builtin.shell: "{{ bitwarden_root }}/bw_wrapper"
args:
creates: "{{ bitwarden_root }}/bwdata/config.yml"

- name: Install compose override
- name: Install docker-compose override
ansible.builtin.template:
src: compose.override.yml.j2
dest: "{{ bitwarden_root }}/bwdata/docker/docker-compose.override.yml"
mode: "644"
when: bitwarden_override | default(true)
when: traefik_version is defined
notify: rebuild_bitwarden

- name: Disable bitwarden-nginx HTTP on 80
ansible.builtin.replace:
path: "{{ bitwarden_root }}/bwdata/config.yml"
regexp: "^http_port: 80$"
replace: "http_port: {{ bitwarden_http_port | default('127.0.0.1:9080') }}"
replace: "http_port: 127.0.0.1:8080"
when: not bitwarden_standalone
notify: rebuild_bitwarden

@ -47,7 +45,7 @@
ansible.builtin.replace:
path: "{{ bitwarden_root }}/bwdata/config.yml"
regexp: "^https_port: 443$"
replace: "https_port: {{ bitwarden_https_port | default('127.0.0.1:9443') }}"
replace: "https_port: 127.0.0.1:8443"
when: not bitwarden_standalone
notify: rebuild_bitwarden

@ -78,7 +76,6 @@
ansible.builtin.template:
src: bitwarden.service.j2
dest: "/etc/systemd/system/{{ bitwarden_name }}.service"
mode: "644"
register: bitwarden_systemd
notify: rebuild_bitwarden

@ -86,12 +83,22 @@
ansible.builtin.file:
path: "{{ bitwarden_logs_identity }}"
state: directory
mode: "755"
notify: touch_bitwarden
register: bitwarden_logs

- name: Create Bitwarden's initial log file
ansible.builtin.file:
path: "{{ bitwarden_logs_identity }}/{{ bitwarden_logs_identity_date }}.txt"
state: touch
when: bitwarden_logs.changed

- name: Install Bitwarden's Fail2ban jail
ansible.builtin.template:
src: fail2ban-jail.conf.j2
dest: /etc/fail2ban/jail.d/bitwarden.conf
mode: "640"
notify: restart_fail2ban

- name: Reload systemd manager configuration
ansible.builtin.systemd:
daemon_reload: true
when: bitwarden_systemd.changed
notify: rebuild_bitwarden
@ -23,13 +23,10 @@ send "{{ bitwarden_install_id }}\r"
expect "Enter your installation key:"
send "{{ bitwarden_install_key }}\r"

expect "Enter your region (US/EU) \\\[US\\\]:"
send "US\r"

expect "Do you have a SSL certificate to use? (y/N):"
expect "Do you have a SSL certificate to use? (y/n):"
send "n\r"

expect "Do you want to generate a self-signed SSL certificate? (y/N):"
expect "Do you want to generate a self-signed SSL certificate? (y/n):"
{% if bitwarden_standalone and not bitwarden_production %}
send "y\r"
{% else %}
@ -6,11 +6,13 @@ services:
- traefik
labels:
traefik.http.routers.bitwarden.rule: "Host(`{{ bitwarden_domain }}`)"
traefik.http.routers.bitwarden.entrypoints: {{ bitwarden_entrypoint | default('web') }}
traefik.http.routers.bitwarden.tls: {{ bitwarden_traefik_tls | default('false') }}
traefik.http.routers.bitwarden.entrypoints: websecure
traefik.http.routers.bitwarden.tls.certresolver: letsencrypt
traefik.http.routers.bitwarden.middlewares: "securehttps@file"
traefik.http.services.bitwarden.loadbalancer.server.port: 8080
traefik.docker.network: traefik
traefik.enable: "true"

networks:
traefik:
external: true
@ -1,11 +1,6 @@
docker_apt_keyring: /etc/apt/keyrings/docker.asc
docker_apt_keyring_hash: 1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570
docker_apt_keyring_url: https://download.docker.com/linux/debian/gpg
docker_apt_repo: https://download.docker.com/linux/debian
docker_compose_root: /var/lib/compose
docker_compose_service: compose
docker_compose: "{{ (docker_official | bool) | ternary('/usr/bin/docker compose', '/usr/bin/docker-compose') }}"
docker_official: false
docker_compose: /usr/bin/docker-compose
docker_repos_keys: "{{ docker_repos_path }}/.keys"
docker_repos_keytype: rsa
docker_repos_path: /srv/.compose_repos
docker_repos_path: /srv/compose_repos
@ -2,53 +2,3 @@
ansible.builtin.systemd:
daemon_reload: true
listen: compose_systemd

- name: Find which services had a docker-compose.yml updated
ansible.builtin.set_fact:
compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}"
loop: "{{ compose_update.results }}"
loop_control:
label: "{{ item.item.name }}"
when: item.changed
listen: compose_restart

- name: Find which services had their .env updated
ansible.builtin.set_fact:
compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}"
loop: "{{ compose_env_update.results }}"
loop_control:
label: "{{ item.item.name }}"
when: item.changed
listen: compose_restart

- name: Restart MariaDB
ansible.builtin.service:
name: mariadb
state: restarted
when: not mariadb_restarted
listen: restart_mariadb # hijack handler for early restart

- name: Set MariaDB as restarted
ansible.builtin.set_fact:
mariadb_restarted: true
when: not mariadb_restarted
listen: restart_mariadb

- name: Restart compose services
ansible.builtin.systemd:
state: restarted
name: "{{ docker_compose_service }}@{{ item }}"
loop: "{{ compose_restart_list | default([]) | unique }}"
when: compose_restart_list is defined
listen: compose_restart

- name: Start compose services and enable on boot
ansible.builtin.service:
name: "{{ docker_compose_service }}@{{ item.name }}"
state: started
enabled: true
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ docker_compose_service }}@{{ item.name }}"
when: item.enabled is defined and item.enabled is true
listen: compose_enable
@ -1,67 +1,27 @@
- name: Add official Docker APT key
ansible.builtin.get_url:
url: "{{ docker_apt_keyring_url }}"
dest: "{{ docker_apt_keyring }}"
checksum: "sha256:{{ docker_apt_keyring_hash }}"
mode: "644"
owner: root
group: root
when: docker_official

- name: Remove official Docker APT key
ansible.builtin.file:
path: "{{ docker_apt_keyring }}"
state: absent
when: not docker_official

- name: Add/remove official Docker APT repository
ansible.builtin.apt_repository:
repo: >
deb [arch=amd64 signed-by={{ docker_apt_keyring }}]
{{ docker_apt_repo }} {{ ansible_distribution_release }} stable
state: "{{ 'present' if docker_official else 'absent' }}"
filename: "{{ docker_apt_keyring | regex_replace('^.*/', '') }}"

- name: Install/uninstall Docker from Debian repositories
- name: Install Docker
ansible.builtin.apt:
name: ['docker.io', 'docker-compose', 'containerd', 'runc']
state: "{{ 'absent' if docker_official else 'present' }}"
autoremove: true
name: ['docker.io', 'docker-compose']
state: present
update_cache: true

- name: Install/uninstall Docker from Docker repositories
ansible.builtin.apt:
name: ['docker-ce', 'docker-ce-cli', 'containerd.io',
'docker-buildx-plugin', 'docker-compose-plugin']
state: "{{ 'present' if docker_official else 'absent' }}"
autoremove: true
update_cache: true

- name: Login to private registry
community.docker.docker_login:
registry_url: "{{ docker_login_url | default('') }}"
username: "{{ docker_login_user }}"
password: "{{ docker_login_pass }}"
when: docker_login_user is defined and docker_login_pass is defined

- name: Create docker-compose root
ansible.builtin.file:
path: "{{ docker_compose_root }}"
state: directory
mode: "500"
mode: 0500

- name: Install docker-compose systemd service
ansible.builtin.template:
src: docker-compose.service.j2
dest: "/etc/systemd/system/{{ docker_compose_service }}@.service"
mode: "400"
mode: 0400
notify: compose_systemd

- name: Create directories to clone docker-compose repositories
ansible.builtin.file:
path: "{{ item }}"
state: directory
mode: "400"
mode: 0400
loop:
- "{{ docker_repos_path }}"
- "{{ docker_repos_keys }}"

@ -71,14 +31,7 @@
community.crypto.openssh_keypair:
path: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}"
type: "{{ docker_repos_keytype }}"
comment: "{{ ansible_hostname }}-deploy-key"
mode: "400"
state: present
when: docker_compose_deploy is defined

- name: Check for git installation
ansible.builtin.apt:
name: git
mode: 0400
state: present
when: docker_compose_deploy is defined

@ -86,53 +39,34 @@
ansible.builtin.git:
repo: "{{ item.url }}"
dest: "{{ docker_repos_path }}/{{ item.name }}"
version: "{{ item.version }}"
accept_newhostkey: "{{ item.accept_newhostkey | default(false) }}"
gpg_whitelist: "{{ item.trusted_keys | default([]) }}"
verify_commit: "{{ true if (item.trusted_keys is defined and item.trusted_keys) else false }}"
version: "{{ item.version | default('main') }}"
force: true
key_file: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}"
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.url }}"
when: docker_compose_deploy is defined
loop: "{{ docker_compose_deploy }}"

- name: Create directories for docker-compose projects using the systemd service
ansible.builtin.file:
path: "{{ docker_compose_root }}/{{ item.name }}"
state: directory
mode: "400"
mode: 0400
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined

- name: Synchronize docker-compose.yml
ansible.posix.synchronize:
- name: Copy docker-compose.yml files to their service directories
ansible.builtin.copy:
src: "{{ docker_repos_path }}/{{ item.name }}/{{ item.path | default('docker-compose.yml') }}"
dest: "{{ docker_compose_root }}/{{ item.name }}/docker-compose.yml"
delegate_to: "{{ inventory_hostname }}"
register: compose_update
notify:
- compose_restart
- compose_enable
loop: "{{ docker_compose_deploy | default([]) }}"
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined and docker_compose_deploy | length > 0
remote_src: yes
loop: "{{ docker_compose_deploy }}"
when: docker_compose_deploy is defined

- name: Set environment variables for docker-compose projects
ansible.builtin.template:
src: docker-compose-env.j2
dest: "{{ docker_compose_root }}/{{ item.name }}/.env"
mode: "400"
register: compose_env_update
notify:
- compose_restart
- compose_enable
no_log: "{{ docker_compose_env_nolog | default(true) }}"
mode: 0400
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined and item.env is defined

- name: Add users to docker group

@ -148,4 +82,11 @@
name: docker
state: started
enabled: true
when: docker_managed | default(true)

- name: Start docker-compose services and enable on boot
ansible.builtin.service:
name: "{{ docker_compose_service }}@{{ item.name }}"
state: started
enabled: true
loop: "{{ docker_compose_deploy }}"
when: item.enabled is defined and item.enabled is true
@ -1,10 +1,7 @@
# {{ ansible_managed }}

{% if item.env is defined %}
{% for key, value in item.env.items() %}
{% if value is boolean %}
{{ key }}={{ value | lower }}
{% else %}
{{ key }}={{ value }}
{% endif %}
{% for kvpair in item.env.items() %}
{{ kvpair.0 }}={{ kvpair.1 }}
{% endfor %}
{% endif %}
{% endif %}
@ -1,5 +1,5 @@
[Unit]
Description=%i {{ docker_compose_service }} service
Description=%i docker-compose service
PartOf=docker.service
After=docker.service
@ -1,27 +1,41 @@
- name: Install MySQL module for Ansible
ansible.builtin.apt:
name: python3-pymysql
state: present
- name: Create Gitea directory
ansible.builtin.file:
path: "{{ gitea_root }}"
state: directory

- name: Create Gitea database
community.mysql.mysql_db:
name: "{{ gitea.DB_NAME }}"
name: "{{ gitea_dbname }}"
state: present
login_unix_socket: /var/run/mysqld/mysqld.sock

- name: Create Gitea database user
community.mysql.mysql_user:
name: "{{ gitea.DB_USER }}"
password: "{{ gitea.DB_PASSWD }}"
name: "{{ gitea_dbuser }}"
password: "{{ gitea_dbpass }}"
host: '%'
state: present
priv: "{{ gitea.DB_NAME }}.*:ALL"
priv: "{{ gitea_dbname }}.*:ALL"
login_unix_socket: /var/run/mysqld/mysqld.sock

- name: Create git user
ansible.builtin.user:
name: git
state: present

- name: Git user uid
ansible.builtin.getent:
database: passwd
key: git

- name: Git user gid
ansible.builtin.getent:
database: group
key: git

- name: Create git's .ssh directory
ansible.builtin.file:
path: /home/git/.ssh
mode: "700"
state: directory

- name: Generate git's SSH keys

@ -41,7 +55,6 @@
- name: Create git's authorized_keys file
ansible.builtin.file:
path: /home/git/.ssh/authorized_keys
mode: "600"
state: touch
when: not git_authkeys.stat.exists

@ -55,24 +68,44 @@
ansible.builtin.template:
src: gitea.sh.j2
dest: /usr/local/bin/gitea
mode: "755"
mode: 0755

- name: Install Gitea's docker-compose file
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ gitea_root }}/docker-compose.yml"
notify: restart_gitea

- name: Install Gitea's docker-compose variables
ansible.builtin.template:
src: compose-env.j2
dest: "{{ gitea_root }}/.env"
notify: restart_gitea

- name: Create Gitea's logging directory
ansible.builtin.file:
name: /var/log/gitea
state: directory
mode: "755"

- name: Create Gitea's initial log file
ansible.builtin.file:
name: /var/log/gitea/gitea.log
state: touch

- name: Install Gitea's Fail2ban filter
ansible.builtin.template:
src: fail2ban-filter.conf.j2
dest: /etc/fail2ban/filter.d/gitea.conf
mode: "644"
notify: restart_fail2ban

- name: Install Gitea's Fail2ban jail
ansible.builtin.template:
src: fail2ban-jail.conf.j2
dest: /etc/fail2ban/jail.d/gitea.conf
mode: "640"
notify: restart_fail2ban

- name: Start and enable Gitea service
ansible.builtin.service:
name: "{{ docker_compose_service }}@{{ gitea_name }}"
state: started
enabled: true
roles/mariadb/defaults/main.yml (new file, 3 changes)
@ -0,0 +1,3 @@
mariadb_trust:
- "172.16.0.0/12"
- "192.168.0.0/16"
@ -1,12 +0,0 @@
- name: Restart MariaDB
ansible.builtin.service:
name: mariadb
state: restarted
when: not mariadb_restarted
listen: restart_mariadb

- name: Set MariaDB as restarted
ansible.builtin.set_fact:
mariadb_restarted: true
when: not mariadb_restarted
listen: restart_mariadb
@ -3,28 +3,23 @@
name: mariadb-server
state: present

- name: Set MariaDB restarted fact
ansible.builtin.set_fact:
mariadb_restarted: false

- name: Regather facts for the potentially new docker0 interface
ansible.builtin.setup:

- name: Change the bind-address to allow from docker0
- name: Change the bind-address to allow Docker
ansible.builtin.lineinfile:
path: /etc/mysql/mariadb.conf.d/50-server.cnf
regex: "^bind-address"
line: "bind-address = {{ ansible_facts.docker0.ipv4.address }}"
notify: restart_mariadb
line: "bind-address = 0.0.0.0"
register: mariadb_conf

- name: Flush handlers to ensure MariaDB restarts immediately
ansible.builtin.meta: flush_handlers
tags: restart_mariadb
- name: Restart MariaDB
ansible.builtin.service:
name: mariadb
state: restarted
when: mariadb_conf.changed

- name: Allow database connections from Docker
- name: Allow database connections
community.general.ufw:
rule: allow
port: "3306"
proto: tcp
src: "{{ item }}"
loop: "{{ mariadb_trust | default(['172.16.0.0/12']) }}"
loop: "{{ mariadb_trust }}"
@ -1 +1,11 @@
nextcloud_name: nextcloud
# container names
nextcloud_container: nextcloud
nextcloud_dbcontainer: "{{ nextcloud_container }}-db"

# database settings
nextcloud_dbname: "{{ nextcloud_container }}"
nextcloud_dbuser: "{{ nextcloud_dbname }}"

# host mounts
nextcloud_root: "/opt/{{ nextcloud_container }}/public_html"
nextcloud_dbroot: "/opt/{{ nextcloud_container }}/database"
@ -1,25 +0,0 @@
- name: Set Nextcloud's Trusted Proxy
ansible.builtin.command: >
docker exec --user www-data "{{ nextcloud_name }}"
php occ config:system:set trusted_proxies 0 --value="{{ traefik_name }}"
register: nextcloud_trusted_proxy
changed_when: "nextcloud_trusted_proxy.stdout == 'System config value trusted_proxies => 0 set to string ' ~ traefik_name"
listen: install_nextcloud

- name: Set Nextcloud's Trusted Domain
ansible.builtin.command: >
docker exec --user www-data "{{ nextcloud_name }}"
php occ config:system:set trusted_domains 0 --value="{{ nextcloud.DOMAIN }}"
register: nextcloud_trusted_domains
changed_when: "nextcloud_trusted_domains.stdout == 'System config value trusted_domains => 0 set to string ' ~ nextcloud.DOMAIN"
listen: install_nextcloud

- name: Preform Nextcloud database maintenance
ansible.builtin.command: >
docker exec --user www-data "{{ nextcloud_name }}" {{ item }}
loop:
- "php occ maintenance:mode --on"
- "php occ db:add-missing-indices"
- "php occ db:convert-filecache-bigint"
- "php occ maintenance:mode --off"
listen: install_nextcloud
@ -1,62 +1,109 @@
- name: Install MySQL module for Ansible
ansible.builtin.apt:
name: python3-pymysql
state: present
- name: Create Nextcloud network
community.general.docker_network:
name: "{{ nextcloud_container }}"

- name: Create Nextcloud database
community.mysql.mysql_db:
name: "{{ nextcloud.DB_NAME | default('nextcloud') }}"
state: present
login_unix_socket: /var/run/mysqld/mysqld.sock

- name: Create Nextcloud database user
community.mysql.mysql_user:
name: "{{ nextcloud.DB_USER | default('nextcloud') }}"
password: "{{ nextcloud.DB_PASSWD }}"
host: '%'
state: present
priv: "{{ nextcloud.DB_NAME | default('nextcloud') }}.*:ALL"
login_unix_socket: /var/run/mysqld/mysqld.sock

- name: Start Nextcloud service and enable on boot
ansible.builtin.service:
name: "{{ docker_compose_service }}@{{ nextcloud_name }}"
- name: Start Nextcloud's database container
community.general.docker_container:
name: "{{ nextcloud_dbcontainer }}"
image: mariadb:{{ nextcloud_dbversion }}
state: started
enabled: true
when: nextcloud.ENABLE | default('false')
restart_policy: always
volumes: "{{ nextcloud_dbroot }}:/var/lib/mysql"
networks_cli_compatible: true
networks:
- name: "{{ nextcloud_container }}"
env:
MYSQL_RANDOM_ROOT_PASSWORD: "true"
MYSQL_DATABASE: "{{ nextcloud_dbname }}"
MYSQL_USER: "{{ nextcloud_dbuser }}"
MYSQL_PASSWORD: "{{ nextcloud_dbpass }}"

- name: Start Nextcloud container
community.general.docker_container:
name: "{{ nextcloud_container }}"
image: nextcloud:{{ nextcloud_version }}
state: started
restart_policy: always
volumes: "{{ nextcloud_root }}:/var/www/html"
networks_cli_compatible: true
networks:
- name: "{{ nextcloud_container }}"
- name: traefik
env:
PHP_MEMORY_LIMIT: 1024M
labels:
traefik.http.routers.nextcloud.rule: "Host(`{{ nextcloud_domain }}`)"
traefik.http.routers.nextcloud.entrypoints: websecure
traefik.http.routers.nextcloud.tls.certresolver: letsencrypt
traefik.http.routers.nextcloud.middlewares: "securehttps@file,nextcloud-webdav"
traefik.http.middlewares.nextcloud-webdav.redirectregex.regex: "https://(.*)/.well-known/(card|cal)dav"
traefik.http.middlewares.nextcloud-webdav.redirectregex.replacement: "https://${1}/remote.php/dav/"
traefik.http.middlewares.nextcloud-webdav.redirectregex.permanent: "true"
traefik.docker.network: traefik
traefik.enable: "true"

- name: Grab Nextcloud database container information
community.general.docker_container_info:
name: "{{ nextcloud_dbcontainer }}"
register: nextcloud_dbinfo

- name: Grab Nextcloud container information
community.general.docker_container_info:
name: "{{ nextcloud_name }}"
name: "{{ nextcloud_container }}"
register: nextcloud_info

- name: Wait for Nextcloud to become available
ansible.builtin.wait_for:
host: "{{ nextcloud_info.container.NetworkSettings.Networks.traefik.IPAddress }}"
delay: 10
port: 80

- name: Check Nextcloud status
ansible.builtin.command: >
docker exec --user www-data "{{ nextcloud_name }}" php occ status
ansible.builtin.command: "docker exec --user www-data {{ nextcloud_container }}
php occ status"
register: nextcloud_status
changed_when: false
args:
removes: "{{ nextcloud_root }}/config/CAN_INSTALL"

- name: Wait for Nextcloud database to become available
ansible.builtin.wait_for:
host: "{{ nextcloud_dbinfo.container.NetworkSettings.Networks.nextcloud.IPAddress }}"
port: 3306

- name: Install Nextcloud
ansible.builtin.command: >
docker exec --user www-data {{ nextcloud_name }}
php occ maintenance:install
--database "mysql"
--database-host "{{ nextcloud.DB_HOST | default('host.docker.internal') }}"
--database-name "{{ nextcloud.DB_NAME | default('nextcloud') }}"
--database-user "{{ nextcloud.DB_USER | default('nextcloud') }}"
--database-pass "{{ nextcloud.DB_PASSWD }}"
--admin-user "{{ nextcloud.ADMIN_USER | default('admin') }}"
--admin-pass "{{ nextcloud.ADMIN_PASSWD }}"
ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }}
php occ maintenance:install
--database "mysql"
--database-host "{{ nextcloud_dbcontainer }}"
--database-name "{{ nextcloud_dbname }}"
--database-user "{{ nextcloud_dbuser }}"
--database-pass "{{ nextcloud_dbpass }}"
--admin-user "{{ nextcloud_admin }}"
--admin-pass "{{ nextcloud_pass }}"'
register: nextcloud_install
when: nextcloud_status.stderr[:26] == "Nextcloud is not installed"
changed_when: nextcloud_install.stdout == "Nextcloud was successfully installed"
notify: install_nextcloud
when:
- nextcloud_status.stdout[:26] == "Nextcloud is not installed"
- nextcloud_domain is defined

- name: Set Nextcloud's Trusted Proxy
ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }}
php occ config:system:set trusted_proxies 0
--value="{{ traefik_name }}"'
when: nextcloud_install.changed

- name: Set Nextcloud's Trusted Domain
ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }}
php occ config:system:set trusted_domains 0
--value="{{ nextcloud_domain }}"'
when: nextcloud_install.changed

- name: Preform Nextcloud database maintenance
ansible.builtin.command: "docker exec --user www-data {{ nextcloud_container }} {{ item }}"
loop:
- "php occ maintenance:mode --on"
- "php occ db:add-missing-indices"
- "php occ db:convert-filecache-bigint"
- "php occ maintenance:mode --off"
when: nextcloud_install.changed

- name: Install Nextcloud background jobs cron
ansible.builtin.cron:

@ -64,3 +111,8 @@
minute: "*/5"
job: "/usr/bin/docker exec -u www-data nextcloud /usr/local/bin/php -f /var/www/html/cron.php"
user: root

- name: Remove Nextcloud's CAN_INSTALL file
ansible.builtin.file:
path: "{{ nextcloud_root }}/config/CAN_INSTALL"
state: absent
@ -1 +0,0 @@
cached_dhparams_pem: /vagrant/scratch/dhparams.pem
@ -1,13 +1,3 @@
- name: Enable nginx sites configuration
ansible.builtin.file:
src: "/etc/nginx/sites-available/{{ item.item.domain }}.conf"
dest: "/etc/nginx/sites-enabled/{{ item.item.domain }}.conf"
state: link
mode: "400"
loop: "{{ nginx_sites.results }}"
when: item.changed
listen: reload_nginx

- name: Reload nginx
ansible.builtin.service:
name: nginx
@ -10,19 +10,6 @@
state: started
enabled: true

- name: Check for cached dhparams.pem file
ansible.builtin.stat:
path: "{{ cached_dhparams_pem }}"
register: dhparams_file

- name: Copy cached dhparams.pem to /etc/ssl/
ansible.builtin.copy:
src: "{{ cached_dhparams_pem }}"
dest: /etc/ssl/dhparams.pem
mode: "600"
remote_src: true
when: dhparams_file.stat.exists

- name: Generate DH Parameters
community.crypto.openssl_dhparam:
path: /etc/ssl/dhparams.pem

@ -32,18 +19,28 @@
ansible.builtin.template:
src: nginx.conf.j2
dest: /etc/nginx/nginx.conf
mode: "644"
mode: 0644
notify: reload_nginx

- name: Install nginx sites configuration
ansible.builtin.template:
src: server-nginx.conf.j2
dest: "/etc/nginx/sites-available/{{ item.domain }}.conf"
mode: "400"
mode: 0400
loop: "{{ proxy.servers }}"
notify: reload_nginx
register: nginx_sites

- name: Enable nginx sites configuration
ansible.builtin.file:
src: "/etc/nginx/sites-available/{{ item.item.domain }}.conf"
dest: "/etc/nginx/sites-enabled/{{ item.item.domain }}.conf"
state: link
mode: 0400
loop: "{{ nginx_sites.results }}"
when: item.changed
notify: reload_nginx

- name: Generate self-signed certificate
ansible.builtin.command: 'openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes \
-subj "/C=US/ST=Local/L=Local/O=Org/OU=IT/CN=example.com" \

@ -64,14 +61,14 @@
ansible.builtin.template:
src: cloudflare.ini.j2
dest: /root/.cloudflare.ini
mode: "400"
mode: 0400
when: proxy.production is defined and proxy.production and proxy.dns_cloudflare is defined

- name: Create nginx post renewal hook directory
ansible.builtin.file:
path: /etc/letsencrypt/renewal-hooks/post
state: directory
mode: "500"
mode: 0500
when: proxy.production is defined and proxy.production

- name: Install nginx post renewal hook
@ -35,13 +35,7 @@ server {
client_max_body_size {{ item.client_max_body_size }};
{% endif %}
location / {
{% if item.allowedips is defined %}
{% for ip in item.allowedips %}
allow {{ ip }};
{% endfor %}
deny all;
{% endif %}
{% if item.restrict is defined and item.restrict %}
{% if item.restrict is defined and item.restrict %}
auth_basic "{{ item.restrict_name | default('Restricted Access') }}";
auth_basic_user_file {{ item.restrict_file | default('/etc/nginx/.htpasswd') }};
proxy_set_header Authorization "";
@ -21,6 +21,20 @@
loop: "{{ traefik_external }}"
when: traefik_external is defined

- name: Install Traefik's docker-compose file
ansible.builtin.template:
src: docker-compose.yml.j2
dest: "{{ traefik_root }}/docker-compose.yml"
mode: 0400
notify: restart_traefik

- name: Install Traefik's docker-compose variables
ansible.builtin.template:
src: compose-env.j2
dest: "{{ traefik_root }}/.env"
mode: 0400
notify: restart_traefik

- name: Install static Traefik configuration
ansible.builtin.template:
src: traefik.yml.j2

@ -28,9 +42,8 @@
mode: 0400
notify: restart_traefik

- name: Start Traefik service and enable on boot
- name: Start and enable Traefik service
ansible.builtin.service:
name: "{{ docker_compose_service }}@{{ traefik_name }}"
state: started
enabled: true
when: traefik.ENABLED | default('false')