Compare commits

7 Commits

- 7c2def16a5  testing  (2023-10-09 00:30:54 -04:00)
- 0377a5e642  Add option for private OCI registry auth  (2023-09-29 22:18:59 -04:00)
- 2e02efcbb7  Add Makefile, roles_path, and SSH tunnel variable  (2023-09-26 21:14:06 -04:00)
- 8fed63792b  Ask permission for starting vagrant SSH tunnels  (2023-09-16 00:04:58 -04:00)
- 2c4fcbacc3  Introduce forward-ssh.sh method & reorganize  (2023-09-15 23:46:45 -04:00)
  - Abandoned update-hosts.sh in favor of loopback SSH forwarding
  - Adopted *.local.krislamo.org as a wildcard loopback domain
  - Bound Traefik to ports 443/80 on Dockerbox dev
  - Removed outdated Gitea config from Dockerbox
  - Relocated production playbooks to a new directory
- b81372c07a  Fix the Vagrantfile for Github runners  (2023-08-30 19:45:42 -04:00)
- 9b5be29a1a  Update Vagrantfile to use external settings  (2023-08-21 18:46:47 -04:00)

31 changed files with 255 additions and 140 deletions

.gitignore (vendored, 2 lines changed)

@@ -1,4 +1,4 @@
.playbook
.vagrant
.vagrant*
.vscode
/environments/

Makefile (new file, 10 lines)

@@ -0,0 +1,10 @@
.PHONY: clean install
all: install
install:
vagrant up --no-destroy-on-error
sudo ./forward-ssh.sh
clean:
vagrant destroy -f && rm -rf .vagrant
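
With this Makefile in place, the dev loop presumably reduces to two commands (a usage sketch; the dockerbox playbook name is an assumption based on the play rename further down):

PLAYBOOK=dockerbox make   # runs vagrant up --no-destroy-on-error, then sudo ./forward-ssh.sh
make clean                # runs vagrant destroy -f && rm -rf .vagrant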

Vagrantfile (vendored, 50 lines changed)

@@ -1,43 +1,41 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
SSH_FORWARD=ENV["SSH_FORWARD"]
if !(SSH_FORWARD == "true")
SSH_FORWARD = false
require 'yaml'
settings_path = '.vagrant.yml'
settings = {}
if File.exist?(settings_path)
settings = YAML.load_file(settings_path)
end
PLAYBOOK=ENV["PLAYBOOK"]
if !PLAYBOOK
if File.exist?('.playbook')
PLAYBOOK = IO.read('.playbook').split("\n")[0]
end
VAGRANT_BOX = settings['VAGRANT_BOX'] || 'debian/bookworm64'
VAGRANT_CPUS = settings['VAGRANT_CPUS'] || 2
VAGRANT_MEM = settings['VAGRANT_MEM'] || 2048
SSH_FORWARD = settings['SSH_FORWARD'] || false
if !PLAYBOOK || PLAYBOOK.empty?
PLAYBOOK = "\nERROR: Set env PLAYBOOK"
end
else
File.write(".playbook", PLAYBOOK)
# Default to shell environment variable: PLAYBOOK (priority #1)
PLAYBOOK=ENV["PLAYBOOK"]
if !PLAYBOOK || PLAYBOOK.empty?
# PLAYBOOK setting in .vagrant.yml (priority #2)
PLAYBOOK = settings['PLAYBOOK'] || 'default'
end
Vagrant.configure("2") do |config|
config.vm.box = "debian/bullseye64"
config.vm.box = VAGRANT_BOX
config.vm.network "private_network", type: "dhcp"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.synced_folder "./scratch", "/vagrant/scratch"
config.ssh.forward_agent = SSH_FORWARD
# Machine Name
config.vm.define :moxie do |moxie| #
end
# Libvirt provider
config.vm.provider :libvirt do |libvirt|
libvirt.cpus = 2
libvirt.memory = 4096
libvirt.default_prefix = ""
libvirt.cpus = VAGRANT_CPUS
libvirt.memory = VAGRANT_MEM
end
config.vm.provider "virtualbox" do |vbox|
vbox.memory = 4096
# Virtualbox provider
config.vm.provider :virtualbox do |vbox|
vbox.cpus = VAGRANT_CPUS
vbox.memory = VAGRANT_MEM
end
# Provision with Ansible
@@ -45,6 +43,6 @@ Vagrant.configure("2") do |config|
ENV['ANSIBLE_ROLES_PATH'] = File.dirname(__FILE__) + "/roles"
ansible.compatibility_mode = "2.0"
ansible.playbook = "dev/" + PLAYBOOK + ".yml"
ansible.raw_arguments = ["--diff"]
end
end
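
The rewritten Vagrantfile reads optional overrides from a .vagrant.yml settings file; a minimal sketch of that file, using the defaults visible in the diff (the SSH_FORWARD and PLAYBOOK values here are illustrative):

# .vagrant.yml (kept out of git by the new .vagrant* ignore rule)
VAGRANT_BOX: debian/bookworm64
VAGRANT_CPUS: 2
VAGRANT_MEM: 2048
SSH_FORWARD: true
PLAYBOOK: dockerbox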

(file name not shown)

@@ -1,6 +1,7 @@
[defaults]
inventory = ./environments/development
interpreter_python = /usr/bin/python3
roles_path = ./roles
[connection]
pipelining = true

dev/default.yml (new file, 4 lines)

@@ -0,0 +1,4 @@
- name: Install 'default' aka nothing
hosts: all
become: true
tasks: []
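
Because the Vagrantfile above falls back to PLAYBOOK = 'default', a plain vagrant up provisions the box with this empty play and applies no roles; selecting a real playbook looks something like this (a sketch; the dockerbox name is an assumption):

PLAYBOOK=dockerbox vagrant up         # first boot
PLAYBOOK=dockerbox vagrant provision  # re-run Ansible on an existing box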

(file name not shown)

@@ -1,4 +1,4 @@
- name: Install Docker Box Server
- name: Install Dockerbox Server
hosts: all
become: true
vars_files:

(file name not shown)

@@ -9,14 +9,14 @@ docker_users:
# traefik
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.vm.krislamo.org
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
#traefik_acme_email: realemail@example.com # Let's Encrypt settings
#traefik_production: true
# bitwarden
# Get Installation ID & Key at https://bitwarden.com/host/
bitwarden_domain: vault.vm.krislamo.org
bitwarden_domain: vault.local.krislamo.org
bitwarden_dbpass: password
bitwarden_install_id: 4ea840a3-532e-4cb6-a472-abd900728b23
bitwarden_install_key: 1yB3Z2gRI0KnnH90C6p

(file name not shown)

@@ -6,8 +6,34 @@ manage_network: false
docker_users:
- vagrant
#docker_login_url: https://myregistry.example.com
#docker_login_user: myuser
#docker_login_pass: YOUR_PASSWD
docker_compose_deploy:
- name: docs
url: git@git.krislamo.org:kris/homelab-docs.git
version: main
sync: true
- name: traefik
url: https://github.com/krislamo/traefik
version: 4d3391b1644e87dec2d60d3315401e4db2bbc943
enabled: true
accept_newhostkey: true # Consider verifying manually instead
# Must manually add my GPG key to root's keyring
#trusted_keys:
# - FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
VERSION: "2.10"
- name: traefik2
url: https://github.com/krislamo/traefik
version: 4d3391b1644e87dec2d60d3315401e4db2bbc943
enabled: true
accept_newhostkey: true # Consider verifying manually instead
# Must manually add my GPG key to root's keyring
#trusted_keys:
# - FBF673CEEC030F8AECA814E73EDA9C3441EDA925
env:
VERSION: "2.10"
DOMAIN: traefik2.local.krislamo.org
ROUTER: traefik2
NETWORK: traefik2
WEB_PORT: 127.0.0.1:8000:80
WEBSECURE_PORT: 127.0.0.1:4443:443
LOCAL_POST: 127.0.0.1:8444:8443
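
Each env: map above is rendered into the project's .env file by the docker-compose-env.j2 template changed later in this diff; for the traefik2 entry the result should look roughly like this (a sketch; the file lands under docker_compose_root, whose value is not shown here):

# Ansible managed
VERSION=2.10
DOMAIN=traefik2.local.krislamo.org
ROUTER=traefik2
NETWORK=traefik2
WEB_PORT=127.0.0.1:8000:80
WEBSECURE_PORT=127.0.0.1:4443:443
LOCAL_POST=127.0.0.1:8444:8443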

(file name not shown)

@@ -9,40 +9,36 @@ docker_users:
# traefik
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.vm.krislamo.org
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
traefik_web_entry: 0.0.0.0:80
traefik_websecure_entry: 0.0.0.0:443
#traefik_acme_email: realemail@example.com # Let's Encrypt settings
#traefik_production: true
traefik_http_only: true # if behind reverse-proxy
#traefik_http_only: true # if behind reverse-proxy
# nextcloud
nextcloud_version: stable
nextcloud_admin: admin
nextcloud_pass: password
nextcloud_domain: cloud.vm.krislamo.org
nextcloud_domain: cloud.local.krislamo.org
nextcloud_dbversion: latest
nextcloud_dbpass: password
# gitea
gitea_domain: git.vm.krislamo.org
gitea_version: 1
gitea_dbversion: latest
gitea_dbpass: password
# jenkins
jenkins_version: lts
jenkins_domain: jenkins.vm.krislamo.org
jenkins_domain: jenkins.local.krislamo.org
# prometheus (includes grafana)
prom_version: latest
prom_domain: prom.vm.krislamo.org
prom_domain: prom.local.krislamo.org
grafana_version: latest
grafana_domain: grafana.vm.krislamo.org
grafana_domain: grafana.local.krislamo.org
prom_targets: "['10.0.2.15:9100']"
# nginx
nginx_domain: nginx.vm.krislamo.org
nginx_domain: nginx.local.krislamo.org
nginx_name: staticsite
nginx_repo_url: https://git.krislamo.org/kris/example-website/
nginx_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin

(file name not shown)

@@ -1,4 +1,4 @@
base_domain: vm.krislamo.org
base_domain: local.krislamo.org
# base
allow_reboot: false

(file name not shown)

@@ -5,14 +5,14 @@ docker_users:
# traefik
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.vm.krislamo.org
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
# container settings
nextcloud_version: stable
nextcloud_admin: admin
nextcloud_pass: password
nextcloud_domain: cloud.vm.krislamo.org
nextcloud_domain: cloud.local.krislamo.org
# database settings
nextcloud_dbversion: latest

(file name not shown)

@@ -9,13 +9,13 @@ docker_users:
# traefik
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.vm.krislamo.org
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
#traefik_acme_email: realemail@example.com # Let's Encrypt settings
#traefik_production: true
# nginx
nginx_domain: nginx.vm.krislamo.org
nginx_domain: nginx.local.krislamo.org
nginx_name: staticsite
nginx_repo_url: https://git.krislamo.org/kris/example-website/
nginx_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin

(file name not shown)

@@ -1,4 +1,4 @@
base_domain: vm.krislamo.org
base_domain: local.krislamo.org
# base
allow_reboot: false

(file name not shown)

@@ -9,14 +9,14 @@ docker_users:
# traefik
traefik_version: latest
traefik_dashboard: true
traefik_domain: traefik.vm.krislamo.org
traefik_domain: traefik.local.krislamo.org
traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin
#traefik_acme_email: realemail@example.com # Let's Encrypt settings
#traefik_production: true
# container settings
wordpress_version: latest
wordpress_domain: wordpress.vm.krislamo.org
wordpress_domain: wordpress.local.krislamo.org
wordpress_multisite: true
# database settings

(file name not shown; file deleted)

@@ -1,25 +0,0 @@
# Copyright (C) 2020 Kris Lamoureux
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
- name: Install Docker Box Server
hosts: dockerhosts
become: true
roles:
- base
- docker
- traefik
- nextcloud
- jenkins
- prometheus
- nginx

forward-ssh.sh (new executable file, 90 lines)

@@ -0,0 +1,90 @@
#!/bin/bash
# Finds the SSH private key under ./.vagrant and connects to
# the Vagrant box, port forwarding localhost ports: 8443, 80, 443
# Root check
if [ "$EUID" -ne 0 ]; then
echo "[ERROR]: Please run script as root"
exit 1
fi
# Clean environment
unset PRIVATE_KEY
unset HOST_IP
unset MATCH_PATTERN
unset PKILL_ANSWER
# Function to create the SSH tunnel
function ssh_connect {
read -rp "Start a new vagrant SSH tunnel? [y/N] " PSTART_ANSWER
echo
case "$PSTART_ANSWER" in
[yY])
printf "[INFO]: Starting new vagrant SSH tunnel on PID "
sudo -u "$USER" ssh -fNT -i "$PRIVATE_KEY" \
-L 8443:localhost:8443 \
-L 80:localhost:80 \
-L 443:localhost:443 \
-o UserKnownHostsFile=/dev/null \
-o StrictHostKeyChecking=no \
vagrant@"$HOST_IP" 2>/dev/null
sleep 2
pgrep -f "$MATCH_PATTERN"
;;
*)
echo "[INFO]: Delined to start a new vagrant SSH tunnel"
exit 0
;;
esac
}
# Check for valid PRIVATE_KEY location
PRIVATE_KEY="$(find .vagrant -name "private_key" 2>/dev/null)"
if ! ssh-keygen -l -f "$PRIVATE_KEY" &>/dev/null; then
echo "[ERROR]: The SSH key '$PRIVATE_KEY' is not valid. Is your virtual machine running?"
exit 1
fi
echo "[CHECK]: Valid key at $PRIVATE_KEY"
# Grab first IP or use whatever HOST_IP_FIELD is set to and check that the guest is up
HOST_IP="$(vagrant ssh -c "hostname -I | cut -d' ' -f${HOST_IP_FIELD:-1}" 2>/dev/null)"
HOST_IP="${HOST_IP::-1}" # trim
if ! ping -c 1 "$HOST_IP" &>/dev/null; then
echo "[ERROR]: Cannot ping the host IP '$HOST_IP'"
exit 1
fi
echo "[CHECK]: Host at $HOST_IP is up"
# Pattern for matching processes running
MATCH_PATTERN="ssh -fNT -i ${PRIVATE_KEY}.*vagrant@"
# Check amount of processes that match the pattern
if [ "$(pgrep -afc "$MATCH_PATTERN")" -eq 0 ]; then
ssh_connect
else
# Processes found, so prompt to kill remaining ones then start tunnel
printf "\n[WARNING]: Found processes running:\n"
pgrep -fa "$MATCH_PATTERN"
printf '\n'
read -rp "Would you like to kill these processes? [y/N] " PKILL_ANSWER
echo
case "$PKILL_ANSWER" in
[yY])
echo "[WARNING]: Killing old vagrant SSH tunnel(s): "
pgrep -f "$MATCH_PATTERN" | tee >(xargs kill -15)
echo
if [ "$(pgrep -afc "$MATCH_PATTERN")" -eq 0 ]; then
ssh_connect
else
echo "[ERROR]: Unable to kill processes:"
pgrep -f "$MATCH_PATTERN"
exit 1
fi
;;
*)
echo "[INFO]: Declined to kill existing processes"
exit 0
;;
esac
fi
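
Combined with the wildcard loopback domain from the commit message, the intended flow appears to be (a sketch; it assumes *.local.krislamo.org resolves to 127.0.0.1 and that the dev Traefik binds ports 80/443):

sudo ./forward-ssh.sh                        # or simply: make
curl -kI https://traefik.local.krislamo.org  # reaches the guest through the forwarded local port 443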

playbooks/dockerbox.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
- name: Install Dockerbox Server
hosts: "{{ PLAYBOOK_HOST | default('none') }}"
become: true
roles:
- base
- docker
- traefik
- nextcloud
- jenkins
- prometheus
- nginx
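
Unlike the deleted play earlier in this diff, which hardcoded hosts: dockerhosts, this relocated copy takes its target from an extra variable, so a production run would look something like this (a sketch; the inventory path and host group are assumptions):

ansible-playbook playbooks/dockerbox.yml -i environments/production -e PLAYBOOK_HOST=dockerhosts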

(file name not shown)

@@ -4,6 +4,11 @@
state: present
update_cache: true
- name: Install GPG
ansible.builtin.apt:
name: gpg
state: present
- name: Manage root authorized_keys
ansible.builtin.template:
src: authorized_keys.j2

(file name not shown)

@@ -3,4 +3,4 @@ docker_compose_service: compose
docker_compose: /usr/bin/docker-compose
docker_repos_keys: "{{ docker_repos_path }}/.keys"
docker_repos_keytype: rsa
docker_repos_path: /srv/compose_repos
docker_repos_path: /srv/.compose_repos

(file name not shown)

@@ -2,3 +2,29 @@
ansible.builtin.systemd:
daemon_reload: true
listen: compose_systemd
- name: Find which services had a docker-compose.yml updated
set_fact:
compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}"
loop: "{{ compose_update.results }}"
loop_control:
label: "{{ item.item.name }}"
when: item.changed
listen: compose_restart
- name: Find which services had their .env updated
set_fact:
compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}"
loop: "{{ compose_env_update.results }}"
loop_control:
label: "{{ item.item.name }}"
when: item.changed
listen: compose_restart
- name: Restart {{ docker_compose_service }} services
ansible.builtin.systemd:
state: restarted
name: "{{ docker_compose_service }}@{{ item }}"
loop: "{{ compose_restart_list | unique }}"
when: compose_restart_list is defined
listen: compose_restart
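
These handlers collect the names of any project whose docker-compose.yml or .env changed (via the compose_update and compose_env_update registrations in the tasks below) and restart only those units; with the default docker_compose_service of compose, a changed traefik2 checkout would effectively trigger something like:

systemctl restart compose@traefik2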

(file name not shown)

@@ -4,6 +4,13 @@
state: present
update_cache: true
- name: Login to private registry
community.docker.docker_login:
registry_url: "{{ docker_login_url | default('') }}"
username: "{{ docker_login_user }}"
password: "{{ docker_login_pass }}"
when: docker_login_user is defined and docker_login_pass is defined
- name: Create docker-compose root
ansible.builtin.file:
path: "{{ docker_compose_root }}"
@@ -31,6 +38,7 @@
community.crypto.openssh_keypair:
path: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}"
type: "{{ docker_repos_keytype }}"
comment: "{{ ansible_hostname }}-deploy-key"
mode: 0400
state: present
when: docker_compose_deploy is defined
@@ -39,11 +47,15 @@
ansible.builtin.git:
repo: "{{ item.url }}"
dest: "{{ docker_repos_path }}/{{ item.name }}"
version: "{{ item.version | default('main') }}"
force: true
version: "{{ item.version }}"
accept_newhostkey: "{{ item.accept_newhostkey | default('false') }}"
gpg_whitelist: "{{ item.trusted_keys | default([]) }}"
verify_commit: "{{ true if (item.trusted_keys is defined and item.trusted_keys) else false }}"
key_file: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}"
when: docker_compose_deploy is defined
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.url }}"
when: docker_compose_deploy is defined
- name: Create directories for docker-compose projects using the systemd service
ansible.builtin.file:
@@ -51,30 +63,32 @@
state: directory
mode: 0400
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined
- name: Copy docker-compose project directory
ansible.builtin.copy:
src: "{{ docker_repos_path }}/{{ item.name }}/"
dest: "{{ docker_compose_root }}/{{ item.name }}/"
remote_src: yes
loop: "{{ docker_compose_deploy }}"
when: docker_compose_deploy is defined and item.sync | default(false)
- name: Copy docker-compose.yml files to their service directories
ansible.builtin.copy:
- name: Synchronize docker-compose.yml
ansible.posix.synchronize:
src: "{{ docker_repos_path }}/{{ item.name }}/{{ item.path | default('docker-compose.yml') }}"
dest: "{{ docker_compose_root }}/{{ item.name }}/docker-compose.yml"
remote_src: yes
delegate_to: "{{ inventory_hostname }}"
register: compose_update
notify: compose_restart
loop: "{{ docker_compose_deploy }}"
when: docker_compose_deploy is defined and not item.sync | default(false)
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined
- name: Set environment variables for docker-compose projects
ansible.builtin.template:
src: docker-compose-env.j2
dest: "{{ docker_compose_root }}/{{ item.name }}/.env"
mode: 0400
register: compose_env_update
notify: compose_restart
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ item.name }}"
when: docker_compose_deploy is defined and item.env is defined
- name: Add users to docker group
@@ -97,4 +111,6 @@
state: started
enabled: true
loop: "{{ docker_compose_deploy }}"
loop_control:
label: "{{ docker_compose_service }}@{{ item.name }}"
when: item.enabled is defined and item.enabled is true

(file name not shown)

@@ -1,5 +1,4 @@
# {{ ansible_managed }}
{% if item.env is defined %}
{% for kvpair in item.env.items() %}
{{ kvpair.0 }}={{ kvpair.1 }}

update-hosts.sh (file deleted, 42 lines)

@@ -1,42 +0,0 @@
#!/bin/bash
COMMENT="Project Moxie"
DOMAIN="vm.krislamo.org"
HOST[0]="traefik.${DOMAIN}"
HOST[1]="cloud.${DOMAIN}"
HOST[2]="git.${DOMAIN}"
HOST[3]="jenkins.${DOMAIN}"
HOST[4]="prom.${DOMAIN}"
HOST[5]="grafana.${DOMAIN}"
HOST[6]="nginx.${DOMAIN}"
HOST[7]="vault.${DOMAIN}"
HOST[8]="wordpress.${DOMAIN}"
HOST[9]="site1.wordpress.${DOMAIN}"
HOST[10]="site2.wordpress.${DOMAIN}"
HOST[11]="unifi.${DOMAIN}"
HOST[12]="jellyfin.${DOMAIN}"
# Get Vagrantbox guest IP
VAGRANT_OUTPUT=$(vagrant ssh -c "hostname -I | cut -d' ' -f2" 2>/dev/null)
# Remove ^M from the end
[ ${#VAGRANT_OUTPUT} -gt 1 ] && IP=${VAGRANT_OUTPUT::-1}
echo "Purging project addresses from /etc/hosts"
sudo sed -i "s/# $COMMENT//g" /etc/hosts
for address in "${HOST[@]}"; do
sudo sed -i "/$address/d" /etc/hosts
done
# Remove trailing newline
sudo sed -i '${/^$/d}' /etc/hosts
if [ -n "$IP" ]; then
echo -e "Adding new addresses...\n"
echo -e "# $COMMENT" | sudo tee -a /etc/hosts
for address in "${HOST[@]}"; do
echo -e "$IP\t$address" | sudo tee -a /etc/hosts
done
else
echo "Cannot find address. Is the Vagrant box running?"
fi