Compare commits

1 commit

| Author | SHA1 | Date |
|---|---|---|
|  | ba44547066 |  |

**.github/workflows/vagrant.yml** (vendored, 40 changed lines)

| @@ -1,40 +0,0 @@ | ||||
| name: homelab-ci | ||||
|  | ||||
| on: | ||||
|   push: | ||||
|     branches: | ||||
|       - github_actions | ||||
|       # - main | ||||
|       # - testing | ||||
|  | ||||
| jobs: | ||||
|   homelab-ci: | ||||
|     runs-on: macos-latest | ||||
|  | ||||
|     steps: | ||||
|       - uses: actions/checkout@v3 | ||||
|  | ||||
|       - name: Cache Vagrant boxes | ||||
|         uses: actions/cache@v3 | ||||
|         with: | ||||
|           path: ~/.vagrant.d/boxes | ||||
|           key: ${{ runner.os }}-vagrant-${{ hashFiles('Vagrantfile') }} | ||||
|           restore-keys: | | ||||
|             ${{ runner.os }}-vagrant- | ||||
|  | ||||
|       - name: Install Ansible | ||||
|         run: brew install ansible@7 | ||||
|  | ||||
|       - name: Software Versions | ||||
|         run: | | ||||
|           printf "VirtualBox " | ||||
|           vboxmanage --version | ||||
|           vagrant --version | ||||
|           export PATH="/usr/local/opt/ansible@7/bin:$PATH" | ||||
|           ansible --version | ||||
|  | ||||
|       - name: Vagrant Up with Dockerbox Playbook | ||||
|         run: | | ||||
|           export PATH="/usr/local/opt/ansible@7/bin:$PATH" | ||||
|           PLAYBOOK=dockerbox vagrant up | ||||
|           vagrant ssh -c "docker ps" | ||||
							
								
								
									
**.gitignore** (vendored, 6 changed lines)

| @@ -1,5 +1,3 @@ | ||||
| .ansible* | ||||
| /environments/ | ||||
| .vagrant | ||||
| .playbook | ||||
| .vagrant* | ||||
| .vscode | ||||
| /environments/ | ||||
|   | ||||
							
								
								
									
**Makefile** (10 changed lines)

| @@ -1,10 +0,0 @@ | ||||
| .PHONY: clean install | ||||
|  | ||||
| all: install | ||||
|  | ||||
| install: | ||||
| 	vagrant up --no-destroy-on-error | ||||
| 	sudo ./forward-ssh.sh | ||||
|  | ||||
| clean: | ||||
| 	vagrant destroy -f && rm -rf .vagrant | ||||
							
								
								
									
**README.md** (69 changed lines)

| @@ -1,76 +1,41 @@ | ||||
| # Homelab | ||||
| # Project Moxie | ||||
|  | ||||
| This project is my personal IT homelab initiative for self-hosting and | ||||
| exploring Free and Open Source Software (FOSS) infrastructure. As a technology | ||||
| enthusiast and professional, this project is primarily a practical tool for | ||||
| hosting services. It serves as a playground for engaging with systems | ||||
| technology in functional, intriguing, and gratifying ways. Self-hosting | ||||
| empowers individuals to govern their digital space, ensuring that their online | ||||
| environments reflect personal ethics rather than centralized entities' opaque | ||||
| policies. | ||||
|  | ||||
| Built on Debian Stable, this project utilizes Ansible and Vagrant, providing | ||||
| relatively easy-to-use reproducible ephemeral environments to test | ||||
| infrastructure automation before pushing to live systems. | ||||
| Project Moxie is a personal IT homelab project written in Ansible and executed by Jenkins. It is a growing collection of infrastructure as code (IaC) I write out of curiosity and for reference purposes, keeping a handful of beneficial projects managed and secured. | ||||
|  | ||||
| ## Quick Start | ||||
|  | ||||
| To configure a local virtual machine for testing, follow these simple steps. | ||||
|  | ||||
| ### Prerequisites | ||||
|  | ||||
| Vagrant and VirtualBox are used to develop Project Moxie. You will need to install these before continuing. | ||||
|  | ||||
| ### Installation | ||||
|  | ||||
| 1. Clone this repository | ||||
|    ``` | ||||
|    git clone https://git.krislamo.org/kris/homelab | ||||
|    ``` | ||||
|    Optionally clone from the GitHub mirror instead: | ||||
|    ``` | ||||
|    git clone https://github.com/krislamo/homelab | ||||
|    git clone https://github.com/krislamo/moxie | ||||
|    ``` | ||||
| 2. Set the `PLAYBOOK` environmental variable to a development playbook name in the `dev/` directory | ||||
|  | ||||
|    To list available options in the `dev/` directory and choose a suitable PLAYBOOK, run: | ||||
|    ``` | ||||
|    ls dev/*.yml | xargs -n 1 basename -s .yml | ||||
|    ``` | ||||
|    Export the `PLAYBOOK` variable | ||||
|    The following `PLAYBOOK` names are available: `dockerbox`, `hypervisor`, `minecraft`, `bitwarden`, `nextcloud`, `nginx` | ||||
|  | ||||
|    ``` | ||||
|    export PLAYBOOK=dockerbox | ||||
|    ``` | ||||
| 3. Clean up any previous provision and build the VM | ||||
| 3. Bring the Vagrant box up | ||||
|    ``` | ||||
|    make clean && make | ||||
|    vagrant up | ||||
|    ``` | ||||
|  | ||||
| ## Vagrant Settings | ||||
| The Vagrantfile configures the environment based on settings from `.vagrant.yml`, | ||||
| with default values including: | ||||
|  | ||||
| - PLAYBOOK: `default` | ||||
|    - Runs a `default` playbook that does nothing. | ||||
|    - You can set this by an environmental variable with the same name. | ||||
| - VAGRANT_BOX: `debian/bookworm64` | ||||
|    - Current Debian Stable codename | ||||
| - VAGRANT_CPUS: `2` | ||||
|    - Threads or cores per node, depending on CPU architecture | ||||
| - VAGRANT_MEM: `2048` | ||||
|    - Specifies the amount of memory (in MB) allocated | ||||
| - SSH_FORWARD: `false` | ||||
|    - Enable this if you need to forward SSH agents to the Vagrant machine | ||||
|  | ||||
|  | ||||
| ## Copyright and License | ||||
| Copyright (C) 2019-2023  Kris Lamoureux | ||||
| #### Copyright and License | ||||
| Copyright (C) 2020-2021  Kris Lamoureux | ||||
|  | ||||
| [](https://www.gnu.org/licenses/gpl-3.0) | ||||
|  | ||||
| This program is free software: you can redistribute it and/or modify it under | ||||
| the terms of the GNU General Public License as published by the Free Software | ||||
| Foundation, version 3 of the License. | ||||
|  | ||||
| This program is distributed in the hope that it will be useful, but WITHOUT ANY | ||||
| WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A | ||||
| PARTICULAR PURPOSE.  See the GNU General Public License for more details. | ||||
| This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, version 3 of the License. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along with | ||||
| this program. If not, see <https://www.gnu.org/licenses/>. | ||||
| This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details. | ||||
|  | ||||
| You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>. | ||||
|   | ||||
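For reference, the `.vagrant.yml` settings listed in the README diff above can be collected into a single file. The sketch below only restates the keys and default values documented there; it is illustrative, not a recommendation:

```yaml
# .vagrant.yml (example sketch; keys and defaults as documented in the README above)
PLAYBOOK: dockerbox              # dev/ playbook to provision with (default: "default", a no-op)
VAGRANT_BOX: debian/bookworm64   # current Debian Stable box
VAGRANT_CPUS: 2                  # threads or cores per node
VAGRANT_MEM: 2048                # memory in MB
SSH_FORWARD: false               # set true to forward the SSH agent into the VM
```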
							
								
								
									
**Vagrantfile** (vendored, 50 changed lines)

| @@ -1,41 +1,43 @@ | ||||
| # -*- mode: ruby -*- | ||||
| # vi: set ft=ruby : | ||||
|  | ||||
| require 'yaml' | ||||
| settings_path = '.vagrant.yml' | ||||
| settings = {} | ||||
|  | ||||
| if File.exist?(settings_path) | ||||
|   settings = YAML.load_file(settings_path) | ||||
| SSH_FORWARD=ENV["SSH_FORWARD"] | ||||
| if !(SSH_FORWARD == "true") | ||||
|   SSH_FORWARD = false | ||||
| end | ||||
|  | ||||
| VAGRANT_BOX  = settings['VAGRANT_BOX']  || 'debian/bookworm64' | ||||
| VAGRANT_CPUS = settings['VAGRANT_CPUS'] || 2 | ||||
| VAGRANT_MEM  = settings['VAGRANT_MEM']  || 2048 | ||||
| SSH_FORWARD  = settings['SSH_FORWARD']  || false | ||||
|  | ||||
| # Default to shell environment variable: PLAYBOOK (priority #1) | ||||
| PLAYBOOK=ENV["PLAYBOOK"] | ||||
| if !PLAYBOOK | ||||
|   if File.exist?('.playbook') | ||||
|     PLAYBOOK = IO.read('.playbook').split("\n")[0] | ||||
|   end | ||||
|  | ||||
|   if !PLAYBOOK || PLAYBOOK.empty? | ||||
|   # PLAYBOOK setting in .vagrant.yml (priority #2) | ||||
|   PLAYBOOK = settings['PLAYBOOK'] || 'default' | ||||
|     PLAYBOOK = "\nERROR: Set env PLAYBOOK" | ||||
|   end | ||||
| else | ||||
|   File.write(".playbook", PLAYBOOK) | ||||
| end | ||||
|  | ||||
| Vagrant.configure("2") do |config| | ||||
|   config.vm.box = VAGRANT_BOX | ||||
|   config.vm.box = "debian/bullseye64" | ||||
|   config.vm.network "private_network", type: "dhcp" | ||||
|   config.vm.synced_folder ".", "/vagrant", disabled: true | ||||
|   config.vm.synced_folder "./scratch", "/vagrant/scratch" | ||||
|   config.ssh.forward_agent = SSH_FORWARD | ||||
|  | ||||
|   # Libvrit provider | ||||
|   config.vm.provider :libvirt do |libvirt| | ||||
|     libvirt.cpus   = VAGRANT_CPUS | ||||
|     libvirt.memory = VAGRANT_MEM | ||||
|   # Machine Name | ||||
|   config.vm.define :moxie do |moxie| # | ||||
|   end | ||||
|  | ||||
|   # Virtualbox provider | ||||
|   config.vm.provider :virtualbox do |vbox| | ||||
|     vbox.cpus   = VAGRANT_CPUS | ||||
|     vbox.memory = VAGRANT_MEM | ||||
|   config.vm.provider :libvirt do |libvirt| | ||||
|     libvirt.cpus = 2 | ||||
|     libvirt.memory = 4096 | ||||
|     libvirt.default_prefix = "" | ||||
|   end | ||||
|  | ||||
|   config.vm.provider "virtualbox" do |vbox| | ||||
|     vbox.memory = 4096 | ||||
|   end | ||||
|  | ||||
|   # Provision with Ansible | ||||
| @@ -43,6 +45,6 @@ Vagrant.configure("2") do |config| | ||||
|     ENV['ANSIBLE_ROLES_PATH'] = File.dirname(__FILE__) + "/roles" | ||||
|     ansible.compatibility_mode = "2.0" | ||||
|     ansible.playbook = "dev/" + PLAYBOOK + ".yml" | ||||
|     ansible.raw_arguments = ["--diff"] | ||||
|   end | ||||
|  | ||||
| end | ||||
|   | ||||
| @@ -1,7 +1,6 @@ | ||||
| [defaults] | ||||
| inventory = ./environments/development | ||||
| interpreter_python = /usr/bin/python3 | ||||
| roles_path = ./roles | ||||
|  | ||||
| [connection] | ||||
| pipelining = true | ||||
|   | ||||
| @@ -1,4 +0,0 @@ | ||||
| - name: Install 'default' aka nothing | ||||
|   hosts: all | ||||
|   become: true | ||||
|   tasks: [] | ||||
| @@ -1,8 +0,0 @@ | ||||
| - name: Install Docker Server | ||||
|   hosts: all | ||||
|   become: true | ||||
|   vars_files: | ||||
|     - host_vars/docker.yml | ||||
|   roles: | ||||
|     - base | ||||
|     - docker | ||||
| @@ -1,4 +1,4 @@ | ||||
| - name: Install Dockerbox Server | ||||
| - name: Install Docker Box Server | ||||
|   hosts: all | ||||
|   become: true | ||||
|   vars_files: | ||||
| @@ -6,7 +6,8 @@ | ||||
|   roles: | ||||
|     - base | ||||
|     - docker | ||||
|     - mariadb | ||||
|     - traefik | ||||
|     - nextcloud | ||||
|     - proxy | ||||
|     - jenkins | ||||
|     - prometheus | ||||
|     - nginx | ||||
|   | ||||
| @@ -1,10 +0,0 @@ | ||||
| - name: Install Gitea Server | ||||
|   hosts: all | ||||
|   become: true | ||||
|   vars_files: | ||||
|     - host_vars/gitea.yml | ||||
|   roles: | ||||
|     - base | ||||
|     - docker | ||||
|     - mariadb | ||||
|     - gitea | ||||
| @@ -9,14 +9,14 @@ docker_users: | ||||
| # traefik | ||||
| traefik_version: latest | ||||
| traefik_dashboard: true | ||||
| traefik_domain: traefik.local.krislamo.org | ||||
| traefik_domain: traefik.vm.krislamo.org | ||||
| traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
| #traefik_acme_email: realemail@example.com # Let's Encrypt settings | ||||
| #traefik_production: true | ||||
|  | ||||
| # bitwarden | ||||
| # Get Installation ID & Key at https://bitwarden.com/host/ | ||||
| bitwarden_domain: vault.local.krislamo.org | ||||
| bitwarden_domain: vault.vm.krislamo.org | ||||
| bitwarden_dbpass: password | ||||
| bitwarden_install_id: 4ea840a3-532e-4cb6-a472-abd900728b23 | ||||
| bitwarden_install_key: 1yB3Z2gRI0KnnH90C6p | ||||
|   | ||||
| @@ -1,48 +0,0 @@ | ||||
| # base | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| # Import my GPG key for git signature verification | ||||
| root_gpgkeys: | ||||
|   - name: kris@lamoureux.io | ||||
|     id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|  | ||||
| # docker | ||||
| docker_users: | ||||
|   - vagrant | ||||
|  | ||||
| #docker_login_url: https://myregistry.example.com | ||||
| #docker_login_user: myuser | ||||
| #docker_login_pass: YOUR_PASSWD | ||||
|  | ||||
| docker_compose_env_nolog: false # dev only setting | ||||
| docker_compose_deploy: | ||||
|   # Traefik | ||||
|   - name: traefik | ||||
|     url: https://github.com/krislamo/traefik | ||||
|     version: 31ee724feebc1d5f91cb17ffd6892c352537f194 | ||||
|     enabled: true | ||||
|     accept_newhostkey: true # Consider verifying manually instead | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       ENABLE: true | ||||
|  | ||||
|   # Traefik 2 (no other external compose to test currently) | ||||
|   - name: traefik2 | ||||
|     url: https://github.com/krislamo/traefik | ||||
|     version: 31ee724feebc1d5f91cb17ffd6892c352537f194 | ||||
|     enabled: true | ||||
|     accept_newhostkey: true # Consider verifying manually instead | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       ENABLE: true | ||||
|       VERSION: "2.10" | ||||
|       DOMAIN: traefik2.local.krislamo.org | ||||
|       NAME: traefik2 | ||||
|       ROUTER: traefik2 | ||||
|       NETWORK: traefik2 | ||||
|       WEB_PORT: 127.0.0.1:8000:80 | ||||
|       WEBSECURE_PORT: 127.0.0.1:4443:443 | ||||
|       LOCAL_PORT: 127.0.0.1:8444:8443 | ||||
| @@ -2,51 +2,48 @@ | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| # Import my GPG key for git signature verification | ||||
| root_gpgkeys: | ||||
|   - name: kris@lamoureux.io | ||||
|     id: 42A3A92C5DA0F3E5F71A3710105B748C1362EB96 | ||||
|   # Older key, but still in use | ||||
|   - name: kris@lamoureux.io | ||||
|     id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     server: keyserver.ubuntu.com | ||||
|  | ||||
| # proxy | ||||
| proxy: | ||||
|   servers: | ||||
|     - domain: cloud.local.krislamo.org | ||||
|       proxy_pass: http://127.0.0.1:8000 | ||||
|  | ||||
| # docker | ||||
| docker_official: true # docker's apt repos | ||||
| docker_users: | ||||
|   - vagrant | ||||
|  | ||||
| docker_compose_env_nolog: false # dev only setting | ||||
| docker_compose_deploy: | ||||
|   # Traefik | ||||
|   - name: traefik | ||||
|     url: https://github.com/krislamo/traefik | ||||
|     version: d62bd06b37ecf0993962b0449a9d708373f9e381 | ||||
|     enabled: true | ||||
|     accept_newhostkey: true # Consider verifying manually instead | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       DASHBOARD: true | ||||
|   # Nextcloud | ||||
|   - name: nextcloud | ||||
|     url: https://github.com/krislamo/nextcloud | ||||
|     version: fe6d349749f178e91ae7ff726d557f48ebf84356 | ||||
|     env: | ||||
|       DATA: ./data | ||||
|  | ||||
| # traefik | ||||
| traefik: | ||||
|   ENABLE: true | ||||
| traefik_version: latest | ||||
| traefik_dashboard: true | ||||
| traefik_domain: traefik.vm.krislamo.org | ||||
| traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
| #traefik_acme_email: realemail@example.com # Let's Encrypt settings | ||||
| #traefik_production: true | ||||
| traefik_http_only: true # if behind reverse-proxy | ||||
|  | ||||
| # nextcloud | ||||
| nextcloud: | ||||
|   DOMAIN: cloud.local.krislamo.org | ||||
|   DB_PASSWD: password | ||||
|   ADMIN_PASSWD: password | ||||
| nextcloud_version: stable | ||||
| nextcloud_admin: admin | ||||
| nextcloud_pass: password | ||||
| nextcloud_domain: cloud.vm.krislamo.org | ||||
|  | ||||
| nextcloud_dbversion: latest | ||||
| nextcloud_dbpass: password | ||||
|  | ||||
| # gitea | ||||
| gitea_domain: git.vm.krislamo.org | ||||
| gitea_version: 1 | ||||
| gitea_dbversion: latest | ||||
| gitea_dbpass: password | ||||
|  | ||||
| # jenkins | ||||
| jenkins_version: lts | ||||
| jenkins_domain: jenkins.vm.krislamo.org | ||||
|  | ||||
| # prometheus (includes grafana) | ||||
| prom_version: latest | ||||
| prom_domain: prom.vm.krislamo.org | ||||
| grafana_version: latest | ||||
| grafana_domain: grafana.vm.krislamo.org | ||||
| prom_targets: "['10.0.2.15:9100']" | ||||
|  | ||||
| # nginx | ||||
| nginx_domain: nginx.vm.krislamo.org | ||||
| nginx_name: staticsite | ||||
| nginx_repo_url: https://git.krislamo.org/kris/example-website/ | ||||
| nginx_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
| nginx_version: latest | ||||
|   | ||||
| @@ -1,50 +0,0 @@ | ||||
| # base | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| users: | ||||
|   git: | ||||
|     uid: 1001 | ||||
|     gid: 1001 | ||||
|     home: true | ||||
|     system: true | ||||
|  | ||||
| # Import my GPG key for git signature verification | ||||
| root_gpgkeys: | ||||
|   - name: kris@lamoureux.io | ||||
|     id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|  | ||||
| # docker | ||||
| docker_official: true # docker's apt repos | ||||
| docker_users: | ||||
|   - vagrant | ||||
|  | ||||
| docker_compose_env_nolog: false # dev only setting | ||||
| docker_compose_deploy: | ||||
|   # Traefik | ||||
|   - name: traefik | ||||
|     url: https://github.com/krislamo/traefik | ||||
|     version: 398eb48d311db78b86abf783f903af4a1658d773 | ||||
|     enabled: true | ||||
|     accept_newhostkey: true | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       ENABLE: true | ||||
|   # Gitea | ||||
|   - name: gitea | ||||
|     url: https://github.com/krislamo/gitea | ||||
|     version: b0ce66f6a1ab074172eed79eeeb36d7e9011ef8f | ||||
|     enabled: true | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       USER_UID: "{{ users.git.uid }}" | ||||
|       USER_GID: "{{ users.git.gid }}" | ||||
|       DB_PASSWD: "{{ gitea.DB_PASSWD }}" | ||||
|  | ||||
| # gitea | ||||
| gitea: | ||||
|   DB_NAME: gitea | ||||
|   DB_USER: gitea | ||||
|   DB_PASSWD: password | ||||
| @@ -1,32 +1,9 @@ | ||||
| base_domain: local.krislamo.org | ||||
| base_domain: vm.krislamo.org | ||||
|  | ||||
| # base | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| users: | ||||
|   jellyfin: | ||||
|     uid: 1001 | ||||
|     gid: 1001 | ||||
|     shell: /usr/sbin/nologin | ||||
|     home: false | ||||
|     system: true | ||||
|  | ||||
| samba: | ||||
|   users: | ||||
|     - name: jellyfin | ||||
|       password: jellyfin | ||||
|   shares: | ||||
|     - name: jellyfin | ||||
|       path: /srv/jellyfin | ||||
|       owner: jellyfin | ||||
|       group: jellyfin | ||||
|       valid_users: jellyfin | ||||
|   firewall: | ||||
|     - 10.0.0.0/8 | ||||
|     - 172.16.0.0/12 | ||||
|     - 192.168.0.0/16 | ||||
|  | ||||
| # proxy | ||||
| proxy: | ||||
|   #production: true | ||||
| @@ -58,4 +35,3 @@ traefik_http_only: true # if behind reverse-proxy | ||||
| # jellyfin | ||||
| jellyfin_domain: "jellyfin.{{ base_domain }}" | ||||
| jellyfin_version: latest | ||||
| jellyfin_media: /srv/jellyfin | ||||
|   | ||||
| @@ -5,14 +5,14 @@ docker_users: | ||||
| # traefik | ||||
| traefik_version: latest | ||||
| traefik_dashboard: true | ||||
| traefik_domain: traefik.local.krislamo.org | ||||
| traefik_domain: traefik.vm.krislamo.org | ||||
| traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
|  | ||||
| # container settings | ||||
| nextcloud_version: stable | ||||
| nextcloud_admin: admin | ||||
| nextcloud_pass: password | ||||
| nextcloud_domain: cloud.local.krislamo.org | ||||
| nextcloud_domain: cloud.vm.krislamo.org | ||||
|  | ||||
| # database settings | ||||
| nextcloud_dbversion: latest | ||||
|   | ||||
| @@ -9,13 +9,13 @@ docker_users: | ||||
| # traefik | ||||
| traefik_version: latest | ||||
| traefik_dashboard: true | ||||
| traefik_domain: traefik.local.krislamo.org | ||||
| traefik_domain: traefik.vm.krislamo.org | ||||
| traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
| #traefik_acme_email: realemail@example.com # Let's Encrypt settings | ||||
| #traefik_production: true | ||||
|  | ||||
| # nginx | ||||
| nginx_domain: nginx.local.krislamo.org | ||||
| nginx_domain: nginx.vm.krislamo.org | ||||
| nginx_name: staticsite | ||||
| nginx_repo_url: https://git.krislamo.org/kris/example-website/ | ||||
| nginx_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
|   | ||||
| @@ -1,46 +0,0 @@ | ||||
| ############## | ||||
| #### base #### | ||||
| ############## | ||||
|  | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| users: | ||||
|   kris: | ||||
|     uid: 1001 | ||||
|     gid: 1001 | ||||
|     home: true | ||||
|     ansible_temp: true | ||||
|  | ||||
| ################ | ||||
| #### podman #### | ||||
| ################ | ||||
|  | ||||
| user_namespaces: | ||||
|   - kris | ||||
|  | ||||
| podman_compose_deploy: | ||||
|   kris: | ||||
|     root: /home/kris/podman_root | ||||
|     trusted_keys: | ||||
|       - id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|         keyserver: keyserver.ubuntu.com | ||||
|     compose: | ||||
|       - name: traefik | ||||
|         url: https://github.com/krislamo/traefik | ||||
|         version: d62bd06b37ecf0993962b0449a9d708373f9e381 | ||||
|         enabled: true | ||||
|         accept_newhostkey: true # Consider verifying manually instead | ||||
|         env: | ||||
|           DASHBOARD: true | ||||
|  | ||||
| ################### | ||||
| #### Bitwarden #### | ||||
| ################### | ||||
|  | ||||
| # Get Installation ID & Key at https://bitwarden.com/host/ | ||||
| bitwarden_domain: vault.local.krislamo.org | ||||
| bitwarden_dbpass: password | ||||
| bitwarden_install_id: 4ea840a3-532e-4cb6-a472-abd900728b23 | ||||
| bitwarden_install_key: 1yB3Z2gRI0KnnH90C6p | ||||
| #bitwarden_prodution: true | ||||
| @@ -1,21 +1,9 @@ | ||||
| base_domain: local.krislamo.org | ||||
| base_domain: vm.krislamo.org | ||||
|  | ||||
| # base | ||||
| allow_reboot: false | ||||
| manage_network: false | ||||
|  | ||||
| users: | ||||
|   git: | ||||
|     uid: 1001 | ||||
|     gid: 1001 | ||||
|     home: true | ||||
|     system: true | ||||
|  | ||||
| # Import my GPG key for git signature verification | ||||
| root_gpgkeys: | ||||
|   - name: kris@lamoureux.io | ||||
|     id: FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|  | ||||
| # proxy | ||||
| proxy: | ||||
|   #production: true | ||||
| @@ -27,49 +15,14 @@ proxy: | ||||
|       - "{{ base_domain }}" | ||||
|   servers: | ||||
|     - domain: "{{ bitwarden_domain }}" | ||||
|       proxy_pass: "http://127.0.0.1" | ||||
|       proxy_pass: "http://127.0.0.1:8080" | ||||
|     - domain: "{{ gitea_domain }}" | ||||
|       proxy_pass: "http://127.0.0.1" | ||||
|       proxy_pass: "http://127.0.0.1:3000" | ||||
|  | ||||
| # docker | ||||
| docker_official: true # docker's apt repos | ||||
| docker_users: | ||||
|   - vagrant | ||||
|  | ||||
| docker_compose_env_nolog: false # dev only setting | ||||
| docker_compose_deploy: | ||||
|   # Traefik | ||||
|   - name: traefik | ||||
|     url: https://github.com/krislamo/traefik | ||||
|     version: e97db75e2e214582fac5f5e495687ab5cdf855ad | ||||
|     path: docker-compose.web.yml | ||||
|     enabled: true | ||||
|     accept_newhostkey: true | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       ENABLE: true | ||||
|   # Gitea | ||||
|   - name: gitea | ||||
|     url: https://github.com/krislamo/gitea | ||||
|     version: b0ce66f6a1ab074172eed79eeeb36d7e9011ef8f | ||||
|     enabled: true | ||||
|     trusted_keys: | ||||
|       - FBF673CEEC030F8AECA814E73EDA9C3441EDA925 | ||||
|     env: | ||||
|       ENTRYPOINT: web | ||||
|       ENABLE_TLS: false | ||||
|       USER_UID: "{{ users.git.uid }}" | ||||
|       USER_GID: "{{ users.git.gid }}" | ||||
|       DB_PASSWD: "{{ gitea.DB_PASSWD }}" | ||||
|  | ||||
| # gitea | ||||
| gitea_domain: "git.{{ base_domain }}" | ||||
| gitea: | ||||
|   DB_NAME: gitea | ||||
|   DB_USER: gitea | ||||
|   DB_PASSWD: password | ||||
|  | ||||
| # bitwarden | ||||
| # Get Installation ID & Key at https://bitwarden.com/host/ | ||||
| bitwarden_domain: "vault.{{ base_domain }}" | ||||
| @@ -77,3 +30,8 @@ bitwarden_dbpass: password | ||||
| bitwarden_install_id: 4ea840a3-532e-4cb6-a472-abd900728b23 | ||||
| bitwarden_install_key: 1yB3Z2gRI0KnnH90C6p | ||||
| #bitwarden_prodution: true | ||||
|  | ||||
| # gitea | ||||
| gitea_domain: "git.{{ base_domain }}" | ||||
| gitea_version: 1 | ||||
| gitea_dbpass: password | ||||
|   | ||||
| @@ -9,14 +9,14 @@ docker_users: | ||||
| # traefik | ||||
| traefik_version: latest | ||||
| traefik_dashboard: true | ||||
| traefik_domain: traefik.local.krislamo.org | ||||
| traefik_domain: traefik.vm.krislamo.org | ||||
| traefik_auth: admin:$apr1$T1l.BCFz$Jyg8msXYEAUi3LLH39I9d1 # admin:admin | ||||
| #traefik_acme_email: realemail@example.com # Let's Encrypt settings | ||||
| #traefik_production: true | ||||
|  | ||||
| # container settings | ||||
| wordpress_version: latest | ||||
| wordpress_domain: wordpress.local.krislamo.org | ||||
| wordpress_domain: wordpress.vm.krislamo.org | ||||
| wordpress_multisite: true | ||||
|  | ||||
| # database settings | ||||
|   | ||||
| @@ -1,9 +0,0 @@ | ||||
| - name: Install Podman server | ||||
|   hosts: all | ||||
|   become: true | ||||
|   vars_files: | ||||
|     - host_vars/podman.yml | ||||
|   roles: | ||||
|     - base | ||||
|     - podman | ||||
|     - bitwarden | ||||
| @@ -5,8 +5,8 @@ | ||||
|     - host_vars/proxy.yml | ||||
|   roles: | ||||
|     - base | ||||
|     - mariadb | ||||
|     - proxy | ||||
|     - docker | ||||
|     - mariadb | ||||
|     - gitea | ||||
|     - bitwarden | ||||
|   | ||||
							
								
								
									
**docker.yml** (new file, 21 added lines)

| @@ -0,0 +1,21 @@ | ||||
| # Copyright (C) 2020  Kris Lamoureux | ||||
| # | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU General Public License as published by | ||||
| # the Free Software Foundation, version 3 of the License. | ||||
| # | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU General Public License for more details. | ||||
| # | ||||
| # You should have received a copy of the GNU General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
|  | ||||
| - name: Install Docker Server | ||||
|   hosts: dockerhosts | ||||
|   become: true | ||||
|   roles: | ||||
|     - base | ||||
|     - docker | ||||
|     - jenkins | ||||
							
								
								
									
**dockerbox.yml** (new file, 25 added lines)

| @@ -0,0 +1,25 @@ | ||||
| # Copyright (C) 2020  Kris Lamoureux | ||||
| # | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU General Public License as published by | ||||
| # the Free Software Foundation, version 3 of the License. | ||||
| # | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU General Public License for more details. | ||||
| # | ||||
| # You should have received a copy of the GNU General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
|  | ||||
| - name: Install Docker Box Server | ||||
|   hosts: dockerhosts | ||||
|   become: true | ||||
|   roles: | ||||
|     - base | ||||
|     - docker | ||||
|     - traefik | ||||
|     - nextcloud | ||||
|     - jenkins | ||||
|     - prometheus | ||||
|     - nginx | ||||
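Note that both new playbooks above target a `dockerhosts` inventory group instead of `all`, so they only run against hosts assigned to that group. A minimal inventory sketch defining such a group could look like the following; the hostname is a placeholder, not taken from this repository:

```yaml
# inventory.yml (hypothetical example; hostnames are placeholders)
all:
  children:
    dockerhosts:
      hosts:
        docker1.example.com:
          ansible_user: root
```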
							
								
								
									
**forward-ssh.sh** (125 changed lines)

| @@ -1,125 +0,0 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Finds the SSH private key under ./.vagrant and connects to | ||||
| # the Vagrant box, port forwarding localhost ports: 8443, 443, 80, 22 | ||||
| # | ||||
| # Download the latest script: | ||||
| # https://git.krislamo.org/kris/homelab/raw/branch/main/forward-ssh.sh | ||||
| # | ||||
| # Copyright (C) 2023  Kris Lamoureux | ||||
| # | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU General Public License as published by | ||||
| # the Free Software Foundation, version 3 of the License. | ||||
| # | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU General Public License for more details. | ||||
| # | ||||
| # You should have received a copy of the GNU General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
|  | ||||
| # Root check | ||||
| if [ "$EUID" -ne 0 ]; then | ||||
|   echo "[ERROR]: Please run this script as root" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # Clean environment | ||||
| unset PRIVATE_KEY | ||||
| unset HOST_IP | ||||
| unset MATCH_PATTERN | ||||
| unset PKILL_ANSWER | ||||
|  | ||||
| # Function to create the SSH tunnel | ||||
| function ssh_connect { | ||||
|   read -rp "Start a new vagrant SSH tunnel? [y/N] " PSTART_ANSWER | ||||
|   echo | ||||
|   case "$PSTART_ANSWER" in | ||||
|     [yY]) | ||||
|       printf "[INFO]: Starting new vagrant SSH tunnel on PID " | ||||
|       sudo -u "$USER" ssh -fNT -i "$PRIVATE_KEY" \ | ||||
|         -L 22:localhost:22 \ | ||||
|         -L 80:"$HOST_IP":80 \ | ||||
|         -L 443:"$HOST_IP":443 \ | ||||
|         -L 8443:localhost:8443 \ | ||||
|         -o UserKnownHostsFile=/dev/null \ | ||||
|         -o StrictHostKeyChecking=no \ | ||||
|         vagrant@"$HOST_IP" 2>/dev/null | ||||
|       sleep 2 | ||||
|       pgrep -f "$MATCH_PATTERN" | ||||
|       ;; | ||||
|     *) | ||||
|       echo "[INFO]: Declined to start a new vagrant SSH tunnel" | ||||
|       exit 0 | ||||
|       ;; | ||||
|   esac | ||||
| } | ||||
|  | ||||
| # Check for valid PRIVATE_KEY location | ||||
| PRIVATE_KEY="$(find .vagrant -name "private_key" 2>/dev/null | sort)" | ||||
|  | ||||
| # Single vagrant machine or multiple | ||||
| if [ "$(echo "$PRIVATE_KEY" | wc -l)" -gt 1 ]; then | ||||
|   while IFS= read -r KEYFILE; do | ||||
|     if ! ssh-keygen -l -f "$KEYFILE" &>/dev/null; then | ||||
|       echo "[ERROR]: The SSH key '$KEYFILE' is not valid. Are your virtual machines running?" | ||||
|       exit 1 | ||||
|     fi | ||||
|     echo "[CHECK]: Valid key at $KEYFILE" | ||||
|   done < <(echo "$PRIVATE_KEY") | ||||
|   PRIVATE_KEY="$(echo "$PRIVATE_KEY" | grep -m1 "${1:-default}")" | ||||
| elif ! ssh-keygen -l -f "$PRIVATE_KEY" &>/dev/null; then | ||||
|   echo "[ERROR]: The SSH key '$PRIVATE_KEY' is not valid. Is your virtual machine running?" | ||||
|   exit 1 | ||||
| else | ||||
|   echo "[CHECK]: Valid key at $PRIVATE_KEY" | ||||
| fi | ||||
|  | ||||
| # Grab first IP or use whatever HOST_IP_FIELD is set to and check that the guest is up | ||||
| HOST_IP="$(sudo -u "$SUDO_USER" vagrant ssh -c "hostname -I | cut -d' ' -f${HOST_IP_FIELD:-1}" "${1:-default}" 2>/dev/null)" | ||||
| if [ -z "$HOST_IP" ]; then | ||||
|   echo "[ERROR]: Failed to find ${1:-default}'s IP" | ||||
|   exit 1 | ||||
| fi | ||||
| HOST_IP="${HOST_IP::-1}" # trim | ||||
|  | ||||
| if ! ping -c 1 "$HOST_IP" &>/dev/null; then | ||||
|   echo "[ERROR]: Cannot ping the host IP '$HOST_IP'" | ||||
|   exit 1 | ||||
| fi | ||||
| echo "[CHECK]: Host at $HOST_IP (${1:-default}) is up" | ||||
|  | ||||
| # Pattern for matching processes running | ||||
| MATCH_PATTERN="ssh -fNT -i ${PRIVATE_KEY}.*vagrant@" | ||||
|  | ||||
| # Check amount of processes that match the pattern | ||||
| if [ "$(pgrep -afc "$MATCH_PATTERN")" -eq 0 ]; then | ||||
|   ssh_connect | ||||
| else | ||||
|   # Processes found, so prompt to kill remaining ones then start tunnel | ||||
|   printf "\n[WARNING]: Found processes running:\n" | ||||
|   pgrep -fa "$MATCH_PATTERN" | ||||
|   printf '\n' | ||||
|   read -rp "Would you like to kill these processes? [y/N] " PKILL_ANSWER | ||||
|   echo | ||||
|   case "$PKILL_ANSWER" in | ||||
|     [yY]) | ||||
|       echo "[WARNING]: Killing old vagrant SSH tunnel(s): " | ||||
|       pgrep -f "$MATCH_PATTERN" | tee >(xargs kill -15) | ||||
|       echo | ||||
|       if [ "$(pgrep -afc "$MATCH_PATTERN")" -eq 0 ]; then | ||||
|         ssh_connect | ||||
|       else | ||||
|         echo "[ERROR]: Unable to kill processes:" | ||||
|         pgrep -f "$MATCH_PATTERN" | ||||
|         exit 1 | ||||
|       fi | ||||
|       ;; | ||||
|     *) | ||||
|       echo "[INFO]: Declined to kill existing processes" | ||||
|       exit 0 | ||||
|       ;; | ||||
|   esac | ||||
| fi | ||||
| @@ -1,9 +1,8 @@ | ||||
| - name: Install Media Server | ||||
|   hosts: "{{ PLAYBOOK_HOST | default('none') }}" | ||||
|   hosts: mediaservers | ||||
|   become: true | ||||
|   roles: | ||||
|     - base | ||||
|     - jenkins | ||||
|     - proxy | ||||
|     - docker | ||||
|     - traefik | ||||
| @@ -1,8 +0,0 @@ | ||||
| - name: Install Docker Server | ||||
|   hosts: "{{ PLAYBOOK_HOST | default('none') }}" | ||||
|   become: true | ||||
|   roles: | ||||
|     - base | ||||
|     - jenkins | ||||
|     - proxy | ||||
|     - docker | ||||
| @@ -1,11 +0,0 @@ | ||||
| - name: Install Dockerbox Server | ||||
|   hosts: "{{ PLAYBOOK_HOST | default('none') }}" | ||||
|   become: true | ||||
|   roles: | ||||
|     - base | ||||
|     - jenkins | ||||
|     - docker | ||||
|     - mariadb | ||||
|     - traefik | ||||
|     - nextcloud | ||||
|     - proxy | ||||
| @@ -1,8 +1,6 @@ | ||||
| allow_reboot: true | ||||
| manage_firewall: true | ||||
| manage_network: false | ||||
| network_type: static | ||||
| locale_default: en_US.UTF-8 | ||||
| allow_reboot: true | ||||
|  | ||||
| packages: | ||||
|   - apache2-utils | ||||
|   | ||||
| @@ -5,10 +5,6 @@ | ||||
|   listen: reboot_host | ||||
|   when: allow_reboot | ||||
|  | ||||
| - name: Reconfigure locales | ||||
|   ansible.builtin.command: dpkg-reconfigure -f noninteractive locales | ||||
|   listen: reconfigure_locales | ||||
|  | ||||
| - name: Restart WireGuard | ||||
|   ansible.builtin.service: | ||||
|     name: wg-quick@wg0 | ||||
| @@ -26,9 +22,3 @@ | ||||
|     name: ddclient | ||||
|     state: restarted | ||||
|   listen: restart_ddclient | ||||
|  | ||||
| - name: Restart Samba | ||||
|   ansible.builtin.service: | ||||
|     name: smbd | ||||
|     state: restarted | ||||
|   listen: restart_samba | ||||
|   | ||||
| @@ -1,5 +1,23 @@ | ||||
| - name: 'Install Ansible dependency: python3-apt' | ||||
|   ansible.builtin.shell: 'apt-get update && apt-get install python3-apt -y' | ||||
|   args: | ||||
|     creates: /usr/lib/python3/dist-packages/apt | ||||
|     warn: false | ||||
|  | ||||
| - name: Install additional Ansible dependencies | ||||
|   ansible.builtin.apt: | ||||
|     name: "{{ item }}" | ||||
|     state: present | ||||
|     force_apt_get: true | ||||
|     update_cache: true | ||||
|   loop: | ||||
|     - aptitude | ||||
|     - python3-docker | ||||
|     - python3-pymysql | ||||
|     - python3-psycopg2 | ||||
|  | ||||
| - name: Create Ansible's temporary remote directory | ||||
|   ansible.builtin.file: | ||||
|     path: "~/.ansible/tmp" | ||||
|     state: directory | ||||
|     mode: "700" | ||||
|     mode: 0700 | ||||
|   | ||||
| @@ -7,7 +7,7 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: ddclient.conf.j2 | ||||
|     dest: /etc/ddclient.conf | ||||
|     mode: "600" | ||||
|     mode: 0600 | ||||
|   register: ddclient_settings | ||||
|  | ||||
| - name: Start ddclient and enable on boot | ||||
|   | ||||
| @@ -32,14 +32,14 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: fail2ban-ssh.conf.j2 | ||||
|     dest: /etc/fail2ban/jail.d/sshd.conf | ||||
|     mode: "640" | ||||
|     mode: 0640 | ||||
|   notify: restart_fail2ban | ||||
|  | ||||
| - name: Install Fail2ban IP allow list | ||||
|   ansible.builtin.template: | ||||
|     src: fail2ban-allowlist.conf.j2 | ||||
|     dest: /etc/fail2ban/jail.d/allowlist.conf | ||||
|     mode: "640" | ||||
|     mode: 0640 | ||||
|   when: fail2ban_ignoreip is defined | ||||
|   notify: restart_fail2ban | ||||
|  | ||||
|   | ||||
| @@ -11,10 +11,10 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: msmtprc.j2 | ||||
|     dest: /root/.msmtprc | ||||
|     mode: "600" | ||||
|     mode: 0600 | ||||
|  | ||||
| - name: Install /etc/aliases | ||||
|   ansible.builtin.copy: | ||||
|     dest: /etc/aliases | ||||
|     content: "root: {{ mail.rootalias }}" | ||||
|     mode: "644" | ||||
|     mode: 0644 | ||||
|   | ||||
| @@ -9,7 +9,6 @@ | ||||
| - name: Import Firewall tasks | ||||
|   ansible.builtin.import_tasks: firewall.yml | ||||
|   tags: firewall | ||||
|   when: manage_firewall | ||||
|  | ||||
| - name: Import Network tasks | ||||
|   ansible.builtin.import_tasks: network.yml | ||||
| @@ -30,8 +29,3 @@ | ||||
|   ansible.builtin.import_tasks: wireguard.yml | ||||
|   tags: wireguard | ||||
|   when: wireguard is defined | ||||
|  | ||||
| - name: Import Samba tasks | ||||
|   ansible.builtin.import_tasks: samba.yml | ||||
|   tags: samba | ||||
|   when: samba is defined | ||||
|   | ||||
| @@ -10,6 +10,6 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: "interface.j2" | ||||
|     dest: "/etc/network/interfaces.d/{{ item.name }}" | ||||
|     mode: "400" | ||||
|     mode: 0400 | ||||
|   loop: "{{ interfaces }}" | ||||
|   notify: reboot_host | ||||
|   | ||||
| @@ -4,43 +4,22 @@ | ||||
|     state: present | ||||
|  | ||||
| - name: Create Samba users | ||||
|   ansible.builtin.command: "smbpasswd -a {{ item.name }}" | ||||
|   ansible.builtin.command: "smbpasswd -a -s {{ item.name }}" | ||||
|   args: | ||||
|     stdin: "{{ item.password }}\n{{ item.password }}" | ||||
|   loop: "{{ samba.users }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.name }}" | ||||
|   register: samba_users | ||||
|   changed_when: "'Added user' in samba_users.stdout" | ||||
|   changed_when: "'User added' in samba_users.stdout" | ||||
|  | ||||
| - name: Ensure share directories exist | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ item.path }}" | ||||
|     owner: "{{ item.owner }}" | ||||
|     group: "{{ item.group }}" | ||||
|     state: directory | ||||
|     mode: "755" | ||||
|     mode: 0755 | ||||
|   loop: "{{ samba.shares }}" | ||||
|  | ||||
| - name: Configure Samba shares | ||||
|   ansible.builtin.template: | ||||
|     src: smb.conf.j2 | ||||
|     dest: /etc/samba/smb.conf | ||||
|     mode: "644" | ||||
|   notify: restart_samba | ||||
|  | ||||
| - name: Start smbd and enable on boot | ||||
|   ansible.builtin.service: | ||||
|     name: smbd | ||||
|     state: started | ||||
|     enabled: true | ||||
|  | ||||
| - name: Allow SMB connections | ||||
|   community.general.ufw: | ||||
|     rule: allow | ||||
|     port: 445 | ||||
|     proto: tcp | ||||
|     from: "{{ item }}" | ||||
|     state: enabled | ||||
|   loop: "{{ samba.firewall }}" | ||||
|   when: manage_firewall | ||||
|   notify: samba_restart | ||||
|   | ||||
| @@ -4,116 +4,13 @@ | ||||
|     state: present | ||||
|     update_cache: true | ||||
|  | ||||
| - name: Install GPG | ||||
|   ansible.builtin.apt: | ||||
|     name: gpg | ||||
|     state: present | ||||
|  | ||||
| - name: Check for existing GPG keys | ||||
|   ansible.builtin.command: "gpg --list-keys {{ item.id }} 2>/dev/null" | ||||
|   register: gpg_check | ||||
|   loop: "{{ root_gpgkeys }}" | ||||
|   failed_when: false | ||||
|   changed_when: false | ||||
|   when: root_gpgkeys is defined | ||||
|  | ||||
| - name: Import GPG keys | ||||
|   ansible.builtin.command: | ||||
|     "gpg --keyserver {{ item.item.server | default('keys.openpgp.org') }} --recv-key {{ item.item.id }}" | ||||
|   register: gpg_check_import | ||||
|   loop: "{{ gpg_check.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.item }}" | ||||
|   changed_when: false | ||||
|   when: root_gpgkeys is defined and item.rc != 0 | ||||
|  | ||||
| - name: Check GPG key imports | ||||
|   ansible.builtin.fail: | ||||
|     msg: "{{ item.stderr }}" | ||||
|   loop: "{{ gpg_check_import.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.item.item }}" | ||||
|   when: root_gpgkeys is defined and (not item.skipped | default(false)) and ('imported' not in item.stderr) | ||||
|  | ||||
| - name: Install NTPsec | ||||
|   ansible.builtin.apt: | ||||
|     name: ntpsec | ||||
|     state: present | ||||
|  | ||||
| - name: Install locales | ||||
|   ansible.builtin.apt: | ||||
|     name: locales | ||||
|     state: present | ||||
|  | ||||
| - name: Generate locale | ||||
|   community.general.locale_gen: | ||||
|     name: "{{ locale_default }}" | ||||
|     state: present | ||||
|   notify: reconfigure_locales | ||||
|  | ||||
| - name: Set the default locale | ||||
|   ansible.builtin.lineinfile: | ||||
|     path: /etc/default/locale | ||||
|     regexp: "^LANG=" | ||||
|     line: "LANG={{ locale_default }}" | ||||
|  | ||||
| - name: Manage root authorized_keys | ||||
|   ansible.builtin.template: | ||||
|     src: authorized_keys.j2 | ||||
|     dest: /root/.ssh/authorized_keys | ||||
|     mode: "400" | ||||
|     mode: 0400 | ||||
|   when: authorized_keys is defined | ||||
|  | ||||
| - name: Create system user groups | ||||
|   ansible.builtin.group: | ||||
|     name: "{{ item.key }}" | ||||
|     gid: "{{ item.value.gid }}" | ||||
|     state: present | ||||
|   loop: "{{ users | dict2items }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.key }}" | ||||
|   when: users is defined | ||||
|  | ||||
| - name: Create system users | ||||
|   ansible.builtin.user: | ||||
|     name: "{{ item.key }}" | ||||
|     state: present | ||||
|     uid: "{{ item.value.uid }}" | ||||
|     group: "{{ item.value.gid }}" | ||||
|     groups: "{{ item.value.groups | default([]) }}" | ||||
|     shell: "{{ item.value.shell | default('/bin/bash') }}" | ||||
|     create_home: "{{ item.value.home | default(false) }}" | ||||
|     home: "{{ item.value.homedir | default('/home/' + item.key) }}" | ||||
|     system: "{{ item.value.system | default(false) }}" | ||||
|   loop: "{{ users | dict2items }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.key }}" | ||||
|   when: users is defined | ||||
|  | ||||
| - name: Create Ansible's temporary remote directory for users | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ item.value.homedir | default('/home/' + item.key) }}/.ansible/tmp" | ||||
|     state: directory | ||||
|     mode: "700" | ||||
|     owner: "{{ item.key }}" | ||||
|     group: "{{ item.value.gid }}" | ||||
|   loop: "{{ users | dict2items }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.key }}" | ||||
|   when: | ||||
|     - users is defined | ||||
|     - item.value.ansible_temp | default(false) | ||||
|  | ||||
| - name: Set authorized_keys for system users | ||||
|   ansible.posix.authorized_key: | ||||
|     user: "{{ item.key }}" | ||||
|     key: "{{ item.value.key }}" | ||||
|     state: present | ||||
|   loop: "{{ users | dict2items }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.key }}" | ||||
|   when: users is defined and item.value.key is defined | ||||
|  | ||||
| - name: Manage filesystem mounts | ||||
|   ansible.posix.mount: | ||||
|     path: "{{ item.path }}" | ||||
|   | ||||
| @@ -11,29 +11,6 @@ | ||||
|   args: | ||||
|     chdir: /etc/wireguard/ | ||||
|     creates: /etc/wireguard/privatekey | ||||
|     executable: /usr/bin/bash | ||||
|  | ||||
| - name: Grab WireGuard private key for configuration | ||||
|   ansible.builtin.slurp: | ||||
|     src: /etc/wireguard/privatekey | ||||
|   register: wgkey | ||||
|  | ||||
| - name: Check if WireGuard preshared key file exists | ||||
|   ansible.builtin.stat: | ||||
|     path: /etc/wireguard/presharedkey-{{ item.name }} | ||||
|   loop: "{{ wireguard.peers }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.name }}" | ||||
|   register: presharedkey_files | ||||
|  | ||||
| - name: Grab WireGuard preshared key for configuration | ||||
|   ansible.builtin.slurp: | ||||
|     src: /etc/wireguard/presharedkey-{{ item.item.name }} | ||||
|   register: wgshared | ||||
|   loop: "{{ presharedkey_files.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.item.name }}" | ||||
|   when: item.stat.exists | ||||
|  | ||||
| - name: Grab WireGuard private key for configuration | ||||
|   ansible.builtin.slurp: | ||||
| @@ -44,7 +21,7 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: wireguard.j2 | ||||
|     dest: /etc/wireguard/wg0.conf | ||||
|     mode: "400" | ||||
|     mode: 0400 | ||||
|   notify: restart_wireguard | ||||
|  | ||||
| - name: Start WireGuard interface | ||||
| @@ -57,5 +34,5 @@ | ||||
|   community.general.ufw: | ||||
|     rule: allow | ||||
|     port: "{{ wireguard.listenport }}" | ||||
|     proto: udp | ||||
|     proto: tcp | ||||
|   when: wireguard.listenport is defined | ||||
|   | ||||
| @@ -13,16 +13,7 @@ | ||||
| [{{ share.name }}] | ||||
|    path = {{ share.path }} | ||||
|    browsable = yes | ||||
| {% if share.guest_allow is defined and share.guest_allow %} | ||||
|    guest ok = yes | ||||
| {% else %} | ||||
|    guest ok = no | ||||
| {% endif %} | ||||
|    read only = {{ 'yes' if share.read_only | default(false) else 'no' }} | ||||
| {% if share.valid_users is defined %} | ||||
|    valid users = {{ share.valid_users }} | ||||
| {% endif %} | ||||
| {% if share.force_user is defined %} | ||||
|    force user = {{ share.force_user }} | ||||
| {% endif %} | ||||
| {% endfor %} | ||||
|   | ||||
| @@ -1,6 +1,4 @@ | ||||
| # {{ ansible_managed }} | ||||
|  | ||||
| [Interface] # {{ ansible_hostname }} | ||||
| [Interface] | ||||
| PrivateKey = {{ wgkey['content'] | b64decode | trim }} | ||||
| Address = {{ wireguard.address }} | ||||
| {% if wireguard.listenport is defined %} | ||||
| @@ -8,26 +6,8 @@ ListenPort = {{ wireguard.listenport }} | ||||
| {% endif %} | ||||
|  | ||||
| {% for peer in wireguard.peers %} | ||||
| {% if peer.name is defined %} | ||||
| [Peer] # {{ peer.name }} | ||||
| {% else %} | ||||
| [Peer] | ||||
| {% endif %} | ||||
| PublicKey = {{ peer.publickey }} | ||||
| {% if peer.presharedkey is defined %} | ||||
| PresharedKey = {{ peer.presharedkey }} | ||||
| {% else %} | ||||
| {% set preshared_key = ( | ||||
|     wgshared.results | ||||
|     | selectattr('item.item.name', 'equalto', peer.name) | ||||
|     | first | ||||
|   ).content | ||||
|   | default(none) | ||||
| %} | ||||
| {% if preshared_key is not none %} | ||||
| PresharedKey = {{ preshared_key | b64decode | trim }} | ||||
| {% endif %} | ||||
| {% endif %} | ||||
| {% if peer.endpoint is defined %} | ||||
| Endpoint = {{ peer.endpoint }} | ||||
| {% endif %} | ||||
|   | ||||
| @@ -5,12 +5,7 @@ | ||||
|   listen: rebuild_bitwarden | ||||
|  | ||||
| - name: Rebuild Bitwarden | ||||
|   ansible.builtin.command: "{{ bitwarden_root }}/bitwarden.sh rebuild" | ||||
|   listen: rebuild_bitwarden | ||||
|  | ||||
| - name: Reload systemd manager configuration | ||||
|   ansible.builtin.systemd: | ||||
|     daemon_reload: true | ||||
|   ansible.builtin.shell: "{{ bitwarden_root }}/bitwarden.sh rebuild" | ||||
|   listen: rebuild_bitwarden | ||||
|  | ||||
| - name: Start Bitwarden after rebuild | ||||
| @@ -19,10 +14,3 @@ | ||||
|     state: started | ||||
|     enabled: true | ||||
|   listen: rebuild_bitwarden | ||||
|  | ||||
| - name: Create Bitwarden's initial log file | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ bitwarden_logs_identity }}/{{ bitwarden_logs_identity_date }}.txt" | ||||
|     state: touch | ||||
|     mode: "644" | ||||
|   listen: touch_bitwarden | ||||
|   | ||||
| @@ -7,7 +7,6 @@ | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ bitwarden_root }}" | ||||
|     state: directory | ||||
|     mode: "755" | ||||
|  | ||||
| - name: Download Bitwarden script | ||||
|   ansible.builtin.get_url: | ||||
| @@ -23,23 +22,22 @@ | ||||
|     mode: u+x | ||||
|  | ||||
| - name: Run Bitwarden installation script | ||||
|   ansible.builtin.command: "{{ bitwarden_root }}/bw_wrapper" | ||||
|   ansible.builtin.shell: "{{ bitwarden_root }}/bw_wrapper" | ||||
|   args: | ||||
|     creates: "{{ bitwarden_root }}/bwdata/config.yml" | ||||
|  | ||||
| - name: Install compose override | ||||
| - name: Install docker-compose override | ||||
|   ansible.builtin.template: | ||||
|     src: compose.override.yml.j2 | ||||
|     dest: "{{ bitwarden_root }}/bwdata/docker/docker-compose.override.yml" | ||||
|     mode: "644" | ||||
|   when: bitwarden_override | default(true) | ||||
|   when: traefik_version is defined | ||||
|   notify: rebuild_bitwarden | ||||
|  | ||||
| - name: Disable bitwarden-nginx HTTP on 80 | ||||
|   ansible.builtin.replace: | ||||
|     path: "{{ bitwarden_root }}/bwdata/config.yml" | ||||
|     regexp: "^http_port: 80$" | ||||
|     replace: "http_port: {{ bitwarden_http_port | default('127.0.0.1:9080') }}" | ||||
|     replace: "http_port: 127.0.0.1:8080" | ||||
|   when: not bitwarden_standalone | ||||
|   notify: rebuild_bitwarden | ||||
|  | ||||
| @@ -47,7 +45,7 @@ | ||||
|   ansible.builtin.replace: | ||||
|     path: "{{ bitwarden_root }}/bwdata/config.yml" | ||||
|     regexp: "^https_port: 443$" | ||||
|     replace: "https_port: {{ bitwarden_https_port | default('127.0.0.1:9443') }}" | ||||
|     replace: "https_port: 127.0.0.1:8443" | ||||
|   when: not bitwarden_standalone | ||||
|   notify: rebuild_bitwarden | ||||
|  | ||||
| @@ -78,7 +76,6 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: bitwarden.service.j2 | ||||
|     dest: "/etc/systemd/system/{{ bitwarden_name }}.service" | ||||
|     mode: "644" | ||||
|   register: bitwarden_systemd | ||||
|   notify: rebuild_bitwarden | ||||
|  | ||||
| @@ -86,12 +83,22 @@ | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ bitwarden_logs_identity }}" | ||||
|     state: directory | ||||
|     mode: "755" | ||||
|   notify: touch_bitwarden | ||||
|   register: bitwarden_logs | ||||
|  | ||||
| - name: Create Bitwarden's initial log file | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ bitwarden_logs_identity }}/{{ bitwarden_logs_identity_date }}.txt" | ||||
|     state: touch | ||||
|   when: bitwarden_logs.changed | ||||
|  | ||||
| - name: Install Bitwarden's Fail2ban jail | ||||
|   ansible.builtin.template: | ||||
|     src: fail2ban-jail.conf.j2 | ||||
|     dest: /etc/fail2ban/jail.d/bitwarden.conf | ||||
|     mode: "640" | ||||
|   notify: restart_fail2ban | ||||
|  | ||||
| - name: Reload systemd manager configuration | ||||
|   ansible.builtin.systemd: | ||||
|     daemon_reload: true | ||||
|   when: bitwarden_systemd.changed | ||||
|   notify: rebuild_bitwarden | ||||
|   | ||||
| @@ -23,13 +23,10 @@ send "{{ bitwarden_install_id }}\r" | ||||
| expect "Enter your installation key:" | ||||
| send "{{ bitwarden_install_key }}\r" | ||||
|  | ||||
| expect "Enter your region (US/EU) \\\[US\\\]:" | ||||
| send "US\r" | ||||
|  | ||||
| expect "Do you have a SSL certificate to use? (y/N):" | ||||
| expect "Do you have a SSL certificate to use? (y/n):" | ||||
| send "n\r" | ||||
|  | ||||
| expect "Do you want to generate a self-signed SSL certificate? (y/N):" | ||||
| expect "Do you want to generate a self-signed SSL certificate? (y/n):" | ||||
| {% if bitwarden_standalone and not bitwarden_production %} | ||||
| send "y\r" | ||||
| {% else %} | ||||
|   | ||||
| @@ -6,11 +6,13 @@ services: | ||||
|       - traefik | ||||
|     labels: | ||||
|       traefik.http.routers.bitwarden.rule: "Host(`{{ bitwarden_domain }}`)" | ||||
|       traefik.http.routers.bitwarden.entrypoints: {{ bitwarden_entrypoint | default('web') }} | ||||
|       traefik.http.routers.bitwarden.tls: {{ bitwarden_traefik_tls | default('false') }} | ||||
|       traefik.http.routers.bitwarden.entrypoints: websecure | ||||
|       traefik.http.routers.bitwarden.tls.certresolver: letsencrypt | ||||
|       traefik.http.routers.bitwarden.middlewares: "securehttps@file" | ||||
|       traefik.http.services.bitwarden.loadbalancer.server.port: 8080 | ||||
|       traefik.docker.network: traefik | ||||
|       traefik.enable: "true" | ||||
|  | ||||
| networks: | ||||
|   traefik: | ||||
|     external: true | ||||
|   | ||||
| @@ -1,11 +1,3 @@ | ||||
| docker_apt_keyring: /etc/apt/keyrings/docker.asc | ||||
| docker_apt_keyring_hash: 1500c1f56fa9e26b9b8f42452a553675796ade0807cdce11975eb98170b3a570 | ||||
| docker_apt_keyring_url: https://download.docker.com/linux/debian/gpg | ||||
| docker_apt_repo: https://download.docker.com/linux/debian | ||||
| docker_compose_root: /var/lib/compose | ||||
| docker_compose: /usr/bin/docker-compose | ||||
| docker_compose_service: compose | ||||
| docker_compose: "{{ (docker_official | bool) | ternary('/usr/bin/docker compose', '/usr/bin/docker-compose') }}" | ||||
| docker_official: false | ||||
| docker_repos_keys: "{{ docker_repos_path }}/.keys" | ||||
| docker_repos_keytype: rsa | ||||
| docker_repos_path: /srv/.compose_repos | ||||
|   | ||||
| @@ -2,53 +2,3 @@ | ||||
|   ansible.builtin.systemd: | ||||
|     daemon_reload: true | ||||
|   listen: compose_systemd | ||||
|  | ||||
| - name: Find which services had a docker-compose.yml updated | ||||
|   ansible.builtin.set_fact: | ||||
|     compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}" | ||||
|   loop: "{{ compose_update.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.item.name }}" | ||||
|   when: item.changed | ||||
|   listen: compose_restart | ||||
|  | ||||
| - name: Find which services had their .env updated | ||||
|   ansible.builtin.set_fact: | ||||
|     compose_restart_list: "{{ (compose_restart_list | default([])) + [item.item.name] }}" | ||||
|   loop: "{{ compose_env_update.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.item.name }}" | ||||
|   when: item.changed | ||||
|   listen: compose_restart | ||||
|  | ||||
| - name: Restart MariaDB | ||||
|   ansible.builtin.service: | ||||
|     name: mariadb | ||||
|     state: restarted | ||||
|   when: not mariadb_restarted | ||||
|   listen: restart_mariadb # hijack handler for early restart | ||||
|  | ||||
| - name: Set MariaDB as restarted | ||||
|   ansible.builtin.set_fact: | ||||
|     mariadb_restarted: true | ||||
|   when: not mariadb_restarted | ||||
|   listen: restart_mariadb | ||||
|  | ||||
| - name: Restart compose services | ||||
|   ansible.builtin.systemd: | ||||
|     state: restarted | ||||
|     name: "{{ docker_compose_service }}@{{ item }}" | ||||
|   loop: "{{ compose_restart_list | default([]) | unique }}" | ||||
|   when: compose_restart_list is defined | ||||
|   listen: compose_restart | ||||
|  | ||||
| - name: Start compose services and enable on boot | ||||
|   ansible.builtin.service: | ||||
|     name: "{{ docker_compose_service }}@{{ item.name }}" | ||||
|     state: started | ||||
|     enabled: true | ||||
|   loop: "{{ docker_compose_deploy }}" | ||||
|   loop_control: | ||||
|     label: "{{ docker_compose_service }}@{{ item.name }}" | ||||
|   when: item.enabled is defined and item.enabled is true | ||||
|   listen: compose_enable | ||||
|   | ||||
| @@ -1,149 +1,32 @@ | ||||
| - name: Add official Docker APT key | ||||
|   ansible.builtin.get_url: | ||||
|     url: "{{ docker_apt_keyring_url }}" | ||||
|     dest: "{{ docker_apt_keyring }}" | ||||
|     checksum: "sha256:{{ docker_apt_keyring_hash }}" | ||||
|     mode: "644" | ||||
|     owner: root | ||||
|     group: root | ||||
|   when: docker_official | ||||
|  | ||||
| - name: Remove official Docker APT key | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ docker_apt_keyring }}" | ||||
|     state: absent | ||||
|   when: not docker_official | ||||
|  | ||||
| - name: Add/remove official Docker APT repository | ||||
|   ansible.builtin.apt_repository: | ||||
|     repo: > | ||||
|       deb [arch=amd64 signed-by={{ docker_apt_keyring }}] | ||||
|       {{ docker_apt_repo }} {{ ansible_distribution_release }} stable | ||||
|     state: "{{ 'present' if docker_official else 'absent' }}" | ||||
|     filename: "{{ docker_apt_keyring | regex_replace('^.*/', '') }}" | ||||
|  | ||||
| - name: Install/uninstall Docker from Debian repositories | ||||
| - name: Install Docker | ||||
|   ansible.builtin.apt: | ||||
|     name: ["docker.io", "docker-compose", "containerd", "runc"] | ||||
|     state: "{{ 'absent' if docker_official else 'present' }}" | ||||
|     autoremove: true | ||||
|     name: ['docker.io', 'docker-compose'] | ||||
|     state: present | ||||
|     update_cache: true | ||||
|  | ||||
| - name: Install/uninstall Docker from Docker repositories | ||||
|   ansible.builtin.apt: | ||||
|     name: | ||||
|       [ | ||||
|         "docker-ce", | ||||
|         "docker-ce-cli", | ||||
|         "containerd.io", | ||||
|         "docker-buildx-plugin", | ||||
|         "docker-compose-plugin", | ||||
|       ] | ||||
|     state: "{{ 'present' if docker_official else 'absent' }}" | ||||
|     autoremove: true | ||||
|     update_cache: true | ||||
|  | ||||
| - name: Login to private registry | ||||
|   community.docker.docker_login: | ||||
|     registry_url: "{{ docker_login_url | default('') }}" | ||||
|     username: "{{ docker_login_user }}" | ||||
|     password: "{{ docker_login_pass }}" | ||||
|   when: docker_login_user is defined and docker_login_pass is defined | ||||
|  | ||||
| - name: Create docker-compose root | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ docker_compose_root }}" | ||||
|     state: directory | ||||
|     mode: "500" | ||||
|     mode: 0500 | ||||
|  | ||||
| - name: Install docker-compose systemd service | ||||
|   ansible.builtin.template: | ||||
|     src: docker-compose.service.j2 | ||||
|     dest: "/etc/systemd/system/{{ docker_compose_service }}@.service" | ||||
|     mode: "400" | ||||
|     mode: 0400 | ||||
|   notify: compose_systemd | ||||
|  | ||||
| - name: Create directories to clone docker-compose repositories | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ item }}" | ||||
|     state: directory | ||||
|     mode: "400" | ||||
|   loop: | ||||
|     - "{{ docker_repos_path }}" | ||||
|     - "{{ docker_repos_keys }}" | ||||
|   when: docker_compose_deploy is defined | ||||
|  | ||||
| - name: Generate OpenSSH deploy keys for docker-compose clones | ||||
|   community.crypto.openssh_keypair: | ||||
|     path: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}" | ||||
|     type: "{{ docker_repos_keytype }}" | ||||
|     comment: "{{ ansible_hostname }}-deploy-key" | ||||
|     mode: "400" | ||||
|     state: present | ||||
|   when: docker_compose_deploy is defined | ||||
|  | ||||
| - name: Check for git installation | ||||
|   ansible.builtin.apt: | ||||
|     name: git | ||||
|     state: present | ||||
|   when: docker_compose_deploy is defined | ||||
|  | ||||
| - name: Clone external docker-compose projects | ||||
|   ansible.builtin.git: | ||||
|     repo: "{{ item.url }}" | ||||
|     dest: "{{ docker_repos_path }}/{{ item.name }}" | ||||
|     version: "{{ item.version }}" | ||||
|     accept_newhostkey: "{{ item.accept_newhostkey | default(false) }}" | ||||
|     gpg_whitelist: "{{ item.trusted_keys | default([]) }}" | ||||
|     verify_commit: "{{ true if (item.trusted_keys is defined and item.trusted_keys) else false }}" | ||||
|     key_file: "{{ docker_repos_keys }}/id_{{ docker_repos_keytype }}" | ||||
|   loop: "{{ docker_compose_deploy }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.url }}" | ||||
|   when: docker_compose_deploy is defined | ||||
|  | ||||
| - name: Create directories for docker-compose projects using the systemd service | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ docker_compose_root }}/{{ item.name }}" | ||||
|     state: directory | ||||
|     mode: "400" | ||||
|   loop: "{{ docker_compose_deploy }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.name }}" | ||||
|   when: docker_compose_deploy is defined | ||||
|  | ||||
| - name: Synchronize docker-compose.yml | ||||
|   ansible.posix.synchronize: | ||||
|     src: "{{ docker_repos_path }}/{{ item.name }}/{{ item.path | default('docker-compose.yml') }}" | ||||
|     dest: "{{ docker_compose_root }}/{{ item.name }}/docker-compose.yml" | ||||
|   delegate_to: "{{ inventory_hostname }}" | ||||
|   register: compose_update | ||||
|   notify: | ||||
|     - compose_restart | ||||
|     - compose_enable | ||||
|   loop: "{{ docker_compose_deploy | default([]) }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.name }}" | ||||
|   when: docker_compose_deploy is defined and docker_compose_deploy | length > 0 | ||||
|  | ||||
| - name: Set environment variables for docker-compose projects | ||||
|   ansible.builtin.template: | ||||
|     src: docker-compose-env.j2 | ||||
|     dest: "{{ docker_compose_root }}/{{ item.name }}/.env" | ||||
|     mode: "400" | ||||
|   register: compose_env_update | ||||
|   notify: | ||||
|     - compose_restart | ||||
|     - compose_enable | ||||
|   no_log: "{{ docker_compose_env_nolog | default(true) }}" | ||||
|   loop: "{{ docker_compose_deploy }}" | ||||
|   loop_control: | ||||
|     label: "{{ item.name }}" | ||||
|   when: docker_compose_deploy is defined and item.env is defined | ||||
| - name: Add users to docker group | ||||
|   ansible.builtin.user: | ||||
|     name: "{{ item }}" | ||||
|     groups: docker | ||||
|     append: true | ||||
|   loop: "{{ docker_users }}" | ||||
|   when: docker_users is defined | ||||
|  | ||||
| - name: Start Docker and enable on boot | ||||
|   ansible.builtin.service: | ||||
|     name: docker | ||||
|     state: started | ||||
|     enabled: true | ||||
|   when: docker_managed | default(true) | ||||
|   | ||||
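The tasks above clone repositories, synchronize compose files, and template `.env` files driven by `docker_compose_deploy`. The structure below is a hedged sketch inferred from the fields the loops reference (`name`, `url`, `version`, `path`, `env`, `enabled`); the repository URL and values are illustrative only:

    docker_compose_deploy:
      - name: gitea                                  # directory created under docker_compose_root
        url: git@git.example.com:homelab/compose.git # cloned with the generated deploy key
        version: main                                # branch, tag, or commit for ansible.builtin.git
        path: docker-compose.yml                     # file synchronized into the service directory
        enabled: true                                # started/enabled via the compose@ systemd unit
        env:                                         # rendered to .env by docker-compose-env.j2
          EXAMPLE_DEBUG: true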
| @@ -1,10 +0,0 @@ | ||||
| # {{ ansible_managed }} | ||||
| {% if item.env is defined %} | ||||
| {% for key, value in item.env.items() %} | ||||
| {% if value is boolean %} | ||||
| {{ key }}={{ value | lower }} | ||||
| {% else %} | ||||
| {{ key }}={{ value }} | ||||
| {% endif %} | ||||
| {% endfor %} | ||||
| {% endif %} | ||||
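As a sketch of what this template produces: boolean values are lower-cased and everything else is written verbatim, so a hypothetical `env` mapping renders to plain KEY=value lines.

    # Hypothetical item.env input:
    env:
      EXAMPLE_DEBUG: true
      EXAMPLE_DB_HOST: "db:3306"
    # Rendered .env (roughly):
    #   EXAMPLE_DEBUG=true
    #   EXAMPLE_DB_HOST=db:3306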
| @@ -1,5 +1,5 @@ | ||||
| [Unit] | ||||
| Description=%i {{ docker_compose_service }} service | ||||
| Description=%i docker-compose service | ||||
| PartOf=docker.service | ||||
| After=docker.service | ||||
|  | ||||
|   | ||||
| @@ -1,27 +1,41 @@ | ||||
| - name: Install MySQL module for Ansible | ||||
|   ansible.builtin.apt: | ||||
|     name: python3-pymysql | ||||
|     state: present | ||||
| - name: Create Gitea directory | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ gitea_root }}" | ||||
|     state: directory | ||||
|  | ||||
| - name: Create Gitea database | ||||
|   community.mysql.mysql_db: | ||||
|     name: "{{ gitea.DB_NAME }}" | ||||
|     name: "{{ gitea_dbname }}" | ||||
|     state: present | ||||
|     login_unix_socket: /var/run/mysqld/mysqld.sock | ||||
|  | ||||
| - name: Create Gitea database user | ||||
|   community.mysql.mysql_user: | ||||
|     name: "{{ gitea.DB_USER }}" | ||||
|     password: "{{ gitea.DB_PASSWD }}" | ||||
|     name: "{{ gitea_dbuser }}" | ||||
|     password: "{{ gitea_dbpass }}" | ||||
|     host: '%' | ||||
|     state: present | ||||
|     priv: "{{ gitea.DB_NAME }}.*:ALL" | ||||
|     priv: "{{ gitea_dbname }}.*:ALL" | ||||
|     login_unix_socket: /var/run/mysqld/mysqld.sock | ||||
|  | ||||
| - name: Create git user | ||||
|   ansible.builtin.user: | ||||
|     name: git | ||||
|     state: present | ||||
|  | ||||
| - name: Git user uid | ||||
|   ansible.builtin.getent: | ||||
|     database: passwd | ||||
|     key: git | ||||
|  | ||||
| - name: Git user gid | ||||
|   ansible.builtin.getent: | ||||
|     database: group | ||||
|     key: git | ||||
|  | ||||
| - name: Create git's .ssh directory | ||||
|   ansible.builtin.file: | ||||
|     path: /home/git/.ssh | ||||
|     mode: "700" | ||||
|     state: directory | ||||
|  | ||||
| - name: Generate git's SSH keys | ||||
| @@ -41,7 +55,6 @@ | ||||
| - name: Create git's authorized_keys file | ||||
|   ansible.builtin.file: | ||||
|     path: /home/git/.ssh/authorized_keys | ||||
|     mode: "600" | ||||
|     state: touch | ||||
|   when: not git_authkeys.stat.exists | ||||
|  | ||||
| @@ -55,24 +68,44 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: gitea.sh.j2 | ||||
|     dest: /usr/local/bin/gitea | ||||
|     mode: "755" | ||||
|     mode: 0755 | ||||
|  | ||||
| - name: Install Gitea's docker-compose file | ||||
|   ansible.builtin.template: | ||||
|     src: docker-compose.yml.j2 | ||||
|     dest: "{{ gitea_root }}/docker-compose.yml" | ||||
|   notify: restart_gitea | ||||
|  | ||||
| - name: Install Gitea's docker-compose variables | ||||
|   ansible.builtin.template: | ||||
|     src: compose-env.j2 | ||||
|     dest: "{{ gitea_root }}/.env" | ||||
|   notify: restart_gitea | ||||
|  | ||||
| - name: Create Gitea's logging directory | ||||
|   ansible.builtin.file: | ||||
|     name: /var/log/gitea | ||||
|     state: directory | ||||
|     mode: "755" | ||||
|  | ||||
| - name: Create Gitea's initial log file | ||||
|   ansible.builtin.file: | ||||
|     name: /var/log/gitea/gitea.log | ||||
|     state: touch | ||||
|  | ||||
| - name: Install Gitea's Fail2ban filter | ||||
|   ansible.builtin.template: | ||||
|     src: fail2ban-filter.conf.j2 | ||||
|     dest: /etc/fail2ban/filter.d/gitea.conf | ||||
|     mode: "644" | ||||
|   notify: restart_fail2ban | ||||
|  | ||||
| - name: Install Gitea's Fail2ban jail | ||||
|   ansible.builtin.template: | ||||
|     src: fail2ban-jail.conf.j2 | ||||
|     dest: /etc/fail2ban/jail.d/gitea.conf | ||||
|     mode: "640" | ||||
|   notify: restart_fail2ban | ||||
|  | ||||
| - name: Start and enable Gitea service | ||||
|   ansible.builtin.service: | ||||
|     name: "{{ docker_compose_service }}@{{ gitea_name }}" | ||||
|     state: started | ||||
|     enabled: true | ||||
|   | ||||
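The Gitea tasks above assume a handful of role variables beyond what is shown. The names come from the tasks themselves; the values below are purely illustrative, and the password would normally live in Ansible Vault:

    gitea_name: gitea
    gitea_root: /var/lib/compose/gitea        # assumption: kept under docker_compose_root
    gitea_dbname: gitea
    gitea_dbuser: gitea
    gitea_dbpass: "{{ vault_gitea_dbpass }}"  # hypothetical vaulted secret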
| @@ -1,4 +1,5 @@ | ||||
| jellyfin_name: jellyfin | ||||
| jellyfin_volume: "{{ jellyfin_name }}" | ||||
| jellyfin_router: "{{ jellyfin_name }}" | ||||
| jellyfin_rooturl: "https://{{ jellyfin_domain }}" | ||||
| jellyfin_root: "{{ docker_compose_root }}/{{ jellyfin_name }}" | ||||
|   | ||||
| @@ -4,6 +4,11 @@ | ||||
|     state: directory | ||||
|     mode: 0500 | ||||
|  | ||||
| - name: Create jellyfin user | ||||
|   ansible.builtin.user: | ||||
|     name: jellyfin | ||||
|     state: present | ||||
|  | ||||
| - name: Get user jellyfin uid | ||||
|   ansible.builtin.getent: | ||||
|     database: passwd | ||||
|   | ||||
| @@ -1,8 +1,7 @@ | ||||
| version: '3.7' | ||||
|  | ||||
| volumes: | ||||
|   config: | ||||
|   cache: | ||||
|   {{ jellyfin_volume }}: | ||||
|  | ||||
| networks: | ||||
|   traefik: | ||||
| @@ -15,7 +14,7 @@ services: | ||||
|     networks: | ||||
|       - traefik | ||||
|     labels: | ||||
|       - "traefik.http.routers.{{ jellyfin_router }}.rule=Host({{ jellyfin_domains }})" | ||||
|       - "traefik.http.routers.{{ jellyfin_router }}.rule=Host(`{{ jellyfin_domain }}`)" | ||||
| {% if traefik_http_only %} | ||||
|       - "traefik.http.routers.{{ jellyfin_router }}.entrypoints=web" | ||||
| {% else %} | ||||
| @@ -25,6 +24,6 @@ services: | ||||
|       - "traefik.docker.network=traefik" | ||||
|       - "traefik.enable=true" | ||||
|     volumes: | ||||
|       - config:/config | ||||
|       - cache:/cache | ||||
|       - {{ jellyfin_media }}:/media | ||||
|       - ./config:/config | ||||
|       - ./cache:/cache | ||||
|       - {{ jellyfin_volume }}:/media | ||||
|   | ||||
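For reference, the Jellyfin template consumes a domain plus either a host media path or the named volume from the defaults above. A hedged sketch with illustrative values:

    jellyfin_domain: jellyfin.example.com  # used in the Traefik Host() rule
    jellyfin_media: /srv/media             # host path mapped to /media in the bind-mount variant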
							
								
								
									
roles/mariadb/defaults/main.yml (3 lines, new file)
							| @@ -0,0 +1,3 @@ | ||||
| mariadb_trust: | ||||
|   - "172.16.0.0/12" | ||||
|   - "192.168.0.0/16" | ||||
| @@ -1,12 +0,0 @@ | ||||
| - name: Restart MariaDB | ||||
|   ansible.builtin.service: | ||||
|     name: mariadb | ||||
|     state: restarted | ||||
|   when: not mariadb_restarted | ||||
|   listen: restart_mariadb | ||||
|  | ||||
| - name: Set MariaDB as restarted | ||||
|   ansible.builtin.set_fact: | ||||
|     mariadb_restarted: true | ||||
|   when: not mariadb_restarted | ||||
|   listen: restart_mariadb | ||||
| @@ -3,30 +3,23 @@ | ||||
|     name: mariadb-server | ||||
|     state: present | ||||
|  | ||||
| - name: Set MariaDB restarted fact | ||||
|   ansible.builtin.set_fact: | ||||
|     mariadb_restarted: false | ||||
|  | ||||
| - name: Regather facts for the potentially new docker0 interface | ||||
|   ansible.builtin.setup: | ||||
|  | ||||
| - name: Change the bind-address to allow from docker0 | ||||
| - name: Change the bind-address to allow Docker | ||||
|   ansible.builtin.lineinfile: | ||||
|     path: /etc/mysql/mariadb.conf.d/50-server.cnf | ||||
|     regex: "^bind-address" | ||||
|     line: "bind-address            = {{ ansible_facts.docker0.ipv4.address }}" | ||||
|   notify: restart_mariadb | ||||
|   when: ansible_facts.docker0 is defined | ||||
|     line: "bind-address            = 0.0.0.0" | ||||
|   register: mariadb_conf | ||||
|  | ||||
| - name: Flush handlers to ensure MariaDB restarts immediately | ||||
|   ansible.builtin.meta: flush_handlers | ||||
|   tags: restart_mariadb | ||||
|   when: ansible_facts.docker0 is defined | ||||
| - name: Restart MariaDB | ||||
|   ansible.builtin.service: | ||||
|     name: mariadb | ||||
|     state: restarted | ||||
|   when: mariadb_conf.changed | ||||
|  | ||||
| - name: Allow database connections from Docker | ||||
| - name: Allow database connections | ||||
|   community.general.ufw: | ||||
|     rule: allow | ||||
|     port: "3306" | ||||
|     proto: tcp | ||||
|     src: "{{ item }}" | ||||
|   loop: "{{ mariadb_trust | default(['172.16.0.0/12']) }}" | ||||
|   loop: "{{ mariadb_trust }}" | ||||
|   | ||||
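The UFW task only opens 3306/tcp to the networks listed in `mariadb_trust`, which defaults to the private/Docker ranges in the new defaults file. A minimal override sketch, assuming a single Docker bridge subnet:

    # group_vars override (hypothetical); narrows access to one bridge network
    mariadb_trust:
      - "172.18.0.0/16"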
| @@ -1 +1,11 @@ | ||||
| nextcloud_name: nextcloud | ||||
| # container names | ||||
| nextcloud_container: nextcloud | ||||
| nextcloud_dbcontainer: "{{ nextcloud_container }}-db" | ||||
|  | ||||
| # database settings | ||||
| nextcloud_dbname: "{{ nextcloud_container }}" | ||||
| nextcloud_dbuser: "{{ nextcloud_dbname }}" | ||||
|  | ||||
| # host mounts | ||||
| nextcloud_root: "/opt/{{ nextcloud_container }}/public_html" | ||||
| nextcloud_dbroot: "/opt/{{ nextcloud_container }}/database" | ||||
|   | ||||
| @@ -1,25 +0,0 @@ | ||||
| - name: Set Nextcloud's Trusted Proxy | ||||
|   ansible.builtin.command: > | ||||
|     docker exec --user www-data "{{ nextcloud_name }}" | ||||
|       php occ config:system:set trusted_proxies 0 --value="{{ traefik_name }}" | ||||
|   register: nextcloud_trusted_proxy | ||||
|   changed_when: "nextcloud_trusted_proxy.stdout == 'System config value trusted_proxies => 0 set to string ' ~ traefik_name" | ||||
|   listen: install_nextcloud | ||||
|  | ||||
| - name: Set Nextcloud's Trusted Domain | ||||
|   ansible.builtin.command: > | ||||
|     docker exec --user www-data "{{ nextcloud_name }}" | ||||
|       php occ config:system:set trusted_domains 0 --value="{{ nextcloud.DOMAIN }}" | ||||
|   register: nextcloud_trusted_domains | ||||
|   changed_when: "nextcloud_trusted_domains.stdout == 'System config value trusted_domains => 0 set to string ' ~ nextcloud.DOMAIN" | ||||
|   listen: install_nextcloud | ||||
|  | ||||
| - name: Perform Nextcloud database maintenance | ||||
|   ansible.builtin.command: > | ||||
|     docker exec --user www-data "{{ nextcloud_name }}" {{ item }} | ||||
|   loop: | ||||
|     - "php occ maintenance:mode --on" | ||||
|     - "php occ db:add-missing-indices" | ||||
|     - "php occ db:convert-filecache-bigint" | ||||
|     - "php occ maintenance:mode --off" | ||||
|   listen: install_nextcloud | ||||
| @@ -1,66 +1,109 @@ | ||||
| - name: Install MySQL module for Ansible | ||||
|   ansible.builtin.apt: | ||||
|     name: python3-pymysql | ||||
|     state: present | ||||
| - name: Create Nextcloud network | ||||
|   community.general.docker_network: | ||||
|     name: "{{ nextcloud_container }}" | ||||
|  | ||||
| - name: Create Nextcloud database | ||||
|   community.mysql.mysql_db: | ||||
|     name: "{{ nextcloud.DB_NAME | default('nextcloud') }}" | ||||
|     state: present | ||||
|     login_unix_socket: /var/run/mysqld/mysqld.sock | ||||
|  | ||||
| - name: Create Nextcloud database user | ||||
|   community.mysql.mysql_user: | ||||
|     name: "{{ nextcloud.DB_USER | default('nextcloud') }}" | ||||
|     password: "{{ nextcloud.DB_PASSWD }}" | ||||
|     host: '%' | ||||
|     state: present | ||||
|     priv: "{{ nextcloud.DB_NAME | default('nextcloud') }}.*:ALL" | ||||
|     login_unix_socket: /var/run/mysqld/mysqld.sock | ||||
|  | ||||
| - name: Start Nextcloud service and enable on boot | ||||
|   ansible.builtin.service: | ||||
|     name: "{{ docker_compose_service }}@{{ nextcloud_name }}" | ||||
| - name: Start Nextcloud's database container | ||||
|   community.general.docker_container: | ||||
|     name: "{{ nextcloud_dbcontainer }}" | ||||
|     image: mariadb:{{ nextcloud_dbversion }} | ||||
|     state: started | ||||
|     enabled: true | ||||
|   when: nextcloud.ENABLE | default('false') | ||||
|     restart_policy: always | ||||
|     volumes: "{{ nextcloud_dbroot }}:/var/lib/mysql" | ||||
|     networks_cli_compatible: true | ||||
|     networks: | ||||
|       - name: "{{ nextcloud_container }}" | ||||
|     env: | ||||
|       MYSQL_RANDOM_ROOT_PASSWORD: "true" | ||||
|       MYSQL_DATABASE: "{{ nextcloud_dbname }}" | ||||
|       MYSQL_USER: "{{ nextcloud_dbuser }}" | ||||
|       MYSQL_PASSWORD: "{{ nextcloud_dbpass }}" | ||||
|  | ||||
| - name: Start Nextcloud container | ||||
|   community.general.docker_container: | ||||
|     name: "{{ nextcloud_container }}" | ||||
|     image: nextcloud:{{ nextcloud_version }} | ||||
|     state: started | ||||
|     restart_policy: always | ||||
|     volumes: "{{ nextcloud_root }}:/var/www/html" | ||||
|     networks_cli_compatible: true | ||||
|     networks: | ||||
|       - name: "{{ nextcloud_container }}" | ||||
|       - name: traefik | ||||
|     labels: | ||||
|       traefik.http.routers.nextcloud.rule: "Host(`{{ nextcloud_domain }}`)" | ||||
|       traefik.http.routers.nextcloud.entrypoints: websecure | ||||
|       traefik.http.routers.nextcloud.tls.certresolver: letsencrypt | ||||
|       traefik.http.routers.nextcloud.middlewares: "securehttps@file,nextcloud-webdav" | ||||
|       traefik.http.middlewares.nextcloud-webdav.redirectregex.regex: "https://(.*)/.well-known/(card|cal)dav" | ||||
|       traefik.http.middlewares.nextcloud-webdav.redirectregex.replacement: "https://${1}/remote.php/dav/" | ||||
|       traefik.http.middlewares.nextcloud-webdav.redirectregex.permanent: "true" | ||||
|       traefik.docker.network: traefik | ||||
|       traefik.enable: "true" | ||||
|  | ||||
| - name: Grab Nextcloud database container information | ||||
|   community.general.docker_container_info: | ||||
|     name: "{{ nextcloud_dbcontainer }}" | ||||
|   register: nextcloud_dbinfo | ||||
|  | ||||
| - name: Grab Nextcloud container information | ||||
|   community.general.docker_container_info: | ||||
|     name: "{{ nextcloud_name }}" | ||||
|     name: "{{ nextcloud_container }}" | ||||
|   register: nextcloud_info | ||||
|  | ||||
| - name: Wait for Nextcloud to become available | ||||
|   ansible.builtin.wait_for: | ||||
|     host: "{{ nextcloud_info.container.NetworkSettings.Networks.traefik.IPAddress }}" | ||||
|     delay: 10 | ||||
|     port: 80 | ||||
|  | ||||
| - name: Check Nextcloud status | ||||
|   ansible.builtin.command: > | ||||
|     docker exec --user www-data "{{ nextcloud_name }}" php occ status | ||||
|   ansible.builtin.command: "docker exec --user www-data {{ nextcloud_container }} | ||||
|             php occ status" | ||||
|   register: nextcloud_status | ||||
|   changed_when: false | ||||
|   args: | ||||
|     removes: "{{ nextcloud_root }}/config/CAN_INSTALL" | ||||
|  | ||||
| - name: Wait for Nextcloud database to become available | ||||
|   ansible.builtin.wait_for: | ||||
|     host: "{{ nextcloud_dbinfo.container.NetworkSettings.Networks.nextcloud.IPAddress }}" | ||||
|     port: 3306 | ||||
|  | ||||
| - name: Install Nextcloud | ||||
|   ansible.builtin.command: > | ||||
|     docker exec --user www-data {{ nextcloud_name }} | ||||
|   ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }} | ||||
|             php occ maintenance:install | ||||
|               --database "mysql" | ||||
|         --database-host "{{ nextcloud.DB_HOST | default('host.docker.internal') }}" | ||||
|         --database-name "{{ nextcloud.DB_NAME | default('nextcloud') }}" | ||||
|         --database-user "{{ nextcloud.DB_USER | default('nextcloud') }}" | ||||
|         --database-pass "{{ nextcloud.DB_PASSWD }}" | ||||
|         --admin-user "{{ nextcloud.ADMIN_USER | default('admin') }}" | ||||
|         --admin-pass "{{ nextcloud.ADMIN_PASSWD }}" | ||||
|               --database-host "{{ nextcloud_dbcontainer }}" | ||||
|               --database-name "{{ nextcloud_dbname }}" | ||||
|               --database-user "{{ nextcloud_dbuser }}" | ||||
|               --database-pass "{{ nextcloud_dbpass }}" | ||||
|               --admin-user "{{ nextcloud_admin }}" | ||||
|               --admin-pass "{{ nextcloud_pass }}"' | ||||
|   register: nextcloud_install | ||||
|   when: nextcloud_status.stderr[:26] == "Nextcloud is not installed" | ||||
|   changed_when: nextcloud_install.stdout == "Nextcloud was successfully installed" | ||||
|   notify: install_nextcloud | ||||
|   when: | ||||
|     - nextcloud_status.stdout[:26] == "Nextcloud is not installed" | ||||
|     - nextcloud_domain is defined | ||||
|  | ||||
| - name: Install Nextcloud background jobs cron | ||||
|   ansible.builtin.cron: | ||||
|     name: Nextcloud background job | ||||
|     minute: "*/5" | ||||
|     job: "/usr/bin/docker exec -u www-data nextcloud /usr/local/bin/php -f /var/www/html/cron.php" | ||||
|     user: root | ||||
| - name: Set Nextcloud's Trusted Proxy | ||||
|   ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }} | ||||
|             php occ config:system:set trusted_proxies 0 | ||||
|               --value="{{ traefik_name }}"' | ||||
|   when: nextcloud_install.changed | ||||
|  | ||||
| - name: Set Nextcloud's Trusted Domain | ||||
|   ansible.builtin.command: 'docker exec --user www-data {{ nextcloud_container }} | ||||
|             php occ config:system:set trusted_domains 0 | ||||
|               --value="{{ nextcloud_domain }}"' | ||||
|   when: nextcloud_install.changed | ||||
|  | ||||
| - name: Perform Nextcloud database maintenance | ||||
|   ansible.builtin.command: "docker exec --user www-data {{ nextcloud_container }} {{ item }}" | ||||
|   loop: | ||||
|     - "php occ maintenance:mode --on" | ||||
|     - "php occ db:add-missing-indices" | ||||
|     - "php occ db:convert-filecache-bigint" | ||||
|     - "php occ maintenance:mode --off" | ||||
|   when: nextcloud_install.changed | ||||
|  | ||||
| - name: Remove Nextcloud's CAN_INSTALL file | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ nextcloud_root }}/config/CAN_INSTALL" | ||||
|     state: absent | ||||
|   | ||||
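The container-based Nextcloud tasks reference several variables that are not in the defaults shown earlier. The names are taken from the tasks; the values here are illustrative, and the secrets would normally be vaulted:

    nextcloud_version: "27-apache"       # hypothetical image tag for nextcloud:<tag>
    nextcloud_dbversion: "10.11"         # hypothetical tag for mariadb:<tag>
    nextcloud_dbpass: "{{ vault_nextcloud_dbpass }}"
    nextcloud_admin: admin
    nextcloud_pass: "{{ vault_nextcloud_admin_pass }}"
    nextcloud_domain: cloud.example.com  # also drives the Traefik Host() rule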
| @@ -1,4 +0,0 @@ | ||||
| # Default configuration for podman role | ||||
| podman_repos_keytype: ed25519 | ||||
| podman_ssh_key_path: "{{ ansible_user_dir }}/.ssh" | ||||
| podman_nodocker: false | ||||
| @@ -1,54 +0,0 @@ | ||||
| - name: Reload systemd manager configuration | ||||
|   ansible.builtin.systemd: | ||||
|     daemon_reload: true | ||||
|     scope: user | ||||
|   become: true | ||||
|   become_user: "{{ podman_user }}" | ||||
|   listen: podman_compose_systemd | ||||
|  | ||||
| - name: Find which services had a podman-compose.yml updated | ||||
|   ansible.builtin.set_fact: | ||||
|     podman_compose_restart_list: | ||||
|       "{{ (podman_compose_restart_list | default([])) + [{'user': podman_user, | ||||
|       'service': item.item.name}] }}" | ||||
|   loop: "{{ podman_compose_update.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ podman_user }}/{{ item.item.name }}" | ||||
|   when: item.changed | ||||
|   listen: podman_compose_restart | ||||
|  | ||||
| - name: Find which services had their .env updated | ||||
|   ansible.builtin.set_fact: | ||||
|     podman_compose_restart_list: | ||||
|       "{{ (podman_compose_restart_list | default([])) + [{'user': podman_user, | ||||
|       'service': item.item.name}] }}" | ||||
|   loop: "{{ podman_compose_env_update.results }}" | ||||
|   loop_control: | ||||
|     label: "{{ podman_user }}/{{ item.item.name }}" | ||||
|   when: item.changed | ||||
|   listen: podman_compose_restart | ||||
|  | ||||
| - name: Restart podman-compose services | ||||
|   ansible.builtin.systemd: | ||||
|     state: restarted | ||||
|     name: "podman-compose@{{ item.service }}" | ||||
|     scope: user | ||||
|   become: true | ||||
|   become_user: "{{ item.user }}" | ||||
|   loop: "{{ podman_compose_restart_list | default([]) | unique }}" | ||||
|   when: podman_compose_restart_list is defined | ||||
|   listen: podman_compose_restart | ||||
|  | ||||
| - name: Start podman-compose services and enable on boot | ||||
|   ansible.builtin.systemd: | ||||
|     name: "podman-compose@{{ item.name }}" | ||||
|     state: started | ||||
|     enabled: true | ||||
|     scope: user | ||||
|   become: true | ||||
|   become_user: "{{ podman_user }}" | ||||
|   loop: "{{ podman_compose }}" | ||||
|   loop_control: | ||||
|     label: "{{ podman_user }}/{{ item.name }}" | ||||
|   when: item.enabled is defined and item.enabled is true | ||||
|   listen: podman_compose_enable | ||||
| @@ -1,182 +0,0 @@ | ||||
| - name: Get user info for podman compose user | ||||
|   ansible.builtin.getent: | ||||
|     database: passwd | ||||
|     key: "{{ podman_user }}" | ||||
|   register: podman_user_info | ||||
|  | ||||
| - name: Set user-specific variables | ||||
|   ansible.builtin.set_fact: | ||||
|     podman_rootdir: "{{ podman_compose_config.root }}" | ||||
|     podman_userid: "{{ podman_user_info.ansible_facts.getent_passwd[podman_user][1] }}" | ||||
|     podman_compose: "{{ podman_compose_config.compose }}" | ||||
|     podman_repos: "{{ podman_compose_config.root }}/.compose_repos" | ||||
|  | ||||
| - name: Create podman-compose root directory for user | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ podman_rootdir }}" | ||||
|     state: directory | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0700" | ||||
|  | ||||
| - name: Create user systemd directory | ||||
|   ansible.builtin.file: | ||||
|     path: "/home/{{ podman_user }}/.config/systemd/user" | ||||
|     state: directory | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0755" | ||||
|  | ||||
| - name: Install podman-compose systemd service for user | ||||
|   ansible.builtin.template: | ||||
|     src: podman-compose.service.j2 | ||||
|     dest: "/home/{{ podman_user }}/.config/systemd/user/podman-compose@.service" | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0644" | ||||
|   notify: podman_compose_systemd | ||||
|  | ||||
| - name: Create directories for cloning podman-compose repositories | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ repo_dir }}" | ||||
|     state: directory | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0700" | ||||
|   loop: | ||||
|     - "{{ podman_repos }}" | ||||
|   loop_control: | ||||
|     loop_var: repo_dir | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Create .ssh directory for podman compose user | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ podman_ssh_key_path }}" | ||||
|     state: directory | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0700" | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Generate OpenSSH deploy keys for podman-compose clones | ||||
|   community.crypto.openssh_keypair: | ||||
|     path: "{{ podman_ssh_key_path }}/podman-id_{{ podman_repos_keytype }}" | ||||
|     type: "{{ podman_repos_keytype }}" | ||||
|     comment: "{{ ansible_hostname }}-{{ podman_user }}-deploy-key" | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0600" | ||||
|     state: present | ||||
|   when: podman_compose is defined | ||||
|  | ||||
| - name: Import trusted GPG keys for podman-compose projects | ||||
|   ansible.builtin.command: | ||||
|     cmd: "gpg --keyserver {{ key.keyserver | default('keys.openpgp.org') }} --recv-key {{ key.id }}" | ||||
|   become: true | ||||
|   become_user: "{{ podman_user }}" | ||||
|   loop: "{{ podman_compose_config.trusted_keys }}" | ||||
|   loop_control: | ||||
|     loop_var: key | ||||
|     label: "{{ key.id }}" | ||||
|   changed_when: false | ||||
|   when: podman_compose_config.trusted_keys is defined | ||||
|  | ||||
| - name: Clone external podman-compose projects | ||||
|   ansible.builtin.git: | ||||
|     repo: "{{ project.url }}" | ||||
|     dest: "{{ podman_repos }}/{{ project.name }}" | ||||
|     version: "{{ project.version }}" | ||||
|     accept_newhostkey: "{{ project.accept_newhostkey | default(false) }}" | ||||
|     gpg_whitelist: "{{ (project.trusted_keys | default(podman_compose_config.trusted_keys | default([]))) | map(attribute='id') | list }}" | ||||
|     verify_commit: >- | ||||
|       {{ true if (project.trusted_keys is defined and project.trusted_keys) or | ||||
|          (podman_compose_config.trusted_keys is defined and podman_compose_config.trusted_keys) | ||||
|          else false }} | ||||
|     key_file: "{{ podman_ssh_key_path }}/podman-id_{{ podman_repos_keytype }}" | ||||
|   become: true | ||||
|   become_user: "{{ podman_user }}" | ||||
|   loop: "{{ podman_compose }}" | ||||
|   loop_control: | ||||
|     loop_var: project | ||||
|     label: "{{ project.url }}" | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Create directories for podman-compose projects | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ podman_rootdir }}/{{ project.name }}" | ||||
|     state: directory | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0700" | ||||
|   loop: "{{ podman_compose }}" | ||||
|   loop_control: | ||||
|     loop_var: project | ||||
|     label: "{{ project.name }}" | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Synchronize podman-compose.yml (or docker-compose.yml) | ||||
|   ansible.posix.synchronize: | ||||
|     src: "{{ podman_repos }}/{{ project.name }}/{{ project.path | default('docker-compose.yml') }}" | ||||
|     dest: "{{ podman_rootdir }}/{{ project.name }}/docker-compose.yml" | ||||
|     owner: false | ||||
|     group: false | ||||
|   delegate_to: "{{ inventory_hostname }}" | ||||
|   register: podman_compose_update | ||||
|   notify: | ||||
|     - podman_compose_restart | ||||
|     - podman_compose_enable | ||||
|   loop: "{{ podman_compose | default([]) }}" | ||||
|   loop_control: | ||||
|     loop_var: project | ||||
|     label: "{{ project.name }}" | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Fix ownership of synchronized compose files | ||||
|   ansible.builtin.file: | ||||
|     path: "{{ podman_rootdir }}/{{ project.name }}/docker-compose.yml" | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0600" | ||||
|   loop: "{{ podman_compose | default([]) }}" | ||||
|   loop_control: | ||||
|     loop_var: project | ||||
|     label: "{{ project.name }}" | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
|  | ||||
| - name: Set environment variables for podman-compose projects | ||||
|   ansible.builtin.template: | ||||
|     src: podman-compose-env.j2 | ||||
|     dest: "{{ podman_rootdir }}/{{ project.name }}/.env" | ||||
|     owner: "{{ podman_user }}" | ||||
|     group: "{{ podman_user }}" | ||||
|     mode: "0600" | ||||
|   register: podman_compose_env_update | ||||
|   notify: | ||||
|     - podman_compose_restart | ||||
|     - podman_compose_enable | ||||
|   no_log: true | ||||
|   loop: "{{ podman_compose }}" | ||||
|   loop_control: | ||||
|     loop_var: project | ||||
|     label: "{{ project.name }}" | ||||
|   when: podman_compose is defined and project.env is defined | ||||
|  | ||||
| - name: Enable lingering for podman compose user | ||||
|   ansible.builtin.command: | ||||
|     cmd: "loginctl enable-linger {{ podman_user }}" | ||||
|   changed_when: false | ||||
|   when: | ||||
|     - podman_compose is defined | ||||
|     - podman_compose | length > 0 | ||||
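These per-user tasks are included with `podman_user` and `podman_compose_config` set from a `podman_compose_deploy` dictionary (see the role's main tasks further down). A hedged sketch of that structure; the usernames, paths, and key id are illustrative only:

    podman_compose_deploy:
      alice:                                  # username; becomes podman_user
        root: /home/alice/.compose            # podman_rootdir for this user
        trusted_keys:
          - id: 0123456789ABCDEF              # illustrative GPG key id used for verify_commit
        compose:                              # list consumed as podman_compose
          - name: freshrss
            url: git@git.example.com:alice/freshrss.git
            version: main
            enabled: true
            env:
              TZ: Etc/UTC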
| @@ -1,77 +0,0 @@ | ||||
| - name: Install Podman | ||||
|   ansible.builtin.apt: | ||||
|     name: ["podman", "podman-compose", "podman-docker"] | ||||
|     state: present | ||||
|  | ||||
| - name: Get user info for namespace users | ||||
|   ansible.builtin.getent: | ||||
|     database: passwd | ||||
|     key: "{{ item }}" | ||||
|   loop: "{{ user_namespaces }}" | ||||
|   register: user_info | ||||
|  | ||||
| - name: Configure /etc/subuid for rootless users | ||||
|   ansible.builtin.lineinfile: | ||||
|     path: "/etc/subuid" | ||||
|     line: | ||||
|       "{{ item.item }}:{{ 100000 + | ||||
|       ((item.ansible_facts.getent_passwd[item.item][1] | int - 1000) * 65536) | ||||
|       }}:65536" | ||||
|     regexp: "^{{ item.item }}:" | ||||
|     create: true | ||||
|     backup: true | ||||
|     mode: "0644" | ||||
|   loop: "{{ user_info.results }}" | ||||
|  | ||||
| - name: Configure /etc/subgid for rootless users | ||||
|   ansible.builtin.lineinfile: | ||||
|     path: "/etc/subgid" | ||||
|     line: | ||||
|       "{{ item.item }}:{{ 100000 + | ||||
|       ((item.ansible_facts.getent_passwd[item.item][1] | int - 1000) * 65536) | ||||
|       }}:65536" | ||||
|     regexp: "^{{ item.item }}:" | ||||
|     create: true | ||||
|     backup: true | ||||
|     mode: "0644" | ||||
|   loop: "{{ user_info.results }}" | ||||
|  | ||||
| - name: Create nodocker file to disable Docker CLI emulation message | ||||
|   ansible.builtin.file: | ||||
|     path: /etc/containers/nodocker | ||||
|     state: touch | ||||
|     owner: root | ||||
|     group: root | ||||
|     mode: "0644" | ||||
|   when: podman_nodocker | bool | ||||
|  | ||||
| - name: Create global containers config directory | ||||
|   ansible.builtin.file: | ||||
|     path: /etc/containers | ||||
|     state: directory | ||||
|     mode: "0755" | ||||
|  | ||||
| - name: Configure global containers.conf for rootless | ||||
|   ansible.builtin.copy: | ||||
|     content: | | ||||
|       [engine] | ||||
|       cgroup_manager = "cgroupfs" | ||||
|       events_logger = "journald" | ||||
|       runtime = "crun" | ||||
|     dest: /etc/containers/containers.conf | ||||
|     mode: "0644" | ||||
|     backup: true | ||||
|  | ||||
| - name: Install git for repository cloning | ||||
|   ansible.builtin.apt: | ||||
|     name: git | ||||
|     state: present | ||||
|   when: podman_compose_deploy is defined | ||||
|  | ||||
| - name: Deploy Podman compose projects for each user | ||||
|   ansible.builtin.include_tasks: deploy.yml | ||||
|   vars: | ||||
|     podman_user: "{{ item.key }}" | ||||
|     podman_compose_config: "{{ item.value }}" | ||||
|   loop: "{{ podman_compose_deploy | dict2items }}" | ||||
|   when: podman_compose_deploy is defined | ||||
| @@ -1,10 +0,0 @@ | ||||
| # {{ ansible_managed }} | ||||
| {% if item.env is defined %} | ||||
| {% for key, value in item.env.items() %} | ||||
| {% if value is boolean %} | ||||
| {{ key }}={{ value | lower }} | ||||
| {% else %} | ||||
| {{ key }}={{ value }} | ||||
| {% endif %} | ||||
| {% endfor %} | ||||
| {% endif %} | ||||
| @@ -1,17 +0,0 @@ | ||||
| [Unit] | ||||
| Description=%i podman-compose service for {{ podman_user }} | ||||
| After=network-online.target | ||||
| Wants=network-online.target | ||||
|  | ||||
| [Service] | ||||
| Type=oneshot | ||||
| RemainAfterExit=true | ||||
| WorkingDirectory={{ podman_rootdir }}/%i | ||||
| ExecStart=/usr/bin/podman-compose up -d --remove-orphans | ||||
| ExecStop=/usr/bin/podman-compose down | ||||
| Environment="PODMAN_USERNS=keep-id" | ||||
| Environment="PODMAN_SOCKET_PATH=/run/user/{{ podman_userid }}/podman/podman.sock" | ||||
| TimeoutStartSec=0 | ||||
|  | ||||
| [Install] | ||||
| WantedBy=default.target | ||||
| @@ -1 +0,0 @@ | ||||
| cached_dhparams_pem: /vagrant/scratch/dhparams.pem | ||||
| @@ -1,13 +1,3 @@ | ||||
| - name: Enable nginx sites configuration | ||||
|   ansible.builtin.file: | ||||
|     src: "/etc/nginx/sites-available/{{ item.item.domain }}.conf" | ||||
|     dest: "/etc/nginx/sites-enabled/{{ item.item.domain }}.conf" | ||||
|     state: link | ||||
|     mode: "400" | ||||
|   loop: "{{ nginx_sites.results }}" | ||||
|   when: item.changed | ||||
|   listen: reload_nginx | ||||
|  | ||||
| - name: Reload nginx | ||||
|   ansible.builtin.service: | ||||
|     name: nginx | ||||
|   | ||||
| @@ -10,19 +10,6 @@ | ||||
|     state: started | ||||
|     enabled: true | ||||
|  | ||||
| - name: Check for cached dhparams.pem file | ||||
|   ansible.builtin.stat: | ||||
|     path: "{{ cached_dhparams_pem }}" | ||||
|   register: dhparams_file | ||||
|  | ||||
| - name: Copy cached dhparams.pem to /etc/ssl/ | ||||
|   ansible.builtin.copy: | ||||
|     src: "{{ cached_dhparams_pem }}" | ||||
|     dest: /etc/ssl/dhparams.pem | ||||
|     mode: "600" | ||||
|     remote_src: true | ||||
|   when: dhparams_file.stat.exists | ||||
|  | ||||
| - name: Generate DH Parameters | ||||
|   community.crypto.openssl_dhparam: | ||||
|     path: /etc/ssl/dhparams.pem | ||||
| @@ -32,21 +19,30 @@ | ||||
|   ansible.builtin.template: | ||||
|     src: nginx.conf.j2 | ||||
|     dest: /etc/nginx/nginx.conf | ||||
|     mode: "644" | ||||
|     mode: '0644' | ||||
|   notify: reload_nginx | ||||
|  | ||||
| - name: Install nginx sites configuration | ||||
|   ansible.builtin.template: | ||||
|     src: server-nginx.conf.j2 | ||||
|     dest: "/etc/nginx/sites-available/{{ item.domain }}.conf" | ||||
|     mode: "400" | ||||
|     mode: '0644' | ||||
|   loop: "{{ proxy.servers }}" | ||||
|   notify: reload_nginx | ||||
|   register: nginx_sites | ||||
|  | ||||
| - name: Enable nginx sites configuration | ||||
|   ansible.builtin.file: | ||||
|     src: "/etc/nginx/sites-available/{{ item.item.domain }}.conf" | ||||
|     dest: "/etc/nginx/sites-enabled/{{ item.item.domain }}.conf" | ||||
|     state: link | ||||
|     mode: 0400 | ||||
|   loop: "{{ nginx_sites.results }}" | ||||
|   when: item.changed | ||||
|   notify: reload_nginx | ||||
|  | ||||
| - name: Generate self-signed certificate | ||||
|   ansible.builtin.command: | ||||
|     'openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes \ | ||||
|   ansible.builtin.command: 'openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes \ | ||||
|           -subj   "/C=US/ST=Local/L=Local/O=Org/OU=IT/CN=example.com" \ | ||||
|           -keyout /etc/ssl/private/nginx-selfsigned.key \ | ||||
|           -out    /etc/ssl/certs/nginx-selfsigned.crt' | ||||
| @@ -57,36 +53,29 @@ | ||||
|  | ||||
| - name: Install LE's certbot | ||||
|   ansible.builtin.apt: | ||||
|     name: ["certbot", "python3-certbot-dns-cloudflare"] | ||||
|     name: ['certbot', 'python3-certbot-dns-cloudflare'] | ||||
|     state: present | ||||
|   when: proxy.production is defined and proxy.production | ||||
|  | ||||
| - name: Grab Cloudflare API token for configuration | ||||
|   ansible.builtin.slurp: | ||||
|     src: /root/.cloudflare-api | ||||
|   register: cfapi | ||||
|   when: proxy.production is defined and proxy.production and proxy.dns_cloudflare is defined | ||||
|  | ||||
| - name: Install Cloudflare API token | ||||
|   ansible.builtin.template: | ||||
|     src: cloudflare.ini.j2 | ||||
|     dest: /root/.cloudflare.ini | ||||
|     mode: "400" | ||||
|   diff: false | ||||
|     mode: 0400 | ||||
|   when: proxy.production is defined and proxy.production and proxy.dns_cloudflare is defined | ||||
|  | ||||
| - name: Create nginx post renewal hook directory | ||||
|   ansible.builtin.file: | ||||
|     path: /etc/letsencrypt/renewal-hooks/post | ||||
|     state: directory | ||||
|     mode: "500" | ||||
|     mode: 0500 | ||||
|   when: proxy.production is defined and proxy.production | ||||
|  | ||||
| - name: Install nginx post renewal hook | ||||
|   ansible.builtin.copy: | ||||
|     src: reload-nginx.sh | ||||
|     dest: /etc/letsencrypt/renewal-hooks/post/reload-nginx.sh | ||||
|     mode: "0755" | ||||
|     mode: '0755' | ||||
|   when: proxy.production is defined and proxy.production | ||||
|  | ||||
| - name: Run Cloudflare DNS-01 challenges on wildcard domains | ||||
|   | ||||
| @@ -1,2 +1,2 @@ | ||||
| # Cloudflare API token used by Certbot | ||||
| dns_cloudflare_api_token = {{ cfapi['content'] | b64decode | trim }} | ||||
| dns_cloudflare_api_token = {{ proxy.dns_cloudflare.api_token }} | ||||
|   | ||||
| @@ -28,19 +28,13 @@ server { | ||||
|   ssl_certificate     /etc/ssl/certs/nginx-selfsigned.crt; | ||||
|   ssl_certificate_key /etc/ssl/private/nginx-selfsigned.key; | ||||
| {% endif %} | ||||
| {% if item.hsts is defined %} | ||||
|   add_header Strict-Transport-Security "max-age={{ item.hsts }}" always; | ||||
| {% endif %} | ||||
| {% if item.client_max_body_size is defined %} | ||||
|   client_max_body_size {{ item.client_max_body_size }}; | ||||
| {% endif %} | ||||
|   location / { | ||||
| {% if item.hsts is defined %} | ||||
|     add_header Strict-Transport-Security "max-age={{ item.hsts }}" always; | ||||
| {% endif %} | ||||
| {% if item.allowedips is defined %} | ||||
| {% for ip in item.allowedips %} | ||||
|     allow {{ ip }}; | ||||
| {% endfor %} | ||||
|     deny all; | ||||
| {% endif %} | ||||
| {% if item.restrict is defined and item.restrict  %} | ||||
|     auth_basic "{{ item.restrict_name | default('Restricted Access') }}"; | ||||
|     auth_basic_user_file {{ item.restrict_file | default('/etc/nginx/.htpasswd') }}; | ||||
| @@ -49,7 +43,6 @@ server { | ||||
|     proxy_set_header Host $host; | ||||
|     proxy_set_header X-Real-IP $remote_addr; | ||||
|     proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; | ||||
|     proxy_set_header X-Forwarded-Proto $scheme; | ||||
|     proxy_pass {{ item.proxy_pass }}; | ||||
| {% if item.proxy_ssl_verify is defined and item.proxy_ssl_verify is false %} | ||||
|     proxy_ssl_verify off; | ||||
|   | ||||
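The nginx site template iterates `proxy.servers`; the keys below are the ones it checks (`domain`, `proxy_pass`, `hsts`, `client_max_body_size`, `allowedips`, `restrict`, `proxy_ssl_verify`), with illustrative values only:

    proxy:
      production: false                     # certbot/Cloudflare tasks only run when true
      servers:
        - domain: grafana.example.com
          proxy_pass: http://127.0.0.1:3000
          hsts: 63072000                    # max-age seconds for Strict-Transport-Security
          client_max_body_size: 50m
          allowedips:
            - 192.168.1.0/24                # everything else hits "deny all"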
| @@ -21,6 +21,20 @@ | ||||
|   loop: "{{ traefik_external }}" | ||||
|   when: traefik_external is defined | ||||
|  | ||||
| - name: Install Traefik's docker-compose file | ||||
|   ansible.builtin.template: | ||||
|     src: docker-compose.yml.j2 | ||||
|     dest: "{{ traefik_root }}/docker-compose.yml" | ||||
|     mode: 0400 | ||||
|   notify: restart_traefik | ||||
|  | ||||
| - name: Install Traefik's docker-compose variables | ||||
|   ansible.builtin.template: | ||||
|     src: compose-env.j2 | ||||
|     dest: "{{ traefik_root }}/.env" | ||||
|     mode: 0400 | ||||
|   notify: restart_traefik | ||||
|  | ||||
| - name: Install static Traefik configuration | ||||
|   ansible.builtin.template: | ||||
|     src: traefik.yml.j2 | ||||
| @@ -28,9 +42,8 @@ | ||||
|     mode: 0400 | ||||
|   notify: restart_traefik | ||||
|  | ||||
| - name: Start Traefik service and enable on boot | ||||
| - name: Start and enable Traefik service | ||||
|   ansible.builtin.service: | ||||
|     name: "{{ docker_compose_service }}@{{ traefik_name }}" | ||||
|     state: started | ||||
|     enabled: true | ||||
|   when: traefik.ENABLED | default('false') | ||||
|   | ||||
							
								
								
									
update-hosts.sh (42 lines, new executable file)
							| @@ -0,0 +1,42 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| COMMENT="Project Moxie" | ||||
| DOMAIN="vm.krislamo.org" | ||||
| HOST[0]="traefik.${DOMAIN}" | ||||
| HOST[1]="cloud.${DOMAIN}" | ||||
| HOST[2]="git.${DOMAIN}" | ||||
| HOST[3]="jenkins.${DOMAIN}" | ||||
| HOST[4]="prom.${DOMAIN}" | ||||
| HOST[5]="grafana.${DOMAIN}" | ||||
| HOST[6]="nginx.${DOMAIN}" | ||||
| HOST[7]="vault.${DOMAIN}" | ||||
| HOST[8]="wordpress.${DOMAIN}" | ||||
| HOST[9]="site1.wordpress.${DOMAIN}" | ||||
| HOST[10]="site2.wordpress.${DOMAIN}" | ||||
| HOST[11]="unifi.${DOMAIN}" | ||||
| HOST[12]="jellyfin.${DOMAIN}" | ||||
|  | ||||
| # Get Vagrantbox guest IP | ||||
| VAGRANT_OUTPUT=$(vagrant ssh -c "hostname -I | cut -d' ' -f2" 2>/dev/null) | ||||
|  | ||||
| # Remove ^M from the end | ||||
| [ ${#VAGRANT_OUTPUT} -gt 1 ] && IP=${VAGRANT_OUTPUT::-1} | ||||
|  | ||||
| echo "Purging project addresses from /etc/hosts" | ||||
| sudo sed -i "s/# $COMMENT//g" /etc/hosts | ||||
| for address in "${HOST[@]}"; do | ||||
|   sudo sed -i "/$address/d" /etc/hosts | ||||
| done | ||||
|  | ||||
| # Remove trailing newline | ||||
| sudo sed -i '${/^$/d}' /etc/hosts | ||||
|  | ||||
| if [ -n "$IP" ]; then | ||||
|   echo -e "Adding new addresses...\n" | ||||
|   echo -e "# $COMMENT" | sudo tee -a /etc/hosts | ||||
|   for address in "${HOST[@]}"; do | ||||
|     echo -e "$IP\t$address" | sudo tee -a /etc/hosts | ||||
|   done | ||||
| else | ||||
|   echo "Cannot find address. Is the Vagrant box running?" | ||||
| fi | ||||