From 95b73daa36b23565a8566f71f9b202d3459b685f Mon Sep 17 00:00:00 2001
From: Sam Chudnick
Date: Sun, 25 Jun 2023 09:52:36 -0400
Subject: Initial Commit

---
 roles/linux_base/defaults/main.yml | 1 +
 roles/linux_base/handlers/main.yml | 16 ++
 roles/linux_base/tasks/main.yml | 57 +++++
 roles/proxmox/cloudinit_guest/defaults/main.yml | 7 +
 roles/proxmox/cloudinit_guest/tasks/main.yml | 80 ++++++
 roles/proxmox/debian_cloudinit/defaults/main.yml | 8 +
 roles/proxmox/debian_cloudinit/tasks/main.yml | 115 +++++++++
 roles/proxmox/fedora_cloudinit/defaults/main.yml | 8 +
 roles/proxmox/fedora_cloudinit/tasks/main.yml | 122 +++++++++
 roles/proxmox/proxmox_backup_server/tasks/main.yml | 42 +++
 roles/proxmox/pve_backup/tasks/main.yml | 17 ++
 roles/proxmox/system/defaults/main.yml | 8 +
 roles/proxmox/system/tasks/main.yml | 30 +++
 roles/proxmox/system/tasks/proxmox_repo.yml | 8 +
 roles/proxmox/system/tasks/user.yml | 28 ++
 roles/services/chronyd/handlers/main.yml | 4 +
 roles/services/chronyd/tasks/main.yml | 30 +++
 .../containers/arr_stack/handlers/main.yml | 4 +
 .../containers/arr_stack/tasks/gluetun.yml | 105 ++++++++
 .../services/containers/arr_stack/tasks/lidarr.yml | 93 +++++++
 roles/services/containers/arr_stack/tasks/main.yml | 130 ++++++++++
 .../containers/arr_stack/tasks/prowlarr.yml | 92 +++++++
 .../containers/arr_stack/tasks/qbittorrent.yml | 94 +++++++
 .../services/containers/arr_stack/tasks/radarr.yml | 93 +++++++
 .../containers/arr_stack/tasks/readarr.yml | 93 +++++++
 .../services/containers/arr_stack/tasks/sonarr.yml | 93 +++++++
 .../services/containers/authelia/handlers/main.yml | 4 +
 roles/services/containers/authelia/tasks/main.yml | 283 +++++++++++++++++++++
 .../containers/bookstack/handlers/main.yml | 4 +
 roles/services/containers/bookstack/tasks/main.yml | 118 +++++++++
 .../services/containers/cadvisor/handlers/main.yml | 4 +
 roles/services/containers/cadvisor/tasks/main.yml | 90 +++++++
 roles/services/containers/drawio/handlers/main.yml | 4 +
 roles/services/containers/drawio/tasks/main.yml | 149 +++++++++++
 .../services/containers/firefly/handlers/main.yml | 4 +
 roles/services/containers/firefly/tasks/main.yml | 172 +++++++++++++
 .../services/containers/freshrss/handlers/main.yml | 4 +
 roles/services/containers/freshrss/tasks/main.yml | 101 ++++++++
 roles/services/containers/gitea/handlers/main.yml | 4 +
 roles/services/containers/gitea/tasks/main.yml | 171 +++++++++++++
 .../containers/home_assistant/handlers/main.yml | 4 +
 .../containers/home_assistant/tasks/main.yml | 86 +++++++
 roles/services/containers/homer/handlers/main.yml | 4 +
 roles/services/containers/homer/tasks/main.yml | 122 +++++++++
 .../containers/invidious/handlers/main.yml | 29 +++
 roles/services/containers/invidious/tasks/main.yml | 124 +++++++++
 .../services/containers/jellyfin/handlers/main.yml | 4 +
 roles/services/containers/jellyfin/tasks/main.yml | 159 ++++++++++++
 .../services/containers/kanboard/handlers/main.yml | 18 ++
 roles/services/containers/kanboard/tasks/main.yml | 93 +++++++
 .../containers/navidrome/handlers/main.yml | 4 +
 roles/services/containers/navidrome/tasks/main.yml | 117 +++++++++
 .../containers/nextcloud/handlers/main.yml | 4 +
 roles/services/containers/nextcloud/tasks/main.yml | 184 ++++++++++++++
 .../containers/photoprism/defaults/main.yml | 10 +
 .../containers/photoprism/handlers/main.yml | 4 +
 .../services/containers/photoprism/tasks/main.yml | 115 +++++++++
 .../containers/pihole_exporter/tasks/main.yml | 97 +++++++
 .../containers/pywttr_docker/handlers/main.yml | 18 ++
 .../containers/pywttr_docker/tasks/main.yml | 74 ++++++
 roles/services/containers/renovate/tasks/main.yml | 87 +++++++
 .../services/containers/searxng/handlers/main.yml | 4 +
 roles/services/containers/searxng/tasks/main.yml | 170 +++++++++++++
 .../containers/text_generation/handlers/main.yml | 29 +++
 .../containers/text_generation/tasks/main.yml | 89 +++++++
 .../containers/vaultwarden/handlers/main.yml | 4 +
 .../services/containers/vaultwarden/tasks/main.yml | 79 ++++++
 roles/services/docker_rootless/defaults/main.yml | 18 ++
 roles/services/docker_rootless/handlers/main.yml | 6 +
 roles/services/docker_rootless/tasks/main.yml | 93 +++++++
 roles/services/freeipa/client/defaults/main.yml | 0
 roles/services/freeipa/client/tasks/main.yml | 4 +
 roles/services/freeipa/server/defaults/main.yml | 1 +
 roles/services/freeipa/server/tasks/main.yml | 43 ++++
 roles/services/game_server/handlers/main.yml | 71 ++++++
 roles/services/game_server/tasks/main.yml | 223 ++++++++++++++++
 roles/services/jenkins/handlers/main.yml | 13 +
 roles/services/jenkins/tasks/main.yml | 184 ++++++++++++++
 .../services/monitoring/grafana/defaults/main.yml | 5 +
 .../services/monitoring/grafana/handlers/main.yml | 13 +
 roles/services/monitoring/grafana/tasks/main.yml | 125 +++++++++
 .../services/monitoring/influxdb/defaults/main.yml | 6 +
 .../services/monitoring/influxdb/handlers/main.yml | 4 +
 roles/services/monitoring/influxdb/tasks/main.yml | 19 ++
 roles/services/monitoring/loki/handlers/main.yml | 8 +
 roles/services/monitoring/loki/tasks/main.yml | 80 ++++++
 .../prometheus/blackbox-exporter/tasks/main.yml | 0
 .../prometheus/nginx_exporter/defaults/main.yml | 4 +
 .../prometheus/nginx_exporter/handlers/main.yml | 9 +
 .../prometheus/nginx_exporter/tasks/main.yml | 44 ++++
 .../prometheus/node_exporter/defaults/main.yml | 4 +
 .../prometheus/node_exporter/tasks/main.yml | 28 ++
 .../monitoring/prometheus/server/defaults/main.yml | 6 +
 .../monitoring/prometheus/server/tasks/main.yml | 79 ++++++
 .../services/monitoring/promtail/handlers/main.yml | 39 +++
 roles/services/monitoring/promtail/tasks/main.yml | 151 +++++++++++
 roles/services/msmtp_mta/tasks/main.yml | 11 +
 roles/services/pihole/handlers/main.yml | 14 +
 roles/services/pihole/tasks/main.yml | 80 ++++++
 roles/services/ssh/tasks/main.yml | 46 ++++
 roles/services/unattended_upgrades/tasks/main.yml | 63 +++++
 101 files changed, 5716 insertions(+)
 create mode 100644 roles/linux_base/defaults/main.yml
 create mode 100644 roles/linux_base/handlers/main.yml
 create mode 100644 roles/linux_base/tasks/main.yml
 create mode 100644 roles/proxmox/cloudinit_guest/defaults/main.yml
 create mode 100644 roles/proxmox/cloudinit_guest/tasks/main.yml
 create mode 100644 roles/proxmox/debian_cloudinit/defaults/main.yml
 create mode 100644 roles/proxmox/debian_cloudinit/tasks/main.yml
 create mode 100644 roles/proxmox/fedora_cloudinit/defaults/main.yml
 create mode 100644 roles/proxmox/fedora_cloudinit/tasks/main.yml
 create mode 100644 roles/proxmox/proxmox_backup_server/tasks/main.yml
 create mode 100644 roles/proxmox/pve_backup/tasks/main.yml
 create mode 100644 roles/proxmox/system/defaults/main.yml
 create mode 100644 roles/proxmox/system/tasks/main.yml
 create mode 100644 roles/proxmox/system/tasks/proxmox_repo.yml
 create mode 100644 roles/proxmox/system/tasks/user.yml
 create mode 100644 roles/services/chronyd/handlers/main.yml
 create mode 100644 roles/services/chronyd/tasks/main.yml
 create mode 100644 roles/services/containers/arr_stack/handlers/main.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/gluetun.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/lidarr.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/main.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/prowlarr.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/qbittorrent.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/radarr.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/readarr.yml
 create mode 100644 roles/services/containers/arr_stack/tasks/sonarr.yml
 create mode 100644 roles/services/containers/authelia/handlers/main.yml
 create mode 100644 roles/services/containers/authelia/tasks/main.yml
 create mode 100644 roles/services/containers/bookstack/handlers/main.yml
 create mode 100644 roles/services/containers/bookstack/tasks/main.yml
 create mode 100644 roles/services/containers/cadvisor/handlers/main.yml
 create mode 100644 roles/services/containers/cadvisor/tasks/main.yml
 create mode 100644 roles/services/containers/drawio/handlers/main.yml
 create mode 100644 roles/services/containers/drawio/tasks/main.yml
 create mode 100644 roles/services/containers/firefly/handlers/main.yml
 create mode 100644 roles/services/containers/firefly/tasks/main.yml
 create mode 100644 roles/services/containers/freshrss/handlers/main.yml
 create mode 100644 roles/services/containers/freshrss/tasks/main.yml
 create mode 100644 roles/services/containers/gitea/handlers/main.yml
 create mode 100644 roles/services/containers/gitea/tasks/main.yml
 create mode 100644 roles/services/containers/home_assistant/handlers/main.yml
 create mode 100644 roles/services/containers/home_assistant/tasks/main.yml
 create mode 100644 roles/services/containers/homer/handlers/main.yml
 create mode 100644 roles/services/containers/homer/tasks/main.yml
 create mode 100644 roles/services/containers/invidious/handlers/main.yml
 create mode 100644 roles/services/containers/invidious/tasks/main.yml
 create mode 100644 roles/services/containers/jellyfin/handlers/main.yml
 create mode 100644 roles/services/containers/jellyfin/tasks/main.yml
 create mode 100644 roles/services/containers/kanboard/handlers/main.yml
 create mode 100644 roles/services/containers/kanboard/tasks/main.yml
 create mode 100644 roles/services/containers/navidrome/handlers/main.yml
 create mode 100644 roles/services/containers/navidrome/tasks/main.yml
 create mode 100644 roles/services/containers/nextcloud/handlers/main.yml
 create mode 100644 roles/services/containers/nextcloud/tasks/main.yml
 create mode 100644 roles/services/containers/photoprism/defaults/main.yml
 create mode 100644 roles/services/containers/photoprism/handlers/main.yml
 create mode 100644 roles/services/containers/photoprism/tasks/main.yml
 create mode 100644 roles/services/containers/pihole_exporter/tasks/main.yml
 create mode 100644 roles/services/containers/pywttr_docker/handlers/main.yml
 create mode 100644 roles/services/containers/pywttr_docker/tasks/main.yml
 create mode 100644 roles/services/containers/renovate/tasks/main.yml
 create mode 100644 roles/services/containers/searxng/handlers/main.yml
 create mode 100644 roles/services/containers/searxng/tasks/main.yml
 create mode 100644 roles/services/containers/text_generation/handlers/main.yml
 create mode 100644 roles/services/containers/text_generation/tasks/main.yml
 create mode 100644 roles/services/containers/vaultwarden/handlers/main.yml
 create mode 100644 roles/services/containers/vaultwarden/tasks/main.yml
 create mode 100644 roles/services/docker_rootless/defaults/main.yml
 create mode 100644 roles/services/docker_rootless/handlers/main.yml
 create mode 100644 roles/services/docker_rootless/tasks/main.yml
 create mode 100644 roles/services/freeipa/client/defaults/main.yml
 create mode 100644 roles/services/freeipa/client/tasks/main.yml
 create mode 100644 roles/services/freeipa/server/defaults/main.yml
 create mode 100644 roles/services/freeipa/server/tasks/main.yml
 create mode 100644 roles/services/game_server/handlers/main.yml
 create mode 100644 roles/services/game_server/tasks/main.yml
 create mode 100644 roles/services/jenkins/handlers/main.yml
 create mode 100644 roles/services/jenkins/tasks/main.yml
 create mode 100644 roles/services/monitoring/grafana/defaults/main.yml
 create mode 100644 roles/services/monitoring/grafana/handlers/main.yml
 create mode 100644 roles/services/monitoring/grafana/tasks/main.yml
 create mode 100644 roles/services/monitoring/influxdb/defaults/main.yml
 create mode 100644 roles/services/monitoring/influxdb/handlers/main.yml
 create mode 100644 roles/services/monitoring/influxdb/tasks/main.yml
 create mode 100644 roles/services/monitoring/loki/handlers/main.yml
 create mode 100644 roles/services/monitoring/loki/tasks/main.yml
 create mode 100644 roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml
 create mode 100644 roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml
 create mode 100644 roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml
 create mode 100644 roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml
 create mode 100644 roles/services/monitoring/prometheus/node_exporter/defaults/main.yml
 create mode 100644 roles/services/monitoring/prometheus/node_exporter/tasks/main.yml
 create mode 100644 roles/services/monitoring/prometheus/server/defaults/main.yml
 create mode 100644 roles/services/monitoring/prometheus/server/tasks/main.yml
 create mode 100644 roles/services/monitoring/promtail/handlers/main.yml
 create mode 100644 roles/services/monitoring/promtail/tasks/main.yml
 create mode 100644 roles/services/msmtp_mta/tasks/main.yml
 create mode 100644 roles/services/pihole/handlers/main.yml
 create mode 100644 roles/services/pihole/tasks/main.yml
 create mode 100644 roles/services/ssh/tasks/main.yml
 create mode 100644 roles/services/unattended_upgrades/tasks/main.yml

(limited to 'roles')

diff --git a/roles/linux_base/defaults/main.yml b/roles/linux_base/defaults/main.yml
new file mode 100644
index 0000000..3fb0cb5
--- /dev/null
+++ b/roles/linux_base/defaults/main.yml
@@ -0,0 +1 @@
+domain: "home.local"
diff --git a/roles/linux_base/handlers/main.yml b/roles/linux_base/handlers/main.yml
new file mode 100644
index 0000000..0065ae9
--- /dev/null
+++ b/roles/linux_base/handlers/main.yml
@@ -0,0 +1,16 @@
+- name: update and upgrade - debian
+  when: ansible_facts['distribution'] == 'Debian'
+  become: yes
+  apt:
+    name: "*"
+    state: latest
+    update_cache: yes
+  register: apt_upgrade
+  retries: 100
+  until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg)
+
+- name: update and upgrade - fedora
+  when: ansible_facts['distribution'] == 'Fedora'
+  dnf:
+    name: "*"
+    state: latest
diff --git a/roles/linux_base/tasks/main.yml b/roles/linux_base/tasks/main.yml
new file mode 100644
index 0000000..ef523ef
--- /dev/null
+++ b/roles/linux_base/tasks/main.yml
@@ -0,0 +1,57 @@
+- name: remove cloud config managed /etc/hosts
+  lineinfile:
+    path: /etc/cloud/cloud.cfg
+    regexp: ".*update_etc_hosts.*"
+    state: absent
+
+- name: set fully qualified hostname
+  notify:
+    - update and upgrade - debian
+    - update and upgrade - fedora
+  hostname:
+    name: "{{ ansible_hostname }}.{{ domain }}"
+
+- name: use https repos - debian
+  when: ansible_facts['distribution'] == 'Debian'
+  replace:
+    path: /etc/apt/sources.list
+    regexp: "http://"
+    replace: "https://"
+
+- name: install packages
+  package:
+    name: "{{ base_packages }}"
+    state: latest
+
+- name: allow ssh
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    rule: allow
+    name: ssh
+
+- name: reload ufw
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    state: reloaded
+
+- name: enable ufw
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    state: enabled
+
+- name: default deny incoming
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    default: deny
+    direction: incoming
+
+- name: default allow outgoing
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    default: allow
+    direction: outgoing
+
+- name: reload ufw
+  when: ansible_facts['hostname'] != 'proxmox'
+  ufw:
+    state: reloaded
diff --git a/roles/proxmox/cloudinit_guest/defaults/main.yml b/roles/proxmox/cloudinit_guest/defaults/main.yml
new file mode 100644
index 0000000..a562ff3
--- /dev/null
+++ b/roles/proxmox/cloudinit_guest/defaults/main.yml
@@ -0,0 +1,7 @@
+vm_onboot: yes
+vm_agent: yes
+vm_bridge: vmbr0
+vm_full_clone: yes
+memory_size: 512
+cpu_cores: 1
+cpu_sockets: 1
diff --git a/roles/proxmox/cloudinit_guest/tasks/main.yml b/roles/proxmox/cloudinit_guest/tasks/main.yml
new file mode 100644
index 0000000..ab958dc
--- /dev/null
+++ b/roles/proxmox/cloudinit_guest/tasks/main.yml
@@ -0,0 +1,80 @@
+- name: check if id already exists
+  stat:
+    path: "/etc/pve/qemu-server/{{ ci_base_id }}.conf"
+  register: stat_result
+
+- meta: end_play
+  when: stat_result.stat.exists
+
+- name: install packages
+  package:
+    name:
+      - python3-pip
+      - python3-requests
+
+- name: ensure latest version of proxmoxer is installed
+  become: yes
+  become_user: "{{ proxmox_username }}"
+  pip:
+    name: proxmoxer==2.0.0
+
+- name: remove any existing api token
+  command: "pveum user token remove vmadmin@pam ansible"
+  register: result
+  changed_when: result.rc == 0
+  failed_when: result.rc not in [0,255]
+
+- name: create api token
+  register: api_token
+  changed_when: result.rc == 0
+  args:
+    executable: /bin/bash
+  shell: |
+    set -eo pipefail
+    pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2
+
+
+- name: clone template and create guest
+  become: yes
+  become_user: "{{ proxmox_username }}"
+  community.general.proxmox_kvm:
+    api_host: proxmox.home.local
+    api_user: "{{ proxmox_api_user }}"
+    api_token_id: "ansible"
+    api_token_secret: "{{ api_token.stdout }}"
+    node: proxmox
+    full: "{{ vm_full_clone }}"
+    clone: arbitrary
+    vmid: "{{ template_id }}"
+    newid: "{{ vm_id }}"
+    name: "{{ vm_name }}"
+    memory: "{{ memory_size }}"
+    sockets: "{{ cpu_sockets }}"
+    cores: "{{ cpu_cores }}"
+    bios: "{{ bios_type }}"
+    ipconfig:
+      ipconfig0: "ip={{ ip_addr }},gw={{ gateway }}"
+    net:
+      net0: "virtio,bridge={{ vm_bridge }},tag={{ vm_vlan }}"
+    nameservers: "{{ nameserver }}"
+    onboot: "{{ vm_onboot }}"
+    agent: "{{ vm_agent }}"
+    state: present
+
+- name: start vm
+  become: yes
+  become_user: "{{ proxmox_username }}"
+  community.general.proxmox_kvm:
+    api_host: proxmox.home.local
+    api_user: "{{ proxmox_api_user }}"
+    api_token_id: "ansible"
+    api_token_secret: "{{ api_token.stdout }}"
api_token.stdout }}" + node: proxmox + vmid: "{{ vm_id }}" + state: started + +- name: remove api token + command: "pveum user token remove vmadmin@pam ansible" + register: result + changed_when: result.rc == 0 + failed_when: result.rc not in [0,255] diff --git a/roles/proxmox/debian_cloudinit/defaults/main.yml b/roles/proxmox/debian_cloudinit/defaults/main.yml new file mode 100644 index 0000000..dfebf34 --- /dev/null +++ b/roles/proxmox/debian_cloudinit/defaults/main.yml @@ -0,0 +1,8 @@ +ci_target_dir: "/home/{{ci_user}}" +ci_memory_size: 512 +ci_base_id: 1000 +ci_disk_size: "10G" +ci_storage: "local-lvm" +ci_user: "initadmin" +ssh_key_local: /home/sam/.ssh/id_rsa.pub +ssh_key_dest: /home/vmadmin/ci_sshkey diff --git a/roles/proxmox/debian_cloudinit/tasks/main.yml b/roles/proxmox/debian_cloudinit/tasks/main.yml new file mode 100644 index 0000000..8ed7dfd --- /dev/null +++ b/roles/proxmox/debian_cloudinit/tasks/main.yml @@ -0,0 +1,115 @@ +- name: check if id already exists + stat: + path: "/etc/pve/qemu-server/{{ ci_base_id }}.conf" + register: stat_result + +- meta: end_play + when: stat_result.stat.exists + +- name: install packages + package: + name: + - python3-pip + - python3-requests + +- name: ensure latest version of proxmoxer is installed + become: yes + become_user: "{{ proxmox_username }}" + pip: + name: proxmoxer==2.0.0 + +- name: download the hashes + get_url: + url: "https://cloud.debian.org/images/cloud/bookworm/latest/SHA512SUMS" + dest: "{{ ci_target_dir }}" + +- name: get the hash + changed_when: false + args: + executable: /bin/bash + shell: | + set -eo pipefail + grep debian-12-genericcloud-amd64.qcow2 {{ ci_target_dir }}/SHA512SUMS | cut -d ' ' -f 1 + register: sha512sum + +- name: download the cloud image + get_url: + url: "https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2" + dest: "{{ ci_target_dir }}" + checksum: "sha512:{{ sha512sum.stdout }}" + +- name: remove any existing api token + command: "pveum user token remove vmadmin@pam ansible" + register: result + changed_when: result.rc == 0 + failed_when: result.rc not in [0,255] + +- name: create api token + register: api_token + changed_when: result.rc == 0 + args: + executable: /bin/bash + shell: | + set -eo pipefail + pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2 + +- name: create vm + become: yes + become_user: "{{ proxmox_username }}" + community.general.proxmox_kvm: + api_host: proxmox.home.local + api_user: "{{ proxmox_api_user }}" + api_token_id: "ansible" + api_token_secret: "{{ api_token.stdout }}" + node: proxmox + # basic settings + vmid: "{{ ci_base_id }}" + memory: "{{ ci_memory_size }}" + sockets: "{{ cpu_sockets }}" + cores: "{{ cpu_cores }}" + bios: "{{ bios_type }}" + agent: "{{ vm_agent }}" + state: "present" + # display settings + serial: + "serial0": "socket" + vga: "serial0" + # disks and boot settings + scsihw: "virtio-scsi-pci" + ide: + ide2: "{{ ci_storage }}:cloudinit" + boot: "c" + bootdisk: "scsi0" + onboot: "{{ vm_onboot }}" + # cloud-init + citype: "nocloud" + ciuser: "{{ ci_user }}" + cipassword: "{{ ci_password }}" + sshkeys: "{{ ci_sshkey }}" + # network + net: + net0: "virtio,bridge={{ ci_bridge }},tag={{ ci_vlan }}" + nameservers: "{{ nameserver }}" + template: "yes" + +- name: import the cloud image + changed_when: false + command: + cmd: "qm importdisk {{ ci_base_id }} {{ ci_target_dir }}/debian-12-genericcloud-amd64.qcow2 {{ ci_storage }}" + creates: "/dev/pve/vm-{{ ci_base_id 
}}-disk-0" + +- name: attach the cloud image as a new disk + changed_when: false + command: + cmd: "qm set {{ ci_base_id }} --scsi0 {{ ci_storage }}:vm-{{ ci_base_id }}-disk-0" + +- name: resize disk to standard size + changed_when: false + command: + cmd: "qm resize {{ ci_base_id }} scsi0 {{ ci_disk_size }}" + +- name: remove api token + command: "pveum user token remove vmadmin@pam ansible" + register: result + changed_when: result.rc == 0 + failed_when: result.rc not in [0,255] diff --git a/roles/proxmox/fedora_cloudinit/defaults/main.yml b/roles/proxmox/fedora_cloudinit/defaults/main.yml new file mode 100644 index 0000000..fb44657 --- /dev/null +++ b/roles/proxmox/fedora_cloudinit/defaults/main.yml @@ -0,0 +1,8 @@ +ci_target_dir: "/home/{{ci_user}}" +ci_memory_size: 512 +ci_base_id: 1001 +ci_storage: "local-lvm" +ci_disk_size: "10G" +ci_user: "initadmin" +ssh_key_local: files/id_rsa.pub +ssh_key_dest: /tmp/ci_sshkey diff --git a/roles/proxmox/fedora_cloudinit/tasks/main.yml b/roles/proxmox/fedora_cloudinit/tasks/main.yml new file mode 100644 index 0000000..61ed185 --- /dev/null +++ b/roles/proxmox/fedora_cloudinit/tasks/main.yml @@ -0,0 +1,122 @@ +- name: download the hashes + get_url: + url: "https://getfedora.org/static/checksums/36/images/Fedora-Cloud-36-1.5-x86_64-CHECKSUM" + dest: "{{ ci_target_dir }}" + +- name: install gpg + package: + name: gnupg + state: latest + +- name: download the GPG key + get_url: + url: "https://getfedora.org/static/fedora.gpg" + dest: "{{ ci_target_dir }}" + +- name: import gpg key + changed_when: false + args: + executable: /bin/bash + shell: | + set -eo pipefail + cat {{ ci_target_dir }}/fedora.gpg | gpg --import + +- name: verify checksum file + command: + cmd: "gpg --verify {{ ci_target_dir }}/Fedora-Cloud-36-1.5-x86_64-CHECKSUM" + register: result + changed_when: false + failed_when: result.rc > 0 + +- name: fail if unable to gpg verify checksums + fail: + msg: "failed to verify the checksums" + when: result.rc > 0 + +- name: get the hash + shell: + cmd: "grep 'qcow2)' {{ ci_target_dir }}/Fedora-Cloud-36-1.5-x86_64-CHECKSUM | cut -d '=' -f 2 | tr -d ' '" + changed_when: false + register: sha256sum + +- name: download the cloud image + get_url: + url: "https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2" + dest: "{{ ci_target_dir }}" + checksum: "sha256:{{ sha256sum.stdout }}" + +- name: remove any existing api token + command: "pveum user token remove vmadmin@pam ansible" + register: result + changed_when: result.rc == 0 + failed_when: result.rc not in [0,255] + +- name: create api token + register: api_token + changed_when: result.rc == 0 + args: + executable: /bin/bash + shell: | + set -eo pipefail + pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2 + +- name: create vm + become: yes + become_user: "{{ proxmox_username }}" + community.general.proxmox_kvm: + api_host: proxmox.home.local + api_user: "{{ proxmox_api_user }}" + api_token_id: "ansible" + api_token_secret: "{{ api_token.stdout }}" + node: proxmox + # basic settings + vmid: "{{ ci_base_id }}" + memory: "{{ ci_memory_size }}" + sockets: "{{ cpu_sockets }}" + cores: "{{ cpu_cores }}" + bios: "{{ bios_type }}" + agent: "{{ vm_agent }}" + state: "present" + # display settings + serial: + "serial0": "socket" + vga: "serial0" + # disks and boot settings + scsihw: "virtio-scsi-pci" + ide: + ide2: "{{ ci_storage }}:cloudinit" + boot: "c" + bootdisk: "scsi0" + onboot: "{{ 
vm_onboot }}" + # cloud-init + citype: "nocloud" + ciuser: "{{ ci_user }}" + cipassword: "{{ ci_password }}" + sshkeys: "{{ ci_sshkey }}" + # network + net: + net0: "virtio,bridge={{ ci_bridge }},tag={{ ci_vlan }}" + nameservers: "{{ nameserver }}" + template: "yes" + +- name: import the cloud image + changed_when: false + command: + cmd: "qm importdisk {{ ci_base_id }} {{ ci_target_dir }}/Fedora-Cloud-Base-36-1.5.x86_64.qcow2 {{ ci_storage }}" + creates: "/dev/pve/vm-{{ ci_base_id }}-disk-0" + +- name: attach the cloud image as a new disk + changed_when: false + command: + cmd: "qm set {{ ci_base_id }} --scsi0 {{ ci_storage }}:vm-{{ ci_base_id }}-disk-0" + +- name: resize disk to standard size + changed_when: false + command: + cmd: "qm resize {{ ci_base_id }} scsi0 {{ ci_disk_size }}" + +- name: remove api token + command: "pveum user token remove vmadmin@pam ansible" + register: result + changed_when: result.rc == 0 + failed_when: result.rc not in [0,255] diff --git a/roles/proxmox/proxmox_backup_server/tasks/main.yml b/roles/proxmox/proxmox_backup_server/tasks/main.yml new file mode 100644 index 0000000..3e91a19 --- /dev/null +++ b/roles/proxmox/proxmox_backup_server/tasks/main.yml @@ -0,0 +1,42 @@ +- name: add proxmox backup repo + apt_repository: + repo: deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription + state: present + update_cache: yes + +- name: install proxmox backup server and client + package: + name: + - proxmox-backup-server + - proxmox-backup-client + +- name: create datastore + command: + cmd: "proxmox-backup-manager datastore create {{ pbs_datastore }} {{ pbs_datastore_path }} --keep-last {{ pbs_keep_last }} --keep-daily {{ pbs_keep_daily }} --keep-weekly {{ pbs_keep_weekly }} --keep-monthly {{ pbs_keep_monthly }} --keep-yearly {{ pbs_keep_yearly }}" + register: result + changed_when: false + failed_when: result.rc not in [255] + +- name: create backup admin + command: + cmd: "proxmox-backup-manager user create {{ pbs_admin }} --password {{ pbs_admin_password }}" + register: result + changed_when: false + failed_when: result.rc not in [255] + +- name: assign permissions for backup admin + changed_when: false + command: + cmd: "proxmox-backup-manager acl update / Admin --auth-id {{ pbs_admin }}" + +- name: create backup user + command: + cmd: "proxmox-backup-manager user create {{ pbs_user }} --password {{ pbs_password }}" + register: result + failed_when: result.rc not in [255] + changed_when: false + +- name: assign permissions for backup user + changed_when: false + command: + cmd: "proxmox-backup-manager acl update / DatastoreBackup --auth-id {{ pbs_user }}" diff --git a/roles/proxmox/pve_backup/tasks/main.yml b/roles/proxmox/pve_backup/tasks/main.yml new file mode 100644 index 0000000..eba51d9 --- /dev/null +++ b/roles/proxmox/pve_backup/tasks/main.yml @@ -0,0 +1,17 @@ +- name: create cron job for root backup of proxmox ve + cron: + name: "proxmox / backup" + cron_file: backup + hour: "23" + minute: "0" + user: root + job: "PBS_PASSWORD='{{ pbs_password }}' PBS_FINGERPRINT={{ pbs_fingerprint }} proxmox-backup-client backup root.pxar:/ --repository {{ pbs_user }}@{{ pbs_host }}:{{ pbs_datastore }}" + +- name: create cron job for /etc/pve backup of proxmox ve + cron: + name: "proxmox /etc/pve backup" + cron_file: backup + hour: "23" + minute: "0" + user: root + job: "PBS_PASSWORD='{{ pbs_password }}' PBS_FINGERPRINT={{ pbs_fingerprint }} proxmox-backup-client backup pve.pxar:/etc/pve --repository {{ pbs_user }}@{{ pbs_host }}:{{ pbs_datastore }}" 
diff --git a/roles/proxmox/system/defaults/main.yml b/roles/proxmox/system/defaults/main.yml
new file mode 100644
index 0000000..0091ea1
--- /dev/null
+++ b/roles/proxmox/system/defaults/main.yml
@@ -0,0 +1,8 @@
+username: vmadmin
+ssh_public_key: changeme
+oath_key: changeme
+raid_id: "0"
+raid_level: "1"
+raid_devices: "/dev/sda1 /dev/sdb1"
+raid_name: "prometheus:0"
+
diff --git a/roles/proxmox/system/tasks/main.yml b/roles/proxmox/system/tasks/main.yml
new file mode 100644
index 0000000..ac84900
--- /dev/null
+++ b/roles/proxmox/system/tasks/main.yml
@@ -0,0 +1,30 @@
+---
+- name: remove enterprise repo
+  file:
+    path: /etc/apt/sources.list.d/pve-enterprise.list
+    state: absent
+
+- name: add proxmox no subscription repo
+  apt_repository:
+    repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
+
+- name: create non-root user
+  user:
+    name: "{{ proxmox_username }}"
+    groups:
+      - sudo
+    shell: /bin/bash
+
+- name: give passwordless sudo to sudo group
+  lineinfile:
+    path: /etc/sudoers
+    state: present
+    regexp: '^%sudo'
+    line: '%sudo ALL=(ALL) NOPASSWD: ALL'
+    validate: '/usr/sbin/visudo -cf %s'
+
+- name: deploy ssh public key
+  authorized_key:
+    user: "{{ proxmox_username }}"
+    state: present
+    key: "{{ lookup('file', 'data/common/id_rsa.pub') }}"
diff --git a/roles/proxmox/system/tasks/proxmox_repo.yml b/roles/proxmox/system/tasks/proxmox_repo.yml
new file mode 100644
index 0000000..bf2508d
--- /dev/null
+++ b/roles/proxmox/system/tasks/proxmox_repo.yml
@@ -0,0 +1,8 @@
+- name: remove enterprise repo
+  file:
+    path: /etc/apt/sources.list.d/pve-enterprise.list
+    state: absent
+
+- name: add proxmox no subscription repo
+  apt_repository:
+    repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
diff --git a/roles/proxmox/system/tasks/user.yml b/roles/proxmox/system/tasks/user.yml
new file mode 100644
index 0000000..2ba337a
--- /dev/null
+++ b/roles/proxmox/system/tasks/user.yml
@@ -0,0 +1,28 @@
+- name: create non-root user
+  user:
+    name: "{{ username }}"
+    password: "{{ password | password_hash('sha512') }}"
+    groups:
+      - sudo
+    shell: /bin/bash
+    update_password: on_create
+  register: newuser
+
+- name: ensure primary user group exists
+  group:
+    name: "{{ username }}"
+    state: present
+
+- name: give passwordless sudo to sudo group
+  lineinfile:
+    path: /etc/sudoers
+    state: present
+    regexp: '^%sudo'
+    line: '%sudo ALL=(ALL) NOPASSWD: ALL'
+    validate: '/usr/sbin/visudo -cf %s'
+
+- name: deploy ssh public key
+  authorized_key:
+    user: "{{ username }}"
+    state: present
+    key: "{{ ssh_public_key }}"
diff --git a/roles/services/chronyd/handlers/main.yml b/roles/services/chronyd/handlers/main.yml
new file mode 100644
index 0000000..7e6f687
--- /dev/null
+++ b/roles/services/chronyd/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: restart chronyd
+  service:
+    name: chronyd
+    state: restarted
diff --git a/roles/services/chronyd/tasks/main.yml b/roles/services/chronyd/tasks/main.yml
new file mode 100644
index 0000000..73fdc28
--- /dev/null
+++ b/roles/services/chronyd/tasks/main.yml
@@ -0,0 +1,30 @@
+- name: install packages
+  package:
+    name: chrony
+    state: latest
+
+- name: deploy chrony configuration
+  when: ansible_facts['distribution'] == 'Debian'
+  notify: restart chronyd
+  copy:
+    src: "{{ chrony_config }}"
+    dest: /etc/chrony/chrony.conf
+    owner: root
+    group: root
+    mode: '0644'
+
+- name: deploy chrony configuration
+  when: ansible_facts['distribution'] == 'Fedora'
+  notify: restart chronyd
+  copy:
+    src: "{{ chrony_config }}"
+    dest: /etc/chrony.conf
+    owner: root
+    group: root
+    mode: '0644'
+
+- name: make sure chronyd is enabled
+  systemd:
+    name: chronyd
+    enabled: yes
+    masked: no
diff --git a/roles/services/containers/arr_stack/handlers/main.yml b/roles/services/containers/arr_stack/handlers/main.yml
new file mode 100644
index 0000000..5463835
--- /dev/null
+++ b/roles/services/containers/arr_stack/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: restart nginx
+  service:
+    name: nginx
+    state: restarted
diff --git a/roles/services/containers/arr_stack/tasks/gluetun.yml b/roles/services/containers/arr_stack/tasks/gluetun.yml
new file mode 100644
index 0000000..e47d55a
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/gluetun.yml
@@ -0,0 +1,105 @@
+- name: set image fact
+  set_fact:
+    image: qmcgaw/gluetun:v3.34.3
+
+- name: set other facts
+  vars:
+    array: "{{ image.split('/', 1) }}"
+  set_fact:
+    repo_tag: "{{ array.1 }}"
+    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"
+
+- name: create gluetun directory
+  file:
+    path: "{{ docker_home }}/gluetun"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0755'
+
+- name: create gluetun data directory
+  file:
+    path: "{{ docker_home }}/gluetun/data"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0755'
+
+- name: login to docker registry
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_login:
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    registry_url: "{{ docker_registry_url }}"
+    username: "{{ docker_registry_username }}"
+    password: "{{ docker_registry_password }}"
+
+- name: pull and push gluetun image
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_image:
+    name: "{{ image }}"
+    repository: "{{ custom_registry }}/{{ repo_tag }}"
+    push: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    source: pull
+    force_source: yes
+
+- name: create gluetun docker network
+  docker_network:
+    name: "{{ gluetun_network_name }}"
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    driver: bridge
+    ipam_config:
+      - subnet: "{{ gluetun_subnet }}"
+        gateway: "{{ gluetun_gateway }}"
+
+- name: create and deploy gluetun container
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_container:
+    name: "gluetun"
+    hostname: "gluetun"
+    image: "{{ custom_registry }}/{{ repo_tag }}"
+    recreate: yes
+    pull: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    capabilities:
+      - net_admin
+    devices:
+      - "/dev/net/tun:/dev/net/tun"
+    purge_networks: yes
+    networks:
+      - name: "{{ gluetun_network_name }}"
+        ipv4_address: "{{ gluetun_ipv4 }}"
+    ports:
+      - "127.0.0.1:{{ qbittorrent_external_port }}:{{ qbittorrent_external_port }}"
+      - "127.0.0.1:{{ sonarr_external_port }}:8989"
+      - "127.0.0.1:{{ radarr_external_port }}:7878"
+      - "127.0.0.1:{{ lidarr_external_port }}:8686"
+      - "127.0.0.1:{{ readarr_external_port }}:8787"
+      - "127.0.0.1:{{ prowlarr_external_port }}:9696"
+    state: 'started'
+    comparisons:
+      '*': strict
+    restart_policy: unless-stopped
+    env:
+      "TZ": "{{ timezone }}"
+      "VPN_SERVICE_PROVIDER": "mullvad"
+      "VPN_TYPE": "wireguard"
+      "WIREGUARD_PRIVATE_KEY": "{{ wireguard_privkey }}"
+      "WIREGUARD_ADDRESSES": "{{ wireguard_addrs }}"
+      "SERVER_CITIES": "{{ gluetun_cities }}"
"DOT_PROVIDERS": "quad9" + "BLOCK_MALICIOUS": "on" + "BLOCK_SURVEILLANCE": "on" + "BLOCK_ADS": "on" + "HEALTH_TARGET_ADDRESS": "www.debian.org:443" + volumes: + - "{{ docker_home }}/gluetun/data:/gluetun" diff --git a/roles/services/containers/arr_stack/tasks/lidarr.yml b/roles/services/containers/arr_stack/tasks/lidarr.yml new file mode 100644 index 0000000..1f70437 --- /dev/null +++ b/roles/services/containers/arr_stack/tasks/lidarr.yml @@ -0,0 +1,93 @@ +- name: set image fact + set_fact: + image: linuxserver/lidarr:1.2.6-nightly + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create lidarr directory + file: + path: "{{ docker_home }}/lidarr" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create lidarr config directory + file: + path: "{{ docker_home }}/lidarr/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + xdg_runtime_dir: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push lidarr image + become: yes + become_user: "{{ docker_username }}" + environment: + xdg_runtime_dir: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create and deploy lidarr container + become: yes + become_user: "{{ docker_username }}" + environment: + xdg_runtime_dir: "/run/user/{{ docker_uid }}" + docker_container: + name: "lidarr" + image: "{{ custom_registry }}/{{ repo_tag }}" + recreate: yes + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + network_mode: "container:gluetun" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "tz": "{{ timezone }}" + "PUID": "0" + "PGID": "0" + volumes: + - "{{ docker_home }}/lidarr/config:/config" + - "{{ docker_home }}/arr/data:/data" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + template: + src: "{{ lidarr_nginx_config }}" + dest: /etc/nginx/sites-available/lidarr.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/lidarr.conf + dest: /etc/nginx/sites-enabled/lidarr.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/arr_stack/tasks/main.yml b/roles/services/containers/arr_stack/tasks/main.yml new file mode 100644 index 0000000..ee27384 --- /dev/null +++ b/roles/services/containers/arr_stack/tasks/main.yml @@ -0,0 +1,130 @@ +- name: create arr directory structure + file: + path: "{{ docker_home }}/arr" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0775' +- name: create arr directory structure + file: + path: "{{ docker_home }}/arr/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0775' + +- name: create arr/data directory structure + file: + path: "{{ docker_home }}/arr/data/torrents" 
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/torrents/movies"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/torrents/music"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/torrents/books"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/torrents/tv"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/usenet"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/usenet/movies"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/usenet/music"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/usenet/books"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/usenet/tv"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/media"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/media/movies"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/media/music"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/media/books"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+- name: create arr/data directory structure
+  file:
+    path: "{{ docker_home }}/arr/data/media/tv"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0775'
+
+- include_tasks: gluetun.yml
+- include_tasks: qbittorrent.yml
+- include_tasks: sonarr.yml
+- include_tasks: radarr.yml
+- include_tasks: lidarr.yml
+- include_tasks: readarr.yml
+- include_tasks: prowlarr.yml
diff --git a/roles/services/containers/arr_stack/tasks/prowlarr.yml b/roles/services/containers/arr_stack/tasks/prowlarr.yml
new file mode 100644
index 0000000..53f1a45
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/prowlarr.yml
@@ -0,0 +1,92 @@
+- name: set image fact
+  set_fact:
+    image: linuxserver/prowlarr:1.6.2-nightly
+
+- name: set other facts
+  vars:
+    array: "{{ image.split('/', 1) }}"
}}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create prowlarr directory + file: + path: "{{ docker_home }}/prowlarr" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create prowlarr config directory + file: + path: "{{ docker_home }}/prowlarr/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push prowlarr image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create and deploy prowlarr container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "prowlarr" + image: "{{ custom_registry }}/{{ repo_tag }}" + recreate: yes + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + network_mode: "container:gluetun" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "TZ": "{{ timezone }}" + "PUID": "0" + "PGID": "0" + volumes: + - "{{ docker_home }}/prowlarr/config:/config" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + template: + src: "{{ prowlarr_nginx_config }}" + dest: /etc/nginx/sites-available/prowlarr.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/prowlarr.conf + dest: /etc/nginx/sites-enabled/prowlarr.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/arr_stack/tasks/qbittorrent.yml b/roles/services/containers/arr_stack/tasks/qbittorrent.yml new file mode 100644 index 0000000..25e554f --- /dev/null +++ b/roles/services/containers/arr_stack/tasks/qbittorrent.yml @@ -0,0 +1,94 @@ +- name: set image fact + set_fact: + image: linuxserver/qbittorrent:4.5.4 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create qbittorrent directory + file: + path: "{{ docker_home }}/qbittorrent" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create qbittorrent config directory + file: + path: "{{ docker_home }}/qbittorrent/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push qbittorrent image + 
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_image:
+    name: "{{ image }}"
+    repository: "{{ custom_registry }}/{{ repo_tag }}"
+    push: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    source: pull
+    force_source: yes
+
+- name: create and deploy qbittorrent container
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_container:
+    name: "qbittorrent"
+    image: "{{ custom_registry }}/{{ repo_tag }}"
+    recreate: yes
+    pull: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    purge_networks: yes
+    network_mode: "container:gluetun"
+    state: 'started'
+    comparisons:
+      '*': strict
+    restart_policy: unless-stopped
+    env:
+      "TZ": "{{ timezone }}"
+      "WEBUI_PORT": "{{ qbittorrent_external_port }}"
+      "PUID": "0"
+      "PGID": "0"
+    volumes:
+      - "{{ docker_home }}/qbittorrent/config:/config"
+      - "{{ docker_home }}/arr/data:/data"
+
+- name: deploy nginx configuration
+  notify: restart nginx
+  register: nginx_config
+  template:
+    src: "{{ qbittorrent_nginx_config }}"
+    dest: /etc/nginx/sites-available/qbittorrent.conf
+    owner: root
+    group: root
+    mode: '0644'
+
+- name: symlink site
+  file:
+    src: /etc/nginx/sites-available/qbittorrent.conf
+    dest: /etc/nginx/sites-enabled/qbittorrent.conf
+    owner: root
+    group: root
+    state: link
diff --git a/roles/services/containers/arr_stack/tasks/radarr.yml b/roles/services/containers/arr_stack/tasks/radarr.yml
new file mode 100644
index 0000000..2e98c47
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/radarr.yml
@@ -0,0 +1,93 @@
+- name: set image fact
+  set_fact:
+    image: linuxserver/radarr:4.6.4-nightly
+
+- name: set other facts
+  vars:
+    array: "{{ image.split('/', 1) }}"
+  set_fact:
+    repo_tag: "{{ array.1 }}"
+    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"
+
+- name: create radarr directory
+  file:
+    path: "{{ docker_home }}/radarr"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0755'
+
+- name: create radarr config directory
+  file:
+    path: "{{ docker_home }}/radarr/config"
+    state: directory
+    owner: "{{ docker_username }}"
+    group: "{{ docker_username }}"
+    mode: '0755'
+
+- name: login to docker registry
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_login:
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    registry_url: "{{ docker_registry_url }}"
+    username: "{{ docker_registry_username }}"
+    password: "{{ docker_registry_password }}"
+
+- name: pull and push radarr image
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_image:
+    name: "{{ image }}"
+    repository: "{{ custom_registry }}/{{ repo_tag }}"
+    push: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    source: pull
+    force_source: yes
+
+- name: create and deploy radarr container
+  become: yes
+  become_user: "{{ docker_username }}"
+  environment:
+    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
+  docker_container:
+    name: "radarr"
+    image: "{{ custom_registry }}/{{ repo_tag }}"
+    recreate: yes
+    pull: yes
+    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
+    purge_networks: yes
+    network_mode: "container:gluetun"
+    state: 'started'
+    comparisons:
+      '*': strict
+    restart_policy: unless-stopped
+    env:
+      "TZ": "{{ timezone }}"
+      "PUID": "0"
+ "PGID": "0" + volumes: + - "{{ docker_home }}/radarr/config:/config" + - "{{ docker_home }}/arr/data:/data" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + template: + src: "{{ radarr_nginx_config }}" + dest: /etc/nginx/sites-available/radarr.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/radarr.conf + dest: /etc/nginx/sites-enabled/radarr.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/arr_stack/tasks/readarr.yml b/roles/services/containers/arr_stack/tasks/readarr.yml new file mode 100644 index 0000000..bd8b2ec --- /dev/null +++ b/roles/services/containers/arr_stack/tasks/readarr.yml @@ -0,0 +1,93 @@ +- name: set image fact + set_fact: + image: linuxserver/readarr:0.2.0-nightly + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create readarr directory + file: + path: "{{ docker_home }}/readarr" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create readarr config directory + file: + path: "{{ docker_home }}/readarr/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push readarr image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create and deploy readarr container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "readarr" + image: "{{ custom_registry }}/{{ repo_tag }}" + recreate: yes + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + network_mode: "container:gluetun" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "TZ": "{{ timezone }}" + "PUID": "0" + "PGID": "0" + volumes: + - "{{ docker_home }}/readarr/config:/config" + - "{{ docker_home }}/arr/data:/data" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + template: + src: "{{ readarr_nginx_config }}" + dest: /etc/nginx/sites-available/readarr.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/readarr.conf + dest: /etc/nginx/sites-enabled/readarr.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/arr_stack/tasks/sonarr.yml b/roles/services/containers/arr_stack/tasks/sonarr.yml new file mode 100644 index 0000000..ac712ba --- /dev/null +++ b/roles/services/containers/arr_stack/tasks/sonarr.yml @@ -0,0 +1,93 @@ +- name: set image fact + set_fact: + image: linuxserver/sonarr:develop-version-4.0.0.433 + +- name: set other facts + vars: + array: "{{ 
image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create sonarr directory + file: + path: "{{ docker_home }}/sonarr" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create sonarr config directory + file: + path: "{{ docker_home }}/sonarr/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push sonarr image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create and deploy sonarr container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "sonarr" + image: "{{ custom_registry }}/{{ repo_tag }}" + recreate: yes + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + network_mode: "container:gluetun" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "TZ": "{{ timezone }}" + "PUID": "0" + "PGID": "0" + volumes: + - "{{ docker_home }}/sonarr/config:/config" + - "{{ docker_home }}/arr/data:/data" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + template: + src: "{{ sonarr_nginx_config }}" + dest: /etc/nginx/sites-available/sonarr.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/sonarr.conf + dest: /etc/nginx/sites-enabled/sonarr.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/authelia/handlers/main.yml b/roles/services/containers/authelia/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/authelia/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/authelia/tasks/main.yml b/roles/services/containers/authelia/tasks/main.yml new file mode 100644 index 0000000..c6bb337 --- /dev/null +++ b/roles/services/containers/authelia/tasks/main.yml @@ -0,0 +1,283 @@ +- name: set image fact + set_fact: + image: authelia/authelia:master + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create authelia directory + file: + path: "{{ docker_home }}/authelia" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create authelia config directory + file: + path: "{{ docker_home }}/authelia/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create authelia secrets directory + file: + path: "{{ docker_home 
}}/authelia/secrets" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create redis data directory + file: + path: "{{ docker_home }}/authelia/redis_data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: place authelia config in proper location + copy: + src: "{{ authelia_config }}" + dest: "{{ docker_home }}/authelia/config/configuration.yml" + owner: root + group: docker + mode: '0644' + +# nginx snippets + +- name: copy proxy.conf snippet + copy: + src: "{{ authelia_proxy_snippet }}" + dest: "/etc/nginx/snippets/proxy.conf" + owner: root + group: root + mode: '0644' + +- name: copy authelia-location.conf snippet + copy: + src: "{{ authelia_location_snippet }}" + dest: "/etc/nginx/snippets/authelia-location.conf" + owner: root + group: root + mode: '0644' + +- name: copy authelia-authrequest.conf snippet + copy: + src: "{{ authelia_request_snippet }}" + dest: "/etc/nginx/snippets/authelia-authrequest.conf" + owner: root + group: root + mode: '0644' + + +# authelia secrets + +- name: create jwt_secret file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/jwt_secret" + insertbefore: BOF + line: "{{ authelia_jwt_secret }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: create session_secret file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/session_secret" + insertbefore: BOF + line: "{{ authelia_session_secret }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: create encryption_key file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/encryption_key" + insertbefore: BOF + line: "{{ authelia_encryption_key }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: create oidc_hmac file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/oidc_hmac" + insertbefore: BOF + line: "{{ authelia_oidc_hmac }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: remove existing cert file + file: + path: "{{ docker_home }}/authelia/secrets/oidc_cert" + state: absent + +- name: create oidc_cert file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/oidc_cert" + insertbefore: BOF + line: "{{ authelia_oidc_cert }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: remove existing key file + file: + path: "{{ docker_home }}/authelia/secrets/oidc_key" + state: absent + +- name: create oidc_key file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/oidc_key" + insertbefore: BOF + line: "{{ authelia_oidc_key }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: create smtp_password file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/smtp_password" + insertbefore: BOF + line: "{{ authelia_smtp_password }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: create ldap_password file + lineinfile: + path: "{{ docker_home }}/authelia/secrets/ldap_password" + insertbefore: BOF + line: "{{ authelia_ldap_password }}" + owner: root + group: root + mode: '0644' + create: yes + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push authelia image + become: yes + become_user: "{{ 
docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create authelia docker network + docker_network: + name: "{{ authelia_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ authelia_subnet }}" + gateway: "{{ authelia_gateway }}" + +- name: create and deploy authelia container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "authelia" + hostname: "authelia" + image: "{{ custom_registry }}/{{ repo_tag }}" + recreate: yes + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ authelia_network_name }}" + ipv4_address: "{{ authelia_ipv4 }}" + ports: + - "127.0.0.1:9091:9091" + - "127.0.0.1:9959:9959" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "TZ": "{{ timezone }}" + "AUTHELIA_JWT_SECRET_FILE": "/secrets/jwt_secret" + "AUTHELIA_SESSION_SECRET_FILE": "/secrets/session_secret" + "AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE": "/secrets/encryption_key" + "AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE": "/secrets/oidc_hmac" + "AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE": "/secrets/oidc_cert" + "AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE": "/secrets/oidc_key" + "AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE": "/secrets/smtp_password" + "AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE": "/secrets/ldap_password" + volumes: + - "{{ docker_home }}/authelia/config:/config" + - "{{ docker_home }}/authelia/secrets:/secrets" + + +- name: create and deploy redis container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "redis_authelia" + hostname: "redis_authelia" + image: redis:alpine + state: 'started' + recreate: yes + pull: yes + restart_policy: unless-stopped + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ authelia_network_name }}" + ipv4_address: "{{ redis_authelia_ipv4 }}" + volumes: + - "{{ docker_home }}/authelia/redis_data:/data" + exposed_ports: + - '6379' + env: + "TZ": "{{ timezone }}" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ authelia_nginx_config }}" + dest: /etc/nginx/sites-available/authelia.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/authelia.conf + dest: /etc/nginx/sites-enabled/authelia.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/bookstack/handlers/main.yml b/roles/services/containers/bookstack/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/bookstack/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/bookstack/tasks/main.yml b/roles/services/containers/bookstack/tasks/main.yml new file mode 100644 index 0000000..3965143 --- /dev/null +++ b/roles/services/containers/bookstack/tasks/main.yml @@ -0,0 +1,118 @@ +- name: set image fact + set_fact: + image: 
linuxserver/bookstack:version-v23.05 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create bookstack directory + file: + path: "{{ docker_home }}/bookstack" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create data directory + file: + path: "{{ docker_home }}/bookstack/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create bookstack docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ bookstack_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ bookstack_subnet }}" + gateway: "{{ bookstack_gateway }}" + +- name: create and deploy bookstack db + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "bookstack-db" + hostname: "bookstack-db" + image: linuxserver/mariadb:10.11.4 + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ bookstack_network_name }}" + ipv4_address: "{{ bookstack_db_ipv4 }}" + volumes: + - "{{ docker_home }}/bookstack/data:/config" + env: + "TZ": "{{ timezone }}" + "MYSQL_ROOT_PASSWORD": "{{ bookstack_mysql_root_password }}" + "MYSQL_DATABASE": "bookstack" + "MYSQL_USER": "bookstack" + "MYSQL_PASSWORD": "{{ bookstack_mysql_password }}" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: create and deploy bookstack container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "bookstack" + hostname: "bookstack" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ bookstack_network_name }}" + ipv4_address: "{{ bookstack_ipv4 }}" + ports: + - "127.0.0.1:{{ bookstack_external_port }}:80" + volumes: + - "{{ docker_home }}/bookstack/data:/config" + env: + "DB_HOST": "bookstack-db" + "DB_PORT": "3306" + "DB_USER": "bookstack" + "DB_PASS": "{{ bookstack_mysql_password }}" + "DB_DATABASE": "bookstack" + "APP_URL": "https://{{ bookstack_server_name }}" + "AUTH_METHOD": "oidc" + "OIDC_NAME": "SSO" + "OIDC_DISPLAY_NAME_CLAIMS": "name" + "OIDC_CLIENT_ID": "bookstack" + "OIDC_CLIENT_SECRET": "{{ bookstack_oidc_secret }}" + "OIDC_ISSUER": "{{ oidc_issuer }}" + "OIDC_ISSUER_DISCOVER": "true" + "APP_DEFAULT_DARK_MODE": "true" + #"OIDC_DUMP_USER_DETAILS": "true" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ bookstack_nginx_config }}" + dest: /etc/nginx/sites-available/bookstack.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/bookstack.conf + dest: /etc/nginx/sites-enabled/bookstack.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/cadvisor/handlers/main.yml b/roles/services/containers/cadvisor/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/cadvisor/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git 
a/roles/services/containers/cadvisor/tasks/main.yml b/roles/services/containers/cadvisor/tasks/main.yml new file mode 100644 index 0000000..cc30cdb --- /dev/null +++ b/roles/services/containers/cadvisor/tasks/main.yml @@ -0,0 +1,90 @@ +- name: create cadvisor directory + file: + path: "{{ docker_home }}/cadvisor" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: build cadvisor image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/cadvisor:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + build: + path: /srv/docker/cadvisor/src + dockerfile: deploy/Dockerfile + source: build + push: yes + +- name: create cadvisor docker network + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_network: + name: "{{ cadvisor_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ cadvisor_subnet }}" + gateway: "{{ cadvisor_gateway }}" + +- name: create and deploy cadvisor container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "cadvisor" + hostname: "cadvisor" + image: "{{ docker_registry_url }}/{{ docker_registry_username }}/cadvisor:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ cadvisor_network_name }}" + ipv4_address: "{{ cadvisor_ipv4 }}" + ports: + - "127.0.0.1:{{ cadvisor_external_port }}:8080" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + volumes: + - "/:/rootfs:ro" + - "/run/user/{{ docker_uid }}:/var/run:ro" + - "/sys:/sys:ro" + - "/{{ docker_home }}/.local/share/docker:/var/lib/docker:ro" + - "/dev/disk:/dev/disk:ro" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ cadvisor_nginx_config }}" + dest: /etc/nginx/sites-available/cadvisor.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/cadvisor.conf + dest: /etc/nginx/sites-enabled/cadvisor.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/drawio/handlers/main.yml b/roles/services/containers/drawio/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/drawio/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/drawio/tasks/main.yml b/roles/services/containers/drawio/tasks/main.yml new file mode 100644 index 0000000..27bbefd --- /dev/null +++ b/roles/services/containers/drawio/tasks/main.yml @@ -0,0 +1,149 @@ +- name: set image fact + set_fact: + image: jgraph/drawio:21.5.0 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + 
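'/' + docker_registry_username }}"
+# this role deploys two containers on one bridge network: drawio itself and
+# the image-export companion that drawio's EXPORT_URL points at for
+# server-side export rendering. The mirrored drawio image and the
+# already-published exporter image are both pulled from the namespace
+# "{{ docker_registry_url + 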
'/' + docker_registry_username }}" + +- name: create drawio directory + file: + path: "{{ docker_home }}/drawio" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create drawio fonts directory + file: + path: /usr/share/fonts/drawio + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get drawio image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: get export-server image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/image-export:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create drawio docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ drawio_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ drawio_subnet }}" + gateway: "{{ drawio_gateway }}" + +- name: create and deploy drawio export-server + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "image-export" + image: "{{ docker_registry_url }}/{{ docker_registry_username }}/image-export:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + pull: yes + exposed_ports: + - '8000' + purge_networks: yes + networks: + - name: "{{ drawio_network_name }}" + ipv4_address: "{{ drawio_export_ipv4 }}" + volumes: + - fonts_volume:/usr/share/fonts/drawio + env: + DRAWIO_BASE_URL: "{{ drawio_base_url }}" + cap_drop: + - all + hostname: "image-export" + restart_policy: unless-stopped + state: 'started' + recreate: yes + +- name: create and deploy drawio + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "drawio" + image: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + pull: yes + purge_networks: yes + networks: + - name: "{{ drawio_network_name }}" + ipv4_address: "{{ drawio_ipv4 }}" + ports: + - "127.0.0.1:8443:8443" + - "127.0.0.1:8400:8080" + links: + - image-export:image-export + env: + DRAWIO_SELF_CONTAINED: "1" + PLANTUML_URL: "http://plantuml-server:8080/" + EXPORT_URL: "http://image-export:8000/" + DRAWIO_PUSHER_MODE: "2" + cap_drop: + - all + hostname: "drawio" + restart_policy: unless-stopped + state: 'started' + recreate: yes + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ drawio_nginx_config }}" + dest: /etc/nginx/sites-available/drawio.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: 
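/etc/nginx/sites-available/drawio.conf
+# Debian-style vhost layout, used by every container role here: the config
+# lands in sites-available, a symlink in sites-enabled activates it, and the
+# "restart nginx" handler notified by the deploy task picks the change up
+# when handlers run at the end of the play. This task links
+# the file 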
/etc/nginx/sites-available/drawio.conf + dest: /etc/nginx/sites-enabled/drawio.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/firefly/handlers/main.yml b/roles/services/containers/firefly/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/firefly/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/firefly/tasks/main.yml b/roles/services/containers/firefly/tasks/main.yml new file mode 100644 index 0000000..ab389e2 --- /dev/null +++ b/roles/services/containers/firefly/tasks/main.yml @@ -0,0 +1,172 @@ +- name: set image fact + set_fact: + image: fireflyiii/core:version-6.0.13 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create firefly directory + file: + path: "{{ docker_home }}/firefly" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create data directory + file: + path: "{{ docker_home }}/firefly/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create db directory + file: + path: "{{ docker_home }}/firefly/db" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create firefly docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ firefly_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ firefly_subnet }}" + gateway: "{{ firefly_gateway }}" + +- name: create and deploy firefly db + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "firefly-db" + hostname: "firefly-db" + image: postgres:alpine + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ firefly_network_name }}" + ipv4_address: "{{ firefly_db_ipv4 }}" + volumes: + - "{{ docker_home }}/firefly/data:/var/lib/postgresql/data" + env: + "POSTGRES_USER": "{{ firefly_postgres_user }}" + "POSTGRES_PASSWORD": "{{ firefly_postgres_password }}" + "POSTGRES_DB": "{{ firefly_postgres_db }}" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: create and deploy firefly container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "firefly" + hostname: "firefly" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ firefly_network_name }}" + ipv4_address: "{{ firefly_ipv4 }}" + ports: + - "127.0.0.1:{{ firefly_external_port }}:8080" + volumes: + - "{{ docker_home }}/firefly/upload:/var/www/html/storage/upload" + env: + "TZ": "{{ timezone }}" + "APP_KEY": "{{ firefly_app_key }}" + "STATIC_CRON_TOKEN": "{{ firefly_cron_token }}" + "DB_HOST": "firefly-db" + "DB_PORT": "5432" + "DB_CONNECTION": "pgsql" + "DB_USERNAME": "{{ firefly_postgres_user }}" + "DB_PASSWORD": "{{ firefly_postgres_password }}" + "DB_DATABASE": "{{ firefly_postgres_db }}" + "AUTHENTICATION_GUARD": "remote_user_guard" + "AUTHENTICATION_GUARD_HEADER": "HTTP_REMOTE_USER" + "AUTHENTICATION_GUARD_EMAIL": 
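"HTTP_REMOTE_EMAIL"
+# remote_user_guard hands authentication off to the reverse proxy: Firefly
+# III trusts whatever identity the proxy injects, so the nginx vhost must
+# set these headers itself (presumably from Authelia's auth_request
+# response) and strip any client-supplied copies. The username arrives in
+# "HTTP_REMOTE_USER" and the email claim in 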
"HTTP_REMOTE_EMAIL" + "APP_URL": "https://{{ firefly_server_name }}" + "TRUSTED_PROXIES": "*" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: create and deploy firefly importer container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "firefly-importer" + hostname: "firefly-importer" + image: "fireflyiii/data-importer:version-1.3.0" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ firefly_network_name }}" + ipv4_address: "{{ firefly_importer_ipv4 }}" + ports: + - "127.0.0.1:{{ firefly_importer_external_port }}:8080" + env: + "TZ": "{{ timezone }}" + "FIREFLY_III_URL": "http://firefly:8080" + "FIREFLY_III_ACCESS_TOKEN": "{{ firefly_access_token }}" + "VANITY_URL": "https://{{ firefly_server_name }}" + "TRUSTED_PROXIES": "*" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: create and deploy firefly cron container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "firefly-cron" + hostname: "firefly-cron" + image: alpine + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ firefly_network_name }}" + ipv4_address: "{{ firefly_cron_ipv4 }}" + env: + "POSTGRES_USER": "{{ firefly_postgres_user }}" + "POSTGRES_PASSWORD": "{{ firefly_postgres_password }}" + "POSTGRES_DB": "{{ firefly_postgres_db }}" + command: 'sh -c "echo \"0 3 * * * wget -qO- http://firefly:8080/api/v1/cron/{{ firefly_cron_token }}\" | crontab - && crond -f -L /dev/stdout"' + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ firefly_nginx_config }}" + dest: /etc/nginx/sites-available/firefly.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/firefly.conf + dest: /etc/nginx/sites-enabled/firefly.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/freshrss/handlers/main.yml b/roles/services/containers/freshrss/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/freshrss/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/freshrss/tasks/main.yml b/roles/services/containers/freshrss/tasks/main.yml new file mode 100644 index 0000000..26109b3 --- /dev/null +++ b/roles/services/containers/freshrss/tasks/main.yml @@ -0,0 +1,101 @@ +- name: set image fact + set_fact: + image: freshrss/freshrss:1.21.0 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create freshrss directory + file: + path: "{{ docker_home }}/freshrss" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get 
freshrss image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create freshrss data directory + file: + path: "{{ docker_home }}/freshrss/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create freshrss docker network + docker_network: + name: "{{ freshrss_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ freshrss_subnet }}" + gateway: "{{ freshrss_gateway }}" + +- name: create and deploy freshrss container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "freshrss" + hostname: "freshrss" + image: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ freshrss_network_name }}" + ipv4_address: "{{ freshrss_ipv4 }}" + ports: + - "127.0.0.1:8090:80" + state: 'started' + recreate: yes + restart_policy: unless-stopped + volumes: + - "{{ docker_home }}/freshrss/data:/var/www/FreshRSS/data" + env: + "CRON_MIN": "0,15,30,45" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ freshrss_nginx_config }}" + dest: /etc/nginx/sites-available/freshrss.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/freshrss.conf + dest: /etc/nginx/sites-enabled/freshrss.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/gitea/handlers/main.yml b/roles/services/containers/gitea/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/gitea/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/gitea/tasks/main.yml b/roles/services/containers/gitea/tasks/main.yml new file mode 100644 index 0000000..fecec5e --- /dev/null +++ b/roles/services/containers/gitea/tasks/main.yml @@ -0,0 +1,171 @@ +- name: set image fact + set_fact: + image: gitea/gitea:1.19.3 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create gitea directory + file: + path: "{{ docker_home }}/gitea" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get gitea image + become: yes + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + source: pull + force_source: yes + +- name: create git user on host + user: + name: "git" + uid: "{{ gitea_git_uid }}" + create_home: yes + generate_ssh_key: yes + shell: /bin/bash + +- name: get git user public key + command: cat /home/git/.ssh/id_rsa.pub + 
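# SSH container passthrough for gitea: /home/git/.ssh is bind-mounted into
+# the container below, so authorizing the host git user's own public key
+# lets the /usr/local/bin/gitea shim created further down ssh into the
+# container's sshd on 127.0.0.1:2222 and replay SSH_ORIGINAL_COMMAND;
+# clients keep using ordinary git@host clone URLs on port 22.
+ 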
register: pubkey + changed_when: false + +- name: add git user public key to git user's authorized_keys file + authorized_key: + user: git + key: "{{ pubkey.stdout }}" + +- name: create fake host gitea + blockinfile: + path: /usr/local/bin/gitea + create: yes + owner: root + group: root + mode: '0755' + block: | + #!/bin/sh + ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" + +- name: create gitea data directory + file: + path: "{{ docker_home }}/gitea/data" + state: directory + owner: "{{ gitea_git_uid }}" + group: "{{ gitea_git_uid }}" + mode: '0755' + +- name: create gitea config directory + file: + path: "{{ docker_home }}/gitea/config" + state: directory + owner: "{{ gitea_git_uid }}" + group: "{{ gitea_git_uid }}" + mode: '0755' + +- name: copy gitea config file + copy: + src: "{{ gitea_config }}" + dest: "{{ docker_home }}/gitea/config/app.ini" + owner: "{{ gitea_git_uid }}" + group: "{{ gitea_git_uid }}" + mode: '0644' + +- name: change gitea internal token + lineinfile: + path: "{{ docker_home }}/gitea/config/app.ini" + regexp: "^INTERNAL_TOKEN" + line: "INTERNAL_TOKEN = {{ gitea_internal_token }}" + +- name: change gitea lfs jwt secret + lineinfile: + path: "{{ docker_home }}/gitea/config/app.ini" + regexp: "^LFS_JWT_SECRET" + line: "LFS_JWT_SECRET = {{ gitea_lfs_jwt_secret }}" + +- name: set permissions on gitea data + file: + path: "{{ docker_home }}/gitea/data/" + owner: "{{ gitea_git_uid }}" + group: "{{ gitea_git_uid }}" + mode: u=rwX,g=rX,o=rX + recurse: yes + +- name: set permissions on gitea config + file: + path: "{{ docker_home }}/gitea/config/" + owner: "{{ gitea_git_uid }}" + group: "{{ gitea_git_uid }}" + mode: u=rwX,g=rX,o=rX + recurse: yes + +- name: create gitea docker network + docker_network: + name: "{{ gitea_network_name }}" + driver: bridge + ipam_config: + - subnet: "{{ gitea_subnet }}" + gateway: "{{ gitea_gateway }}" + +- name: create and deploy gitea container + become: yes + docker_container: + name: "gitea" + hostname: "gitea" + image: "{{ custom_registry }}/{{ repo_tag }}" + purge_networks: yes + networks: + - name: "{{ gitea_network_name }}" + ipv4_address: "{{ gitea_ipv4 }}" + ports: + - "127.0.0.1:{{ gitea_external_port }}:3000" + - "127.0.0.1:2222:22" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "USER_UID": "{{ gitea_git_uid }}" + "USER_GID": "{{ gitea_git_uid }}" + volumes: + - "{{ docker_home }}/gitea/data:/data" + - "{{ docker_home }}/gitea/config:/data/gitea/conf" + - "/home/git/.ssh/:/data/git/.ssh" + - "/etc/timezone:/etc/timezone:ro" + - "/etc/localtime:/etc/localtime:ro" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ gitea_nginx_config }}" + dest: /etc/nginx/sites-available/gitea.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/gitea.conf + dest: /etc/nginx/sites-enabled/gitea.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/home_assistant/handlers/main.yml b/roles/services/containers/home_assistant/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/home_assistant/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/home_assistant/tasks/main.yml b/roles/services/containers/home_assistant/tasks/main.yml new file mode 100644 index 
0000000..b44c529 --- /dev/null +++ b/roles/services/containers/home_assistant/tasks/main.yml @@ -0,0 +1,86 @@ +- name: set image fact + set_fact: + image: homeassistant/home-assistant:2023.6.3 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create home_assistant directory + file: + path: "{{ docker_home }}/home_assistant" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create config directory + file: + path: "{{ docker_home }}/home_assistant/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: deploy configuration + copy: + src: "{{ home_assistant_config }}" + dest: "{{ docker_home }}/home_assistant/config/configuration.yaml" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0644' + +- name: create home_assistant network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ home_assistant_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ home_assistant_subnet }}" + gateway: "{{ home_assistant_gateway }}" + +- name: create and deploy home_assistant container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "home_assistant" + hostname: "home_assistant" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ home_assistant_network_name }}" + ipv4_address: "{{ home_assistant_ipv4 }}" + ports: + - "127.0.0.1:{{ home_assistant_external_port }}:8123" + volumes: + - "{{ docker_home }}/home_assistant/config:/config" + env: + "TZ": "{{ timezone }}" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ home_assistant_nginx_config }}" + dest: /etc/nginx/sites-available/home_assistant.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/home_assistant.conf + dest: /etc/nginx/sites-enabled/home_assistant.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/homer/handlers/main.yml b/roles/services/containers/homer/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/homer/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/homer/tasks/main.yml b/roles/services/containers/homer/tasks/main.yml new file mode 100644 index 0000000..b646d12 --- /dev/null +++ b/roles/services/containers/homer/tasks/main.yml @@ -0,0 +1,122 @@ +- name: set image fact + set_fact: + image: b4bz/homer:v23.05.1 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create homer directory + file: + path: "{{ docker_home }}/homer" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + 
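# rootless Docker pattern used across all of these roles: XDG_RUNTIME_DIR
+# locates the per-user runtime directory and docker_host targets that
+# user's rootless daemon socket instead of the system-wide
+# /var/run/docker.sock, so containers run without root privileges.
+ 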
docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get homer image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create homer assets directory + file: + path: "{{ docker_home }}/homer/assets" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: synchronize homer assets + synchronize: + src: "{{ homer_assets_dir }}" + dest: "{{ docker_home }}/homer/assets/" + delete: yes + +- name: set permissions on homer assets + file: + path: "{{ docker_home }}/homer/assets/" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: u=rwX,g=rX,o=rX + recurse: yes + +- name: set permissions on homer assets directory + file: + path: "{{ docker_home }}/homer/assets/" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + recurse: no + +- name: create homer docker network + docker_network: + name: "{{ homer_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ homer_subnet }}" + gateway: "{{ homer_gateway }}" + +- name: create and deploy homer container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "homer" + hostname: "homer" + image: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ homer_network_name }}" + ipv4_address: "{{ homer_ipv4 }}" + ports: + - "127.0.0.1:8001:8080" + state: 'started' + recreate: yes + restart_policy: unless-stopped + volumes: + - "{{ docker_home }}/homer/assets:/www/assets" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ homer_nginx_config }}" + dest: /etc/nginx/sites-available/homer.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/homer.conf + dest: /etc/nginx/sites-enabled/homer.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/invidious/handlers/main.yml b/roles/services/containers/invidious/handlers/main.yml new file mode 100644 index 0000000..a3a5d0b --- /dev/null +++ b/roles/services/containers/invidious/handlers/main.yml @@ -0,0 +1,29 @@ +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: build invidious image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/invidious:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + build: + path: /srv/docker/invidious/src + dockerfile: docker/Dockerfile + source: build + push: yes + +- 
name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/invidious/tasks/main.yml b/roles/services/containers/invidious/tasks/main.yml new file mode 100644 index 0000000..6bff0e2 --- /dev/null +++ b/roles/services/containers/invidious/tasks/main.yml @@ -0,0 +1,124 @@ +- name: set image fact + set_fact: + image: gitea.chudnick.com/sam/invidious:latest + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create invidious directory + file: + path: "{{ docker_home }}/invidious" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create postgres data directory + file: + path: "{{ docker_home }}/invidious/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: clone invidious repo + become: yes + become_user: "{{ docker_username }}" + notify: + - login to docker registry + - build invidious image + git: + repo: "{{ invidious_repo }}" + dest: "{{ docker_home }}/invidious/src" + version: "master" + +- meta: flush_handlers + +- name: create invidious docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ invidious_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ invidious_subnet }}" + gateway: "{{ invidious_gateway }}" + +- name: create and deploy invidious db + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "invidious-db" + hostname: "invidious-db" + image: postgres:13 + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ invidious_network_name }}" + ipv4_address: "{{ invidious_db_ipv4 }}" + volumes: + - "{{ docker_home }}/invidious/data:/var/lib/postgresql/data" + - "{{ docker_home }}/invidious/src/config/sql:/config/sql" + - "{{ docker_home }}/invidious/src/docker/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh" + env: + "POSTGRES_DB": "invidious" + "POSTGRES_USER": "invidious" + "POSTGRES_PASSWORD": "{{ invidious_postgres_password }}" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: create and deploy invidious container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "invidious" + hostname: "invidious" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + env: + "dbname": "invidious" + "user": "invidious" + "password": "{{ invidious_postgres_password }}" + "host": "invidious-db" + "port": "5432" + "check_tables": "true" + "https_only": "true" + "hsts": "true" + "domain": "{{ invidious_server_name }}" + "dark_mode": "dark" + networks: + - name: "{{ invidious_network_name }}" + ipv4_address: "{{ invidious_ipv4 }}" + ports: + - "127.0.0.1:{{ invidious_external_port }}:3000" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ invidious_nginx_config }}" + dest: /etc/nginx/sites-available/invidious.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/invidious.conf 
+ dest: /etc/nginx/sites-enabled/invidious.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/jellyfin/handlers/main.yml b/roles/services/containers/jellyfin/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/jellyfin/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/jellyfin/tasks/main.yml b/roles/services/containers/jellyfin/tasks/main.yml new file mode 100644 index 0000000..c7a424d --- /dev/null +++ b/roles/services/containers/jellyfin/tasks/main.yml @@ -0,0 +1,159 @@ +- name: set image fact + set_fact: + image: jellyfin/jellyfin:10.8.10 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create jellyfin directory + file: + path: "{{ docker_home }}/jellyfin" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get jellyfin image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create jellyfin config directory + file: + path: "{{ docker_home }}/jellyfin/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create jellyfin cache directory + file: + path: "{{ docker_home }}/jellyfin/cache" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create jellyfin media directory + file: + path: "{{ docker_home }}/jellyfin/media" + state: directory + group: "{{ docker_username }}" + mode: '0755' + +- name: copy jellyfin config + synchronize: + src: "{{ jellyfin_config }}" + dest: "{{ docker_home }}/jellyfin/config" + +- name: copy jellyfin media + synchronize: + src: "{{ jellyfin_media }}" + dest: "{{ docker_home }}/jellyfin/media" + ignore_errors: yes + +- name: copy jellyfin web config + copy: + src: "{{ jellyfin_web_config }}" + dest: "{{ docker_home }}/jellyfin/web-config.json" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0644' + +- name: set config permissions + file: + path: "{{ docker_home }}/jellyfin/config" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + recurse: yes + +- name: set media permissions + file: + path: "{{ docker_home }}/jellyfin/media" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + recurse: yes + +- name: create jellyfin docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ jellyfin_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ jellyfin_subnet }}" + gateway: "{{ jellyfin_gateway }}" + +- name: create and deploy 
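jellyfin container
+# hardening notes, as reflected in the options below: the arr stack's media
+# library is mounted read-only at /media, the web UI is themed via the
+# bind-mounted web-config.json, and every Linux capability is dropped for
+# the 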
jellyfin container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "jellyfin" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ jellyfin_network_name }}" + ipv4_address: "{{ jellyfin_ipv4 }}" + ports: + - "127.0.0.1:8096:8096" + volumes: + - "{{ docker_home }}/jellyfin/config:/config" + - "{{ docker_home }}/jellyfin/cache:/cache" + - "{{ docker_home }}/arr/data/media:/media:ro" + - "{{ docker_home }}/jellyfin/web-config.json:/jellyfin/jellyfin-web/config.json" + env: + JELLYFIN_PublishedServerUrl: "{{ jellyfin_url }}" + cap_drop: + - all + hostname: "jellyfin" + restart_policy: unless-stopped + state: 'started' + recreate: yes + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ jellyfin_nginx_config }}" + dest: /etc/nginx/sites-available/jellyfin.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/jellyfin.conf + dest: /etc/nginx/sites-enabled/jellyfin.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/kanboard/handlers/main.yml b/roles/services/containers/kanboard/handlers/main.yml new file mode 100644 index 0000000..de5dcb6 --- /dev/null +++ b/roles/services/containers/kanboard/handlers/main.yml @@ -0,0 +1,18 @@ +- name: build pywttr-docker image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/pywttr-docker:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + build: + path: /srv/docker/pywttr-docker/src + source: build + push: yes + force_source: yes + +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/kanboard/tasks/main.yml b/roles/services/containers/kanboard/tasks/main.yml new file mode 100644 index 0000000..1efc16e --- /dev/null +++ b/roles/services/containers/kanboard/tasks/main.yml @@ -0,0 +1,93 @@ +- name: set image fact + set_fact: + image: kanboard/kanboard:v1.2.30 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create kanboard directory + file: + path: "{{ docker_home }}/kanboard" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create data directory + file: + path: "{{ docker_home }}/kanboard/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: deploy custom configuration + copy: + src: "{{ kanboard_config }}" + dest: "{{ docker_home }}/kanboard/data/config.php" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0644' + +- name: create plugins directory + file: + path: "{{ docker_home }}/kanboard/plugins" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create kanboard network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ kanboard_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ kanboard_subnet }}" + gateway: "{{ 
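kanboard_gateway }}"
+# per-service networking, repeated throughout these roles: each stack gets
+# its own bridge with a static IPAM range so containers keep predictable
+# addresses across recreation; this one covers "{{ kanboard_subnet }}" with
+# its gateway at "{{ 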
kanboard_gateway }}" + +- name: create and deploy kanboard container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "kanboard" + hostname: "kanboard" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ kanboard_network_name }}" + ipv4_address: "{{ kanboard_ipv4 }}" + ports: + - "127.0.0.1:{{ kanboard_external_port }}:80" + volumes: + - "{{ docker_home }}/kanboard/data:/var/www/app/data" + - "{{ docker_home }}/kanboard/plugins:/var/www/app/plugins" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ kanboard_nginx_config }}" + dest: /etc/nginx/sites-available/kanboard.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/kanboard.conf + dest: /etc/nginx/sites-enabled/kanboard.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/navidrome/handlers/main.yml b/roles/services/containers/navidrome/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/navidrome/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/navidrome/tasks/main.yml b/roles/services/containers/navidrome/tasks/main.yml new file mode 100644 index 0000000..e95e849 --- /dev/null +++ b/roles/services/containers/navidrome/tasks/main.yml @@ -0,0 +1,117 @@ +- name: set image fact + set_fact: + image: deluan/navidrome:0.49.2 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create navidrome directory + file: + path: "{{ docker_home }}/navidrome" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create navidrome data directory + file: + path: "{{ docker_home }}/navidrome/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create navidrome music directory + file: + path: "{{ docker_home }}/navidrome/music" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push navidrome image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create navidrome docker network + docker_network: + name: "{{ navidrome_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ navidrome_subnet }}" + gateway: "{{ navidrome_gateway }}" + +- name: create and deploy navidrome container + become: yes + become_user: "{{ 
docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "navidrome" + hostname: "navidrome" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ navidrome_network_name }}" + ipv4_address: "{{ navidrome_ipv4 }}" + ports: + - "127.0.0.1:4533:4533" + state: 'started' + recreate: yes + restart_policy: unless-stopped + env: + "ND_AUTHREQUEST_LIMIT": "2" + "ND_PASSWORDENCRYPTIONKEY": "{{ navidrome_encryptionkey }}" + "ND_LASTFM_ENABLED": "false" + "ND_PROMETHEUS_ENABLED": "true" + "ND_PROMETHEUS_METRICSPATH": "/metrics" + "ND_REVERSEPROXYWHITELIST": "172.25.5.0/24" + "ND_LOGLEVEL": "debug" + volumes: + - "{{ docker_home }}/navidrome/data:/data" + - "{{ docker_home }}/arr/data/media/music:/music:ro" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ navidrome_nginx_config }}" + dest: /etc/nginx/sites-available/navidrome.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/navidrome.conf + dest: /etc/nginx/sites-enabled/navidrome.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/nextcloud/handlers/main.yml b/roles/services/containers/nextcloud/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/nextcloud/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/nextcloud/tasks/main.yml b/roles/services/containers/nextcloud/tasks/main.yml new file mode 100644 index 0000000..fbd4a76 --- /dev/null +++ b/roles/services/containers/nextcloud/tasks/main.yml @@ -0,0 +1,184 @@ +- name: set image fact + set_fact: + image: nextcloud:27.0.0-apache + +- name: set other facts + set_fact: + repo_tag: "{{ image }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create nextcloud directory + file: + path: "{{ docker_home }}/nextcloud" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create nextcloud app directory + file: + path: "{{ docker_home }}/nextcloud/app/" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create nextcloud data directory + file: + path: "{{ docker_home }}/nextcloud/data/" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: pull and push nextcloud image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create nextcloud docker network + docker_network: + name: "{{ nextcloud_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - 
subnet: "{{ nextcloud_subnet }}" + gateway: "{{ nextcloud_gateway }}" + +- name: create and deploy postgres container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "nextcloud-postgres" + hostname: "nextcloud-postgres" + image: "postgres:alpine" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ nextcloud_network_name }}" + ipv4_address: "{{ nextcloud_postgres_ipv4 }}" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "POSTGRES_USER": "{{ nextcloud_postgres_user }}" + "POSTGRES_PASSWORD": "{{ nextcloud_postgres_password }}" + "POSTGRES_DB": "{{ nextcloud_postgres_db }}" + volumes: + - "{{ docker_home }}/nextcloud/data:/var/lib/postgresql/data" + +- name: create and deploy redis container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "nextcloud-redis" + hostname: "nextcloud-redis" + image: "redis:alpine" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ nextcloud_network_name }}" + ipv4_address: "{{ nextcloud_redis_ipv4 }}" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + +- name: create and deploy nextcloud container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "nextcloud" + hostname: "nextcloud" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ nextcloud_network_name }}" + ipv4_address: "{{ nextcloud_ipv4 }}" + ports: + - "127.0.0.1:{{ nextcloud_external_port }}:80" + state: 'started' + comparisons: + '*': strict + restart_policy: unless-stopped + env: + "POSTGRES_USER": "{{ nextcloud_postgres_user }}" + "POSTGRES_PASSWORD": "{{ nextcloud_postgres_password }}" + "POSTGRES_DB": "{{ nextcloud_postgres_db }}" + "POSTGRES_HOST": "nextcloud-postgres" + "REDIS_HOST": "nextcloud-redis" + "NEXTCLOUD_ADMIN_USER": "{{ nextcloud_admin }}" + "NEXTCLOUD_ADMIN_PASSWORD": "{{ nextcloud_admin_password }}" + "NEXTCLOUD_TRUSTED_DOMAINS": "{{ nextcloud_trusted_domains }}" + volumes: + - "{{ docker_home }}/nextcloud/app:/var/www/html" + +- name: create and deploy nextcloud cron container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "nextcloud-cron" + hostname: "nextcloud-cron" + image: "{{ custom_registry }}/{{ repo_tag }}" + entrypoint: "/cron.sh" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ nextcloud_network_name }}" + ipv4_address: "{{ nextcloud_cron_ipv4 }}" + state: 'started' + recreate: yes + restart_policy: unless-stopped + volumes: + - "{{ docker_home }}/nextcloud/app:/var/www/html" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ nextcloud_nginx_config }}" + dest: /etc/nginx/sites-available/nextcloud.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/nextcloud.conf + dest: /etc/nginx/sites-enabled/nextcloud.conf + owner: root + group: root + state: link diff --git 
a/roles/services/containers/photoprism/defaults/main.yml b/roles/services/containers/photoprism/defaults/main.yml new file mode 100644 index 0000000..ceca8c3 --- /dev/null +++ b/roles/services/containers/photoprism/defaults/main.yml @@ -0,0 +1,10 @@ +photoprism_admin_user: "admin" +photoprism_auth_mode: "password" +photoprism_site_url: "https://photos.chudnick.com" +photoprism_external_port: 2342 +photoprism_nginx_config: data/photoprism/photoprism.conf +photoprism_network_name: photoprism_net +photoprism_subnet: 172.25.15.0/24 +photoprism_gateway: 172.25.15.1 +photoprism_ipv4: 172.25.15.2 +nextcloud_external_port: 8006 diff --git a/roles/services/containers/photoprism/handlers/main.yml b/roles/services/containers/photoprism/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/photoprism/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/photoprism/tasks/main.yml b/roles/services/containers/photoprism/tasks/main.yml new file mode 100644 index 0000000..e6ac544 --- /dev/null +++ b/roles/services/containers/photoprism/tasks/main.yml @@ -0,0 +1,115 @@ +- name: set image fact + set_fact: + image: photoprism/photoprism:221118-jammy + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create photoprism directory + file: + path: "{{ docker_home }}/photoprism" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get photoprism image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + push: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + +- name: create photoprism data directory + file: + path: "{{ docker_home }}/photoprism/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create photoprism photos directory + file: + path: "{{ docker_home }}/photoprism/photos" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create photoprism docker network + docker_network: + name: "{{ photoprism_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ photoprism_subnet }}" + gateway: "{{ photoprism_gateway }}" + +- name: create and deploy photoprism container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "photoprism" + hostname: "photoprism" + image: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ photoprism_network_name }}" + ipv4_address: "{{ photoprism_ipv4 }}" + ports: + - "127.0.0.1:{{ 
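photoprism_external_port }}:2342"
+# ports are published on 127.0.0.1 only, the convention throughout these
+# roles: containers are never exposed directly, and the nginx vhost
+# deployed below proxies external traffic to the
+# loopback-only mapping "127.0.0.1:{{ 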
photoprism_external_port }}:2342" + state: 'started' + recreate: yes + restart_policy: unless-stopped + volumes: + - "{{ docker_home }}/photoprism/photos:/photoprism/originals" + - "{{ docker_home }}/photoprism/data:/photoprism/storage" + env: + "PHOTOPRISM_ADMIN_USER": "{{ photoprism_admin_user }}" + "PHOTOPRISM_ADMIN_PASSWORD": "{{ photoprism_admin_password }}" + "PHOTOPRISM_AUTH_MODE": "{{ photoprism_auth_mode }}" + "PHOTOPRISM_SITE_URL": "{{ photoprism_site_url }}" + "PHOTOPRISM_DATABASE_DRIVER": "sqlite" + "PHOTOPRISM_DISABLE_PLACES": "true" + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ photoprism_nginx_config }}" + dest: /etc/nginx/sites-available/photoprism.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/photoprism.conf + dest: /etc/nginx/sites-enabled/photoprism.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/pihole_exporter/tasks/main.yml b/roles/services/containers/pihole_exporter/tasks/main.yml new file mode 100644 index 0000000..4c52dc7 --- /dev/null +++ b/roles/services/containers/pihole_exporter/tasks/main.yml @@ -0,0 +1,97 @@ +- name: set image fact + set_fact: + image: ekofr/pihole-exporter:v0.4.0 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create pihole_exporter directory + file: + path: "{{ docker_home }}/pihole_exporter" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get pihole_exporter image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create pihole_exporter docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ pihole_exporter_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ pihole_exporter_subnet }}" + gateway: "{{ pihole_exporter_gateway }}" + +- name: create and deploy pihole_exporter container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "pihole_exporter" + hostname: "pihole_exporter" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ pihole_exporter_network_name }}" + ports: + - "127.0.0.1:9617:9617" + state: 'started' + recreate: yes + restart_policy: unless-stopped + env: + "PIHOLE_HOSTNAME": "{{ pihole_ip }}" + "PIHOLE_API_TOKEN": "{{ pihole_api_token }}" + "PORT": "{{ pihole_api_port }}" + cap_drop: + - all + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ 
pihole_exporter_nginx_config }}" + dest: /etc/nginx/sites-available/pihole-exporter.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/pihole-exporter.conf + dest: /etc/nginx/sites-enabled/pihole-exporter.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/pywttr_docker/handlers/main.yml b/roles/services/containers/pywttr_docker/handlers/main.yml new file mode 100644 index 0000000..de5dcb6 --- /dev/null +++ b/roles/services/containers/pywttr_docker/handlers/main.yml @@ -0,0 +1,18 @@ +- name: build pywttr-docker image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/pywttr-docker:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + build: + path: /srv/docker/pywttr-docker/src + source: build + push: yes + force_source: yes + +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/pywttr_docker/tasks/main.yml b/roles/services/containers/pywttr_docker/tasks/main.yml new file mode 100644 index 0000000..45f7b2f --- /dev/null +++ b/roles/services/containers/pywttr_docker/tasks/main.yml @@ -0,0 +1,74 @@ +- name: set image fact + set_fact: + image: gitea.chudnick.com/sam/pywttr-docker:latest + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create pywttr-docker directory + file: + path: "{{ docker_home }}/pywttr-docker" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: clone pywttr-docker repository + notify: build pywttr-docker image + git: + repo: https://gitea.chudnick.com/sam/pywttr-docker + dest: "{{ docker_home }}/pywttr-docker/src" + +- meta: flush_handlers + +- name: create pywttr-docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ pywttr_docker_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ pywttr_docker_subnet }}" + gateway: "{{ pywttr_docker_gateway }}" + +- name: create and deploy pywttr-docker container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "pywttr-docker" + hostname: "pywttr-docker" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ pywttr_docker_network_name }}" + ipv4_address: "{{ pywttr_docker_ipv4 }}" + ports: + - "127.0.0.1:{{ pywttr_docker_external_port }}:8000" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ pywttr_docker_nginx_config }}" + dest: /etc/nginx/sites-available/pywttr-docker.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/pywttr-docker.conf + dest: /etc/nginx/sites-enabled/pywttr-docker.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/renovate/tasks/main.yml b/roles/services/containers/renovate/tasks/main.yml new file mode 100644 index 0000000..bbbfe11 --- /dev/null +++ b/roles/services/containers/renovate/tasks/main.yml @@ -0,0 +1,87 
@@ +- name: set image fact + set_fact: + image: renovate/renovate:35.141.3-slim + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create renovate directory + file: + path: "{{ docker_home }}/renovate" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: create renovate docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ renovate_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ renovate_subnet }}" + gateway: "{{ renovate_gateway }}" + +- name: pull and push renovate image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + source: pull + force_source: yes + push: yes + +- name: create and deploy renovate container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "renovate" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + recreate: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ renovate_network_name }}" + ipv4_address: "{{ renovate_ipv4 }}" + env: + "RENOVATE_ENDPOINT": "{{ renovate_endpoint }}" + "RENOVATE_PLATFORM": "gitea" + "RENOVATE_TOKEN": "{{ renovate_token }}" + "RENOVATE_AUTODISCOVER": "true" + "LOG_LEVEL": "debug" + "RENOVATE_GIT_AUTHOR": "{{ renovate_author }}" + restart_policy: "no" + state: 'started' + + +- name: create cron job to run renovate container daily + cron: + name: "run renovate" + job: "docker start renovate" + user: "{{ docker_username }}" + minute: "0" + hour: "6" diff --git a/roles/services/containers/searxng/handlers/main.yml b/roles/services/containers/searxng/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/searxng/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/searxng/tasks/main.yml b/roles/services/containers/searxng/tasks/main.yml new file mode 100644 index 0000000..fa7609c --- /dev/null +++ b/roles/services/containers/searxng/tasks/main.yml @@ -0,0 +1,170 @@ +- name: set image fact + set_fact: + image: "searxng/searxng:2023.6.16-71b6ff07" + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create searxng directory + file: + path: "{{ docker_home }}/searxng" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: 
"/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: get searxng image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + source: pull + force_source: yes + name: "{{ image }}" + repository: "{{ custom_registry }}/{{ repo_tag }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + push: yes + +- name: create searxng config directory + file: + path: "{{ docker_home }}/searxng/config" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create redis_searxng directory + file: + path: "{{ docker_home }}/redis_searxng" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create redis_searxng data directory + file: + path: "{{ docker_home }}/redis_searxng/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: place searxng config in proper location + copy: + src: "{{ searxng_config }}" + dest: "{{ docker_home }}/searxng/config/settings.yml" + owner: root + group: docker + mode: '0644' + +- name: place uwsgi config + copy: + src: "{{ searxng_uwsgi_config }}" + dest: "{{ docker_home }}/searxng/config/uwsgi.ini" + owner: root + group: docker + mode: '0644' + +- name: create searxng docker network + docker_network: + name: "{{ searxng_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ searxng_subnet }}" + gateway: "{{ searxng_gateway }}" + +- name: create and deploy searxng container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "searxng" + image: "{{ custom_registry }}/{{ repo_tag }}" + pull: yes + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ searxng_network_name }}" + ipv4_address: "{{ searxng_ipv4 }}" + ports: + - "127.0.0.1:8080:8080" + volumes: + - "{{ docker_home }}/searxng/config:/etc/searxng" + env: + SEARXNG_BASE_URL: "https://searxng.chudnick.com/" + cap_drop: + - all + capabilities: + - CHOWN + - SETGID + - SETUID + - DAC_OVERRIDE + hostname: "searxng" + restart_policy: unless-stopped + state: 'started' + recreate: yes + +- name: create and deploy redis container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + restart_policy: unless-stopped + name: "redis_searxng" + image: redis:alpine + pull: yes + command: redis-server --save "" --appendonly "no" + purge_networks: yes + networks: + - name: "{{ searxng_network_name }}" + ipv4_address: "{{ redis_searxng_ipv4 }}" + tmpfs: + - /var/lib/redis + cap_drop: + - all + capabilities: + - SETGID + - SETUID + - DAC_OVERRIDE + hostname: "redis" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + state: 'started' + comparisons: + '*': strict + +- name: deploy nginx configuration + notify: restart nginx + register: nginx_config + copy: + src: "{{ searxng_nginx_config }}" + dest: /etc/nginx/sites-available/searxng.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/searxng.conf + dest: 
/etc/nginx/sites-enabled/searxng.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/text_generation/handlers/main.yml b/roles/services/containers/text_generation/handlers/main.yml new file mode 100644 index 0000000..7aab823 --- /dev/null +++ b/roles/services/containers/text_generation/handlers/main.yml @@ -0,0 +1,29 @@ +- name: login to docker registry + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username: "{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +- name: build text-generation image + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_image: + name: "{{ docker_registry_url }}/{{ docker_registry_username }}/text-generation:latest" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + build: + path: /srv/docker/text-generation/src + source: build + push: yes + force_source: yes + +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/text_generation/tasks/main.yml b/roles/services/containers/text_generation/tasks/main.yml new file mode 100644 index 0000000..80988a6 --- /dev/null +++ b/roles/services/containers/text_generation/tasks/main.yml @@ -0,0 +1,89 @@ +- name: set image fact + set_fact: + image: gitea.chudnick.com/sam/text-generation:latest + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create text-generation directory + file: + path: "{{ docker_home }}/text-generation" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create models directory + file: + path: "{{ docker_home }}/text-generation/models" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: clone text-generation repository + notify: + - login to docker registry + - build text-generation image + git: + repo: https://gitea.chudnick.com/sam/text-generation-docker + dest: "{{ docker_home }}/text-generation/src" + +- meta: flush_handlers + +- name: create text-generation network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ text_generation_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ text_generation_subnet }}" + gateway: "{{ text_generation_gateway }}" + +- name: create and deploy text-generation container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "text-generation" + hostname: "text-generation" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ text_generation_network_name }}" + ipv4_address: "{{ text_generation_ipv4 }}" + volumes: + - "{{ docker_home }}/text-generation/models:/models" + ports: + - "127.0.0.1:{{ text_generation_external_port }}:7860" + - "127.0.0.1:{{ text_generation_api_port }}:5005" + - "127.0.0.1:{{ text_generation_api_stream_port }}:5000" + command: "--cpu --listen --listen-port 7860 --chat --auto-devices --mlock" + state: 'started' + recreate: 
yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ text_generation_nginx_config }}" + dest: /etc/nginx/sites-available/text-generation.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/text-generation.conf + dest: /etc/nginx/sites-enabled/text-generation.conf + owner: root + group: root + state: link diff --git a/roles/services/containers/vaultwarden/handlers/main.yml b/roles/services/containers/vaultwarden/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/vaultwarden/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/containers/vaultwarden/tasks/main.yml b/roles/services/containers/vaultwarden/tasks/main.yml new file mode 100644 index 0000000..fa63b58 --- /dev/null +++ b/roles/services/containers/vaultwarden/tasks/main.yml @@ -0,0 +1,79 @@ +- name: set image fact + set_fact: + image: vaultwarden/server:1.28.1 + +- name: set other facts + vars: + array: "{{ image.split('/', 1) }}" + set_fact: + repo_tag: "{{ array.1 }}" + custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" + +- name: create vaultwarden directory + file: + path: "{{ docker_home }}/vaultwarden" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create data directory + file: + path: "{{ docker_home }}/vaultwarden/data" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0755' + +- name: create vaultwarden docker network + become: yes + become_user: "{{ docker_username }}" + docker_network: + name: "{{ vaultwarden_network_name }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + driver: bridge + ipam_config: + - subnet: "{{ vaultwarden_subnet }}" + gateway: "{{ vaultwarden_gateway }}" + +- name: create and deploy vaultwarden container + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_container: + name: "vaultwarden" + hostname: "vaultwarden" + image: "{{ image }}" + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + purge_networks: yes + networks: + - name: "{{ vaultwarden_network_name }}" + ipv4_address: "{{ vaultwarden_ipv4 }}" + ports: + - "127.0.0.1:{{ vaultwarden_external_port }}:80" + volumes: + - "{{ docker_home }}/vaultwarden/data:/data" + env: + "DOMAIN": "https://{{ vaultwarden_server_name }}" + "DISABLE_ADMIN_TOKEN": "true" + state: 'started' + recreate: yes + restart_policy: unless-stopped + +- name: deploy nginx configuration + notify: restart nginx + template: + src: "{{ vaultwarden_nginx_config }}" + dest: /etc/nginx/sites-available/vaultwarden.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/vaultwarden.conf + dest: /etc/nginx/sites-enabled/vaultwarden.conf + owner: root + group: root + state: link diff --git a/roles/services/docker_rootless/defaults/main.yml b/roles/services/docker_rootless/defaults/main.yml new file mode 100644 index 0000000..064825f --- /dev/null +++ b/roles/services/docker_rootless/defaults/main.yml @@ -0,0 +1,18 @@ +docker_packages: + - docker-ce + - acl + - docker-ce-cli + - docker-ce-rootless-extras + - docker-compose-plugin + - uidmap + - dbus-user-session + - slirp4netns + - fuse-overlayfs + +docker_username: docker_rootless 
+docker_uid: 2000 + +docker_home: /srv/docker +docker_config: /srv/docker/config +docker_data: /srv/docker/data + diff --git a/roles/services/docker_rootless/handlers/main.yml b/roles/services/docker_rootless/handlers/main.yml new file mode 100644 index 0000000..510db7b --- /dev/null +++ b/roles/services/docker_rootless/handlers/main.yml @@ -0,0 +1,6 @@ +- name: update repos + apt: + update_cache: yes + register: apt_upgrade + retries: 100 + until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) diff --git a/roles/services/docker_rootless/tasks/main.yml b/roles/services/docker_rootless/tasks/main.yml new file mode 100644 index 0000000..9b2e527 --- /dev/null +++ b/roles/services/docker_rootless/tasks/main.yml @@ -0,0 +1,93 @@ +- name: install packages + package: + name: + - extrepo + - nginx + - python3-docker + state: latest + +- name: allow http (80/tcp) traffic + ufw: + rule: allow + port: '80' + proto: tcp + +- name: allow https (443/tcp) traffic + ufw: + rule: allow + port: '443' + proto: tcp + +- name: enable docker-ce repo + register: result + changed_when: result.stdout | regex_search("skipped") | bool + notify: update repos + command: + cmd: extrepo enable docker-ce + creates: /etc/apt/sources.list.d/extrepo_docker-ce.sources + +- meta: flush_handlers + +- name: update docker-ce repo + changed_when: false + command: + cmd: extrepo update docker-ce + +- name: create docker user + user: + name: "{{ docker_username }}" + shell: /bin/bash + uid: "{{ docker_uid }}" + home: "{{ docker_home }}" + create_home: yes + +- name: add XDG_RUNTIME_DIR to docker user bash profile + lineinfile: + path: "{{ docker_home }}/.bash_profile" + line: "export XDG_RUNTIME_DIR=/run/user/{{ docker_uid }}" + insertbefore: EOF + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: "0644" + create: yes + +- name: install docker packages + package: + name: "{{ docker_packages }}" + state: latest + +- name: add docker user to /etc/subuid + lineinfile: + path: /etc/subuid + line: "{{ docker_username }}:100000:65536" + insertbefore: EOF + +- name: add docker user to /etc/subgid + lineinfile: + path: /etc/subgid + line: "{{ docker_username }}:100000:65536" + insertbefore: EOF + +- name: enable lingering for docker user + command: + cmd: loginctl enable-linger "{{ docker_username }}" + creates: "/var/lib/systemd/linger/{{ docker_username }}" + +- name: run docker rootless setup script + become_user: "{{ docker_username }}" + register: setup_script + command: + cmd: /usr/bin/dockerd-rootless-setuptool.sh install --force + creates: "{{ docker_home }}/.config/systemd/user/docker.service" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + +- name: enable and start docker service + become_user: "{{ docker_username }}" + systemd: + name: docker + enabled: yes + state: started + scope: user + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" diff --git a/roles/services/freeipa/client/defaults/main.yml b/roles/services/freeipa/client/defaults/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/roles/services/freeipa/client/tasks/main.yml b/roles/services/freeipa/client/tasks/main.yml new file mode 100644 index 0000000..ccb047e --- /dev/null +++ b/roles/services/freeipa/client/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: configure freeipa client + include_role: + name: freeipa.ansible_freeipa.ipaclient
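# Every container role in this commit talks to the rootless daemon set up by
# the docker_rootless role above the same way: become the docker user, export
# XDG_RUNTIME_DIR, and point docker_host at the per-user socket. A minimal
# smoke-test sketch of that pattern; container and image names are illustrative only:
- name: verify the rootless docker daemon answers (sketch)
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "rootless-smoke-test"   # illustrative name
    image: "hello-world"          # illustrative image
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    state: 'started'

diff --git a/roles/services/freeipa/server/defaults/main.yml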
b/roles/services/freeipa/server/defaults/main.yml new file mode 100644 index 0000000..3e91a21 --- /dev/null +++ b/roles/services/freeipa/server/defaults/main.yml @@ -0,0 +1 @@ +ipabackup_from_controller: yes diff --git a/roles/services/freeipa/server/tasks/main.yml b/roles/services/freeipa/server/tasks/main.yml new file mode 100644 index 0000000..32badc2 --- /dev/null +++ b/roles/services/freeipa/server/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: set fedora dns + lineinfile: + path: /etc/systemd/resolved.conf + regexp: "^#?DNS=" + line: "DNS={{ ipa_dns_ip }}" + +- name: restart systemd-resolved + service: + name: systemd-resolved + state: restarted + +- name: set hostname + hostname: + name: ipasrv.home.local + +- name: remove lines from /etc/hosts + lineinfile: + path: /etc/hosts + regexp: "^::1.*ipasrv" + state: absent + +- name: remove lines from /etc/hosts + lineinfile: + path: /etc/hosts + regexp: "^127.0.0.1.*ipasrv" + state: absent + +- name: add line to /etc/hosts + lineinfile: + path: /etc/hosts + line: "{{ ansible_default_ipv4.address }} ipasrv.home.local ipasrv" + state: present + +- name: install freeipa-server + package: + name: freeipa-server + state: latest + +#- name: restore ipaserver from backup + #include_role: + #name: freeipa.ansible_freeipa.ipabackup + #state: restored diff --git a/roles/services/game_server/handlers/main.yml b/roles/services/game_server/handlers/main.yml new file mode 100644 index 0000000..8e221e1 --- /dev/null +++ b/roles/services/game_server/handlers/main.yml @@ -0,0 +1,71 @@ +- name: create sunshine build dir + become: yes + become_user: "{{ games_user }}" + file: + path: "/home/{{ games_user }}/sunshine/build" + state: directory + owner: "{{ games_user }}" + group: "{{ games_user }}" + mode: "0755" + +- name: run npm install + become: yes + become_user: "{{ games_user }}" + command: + cmd: "npm install" + chdir: "/home/{{ games_user }}/sunshine/build" + +- name: build sunshine - cmake + become: yes + become_user: "{{ games_user }}" + command: + cmd: "cmake -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_CXX_COMPILER=g++-10 .." 
+ chdir: "/home/{{ games_user }}/sunshine/build" + +- name: build sunshine - make + become: yes + become_user: "{{ games_user }}" + command: + cmd: "make" + chdir: "/home/{{ games_user }}/sunshine/build" + +- name: build sunshine deb package + become: yes + become_user: "{{ games_user }}" + command: + cmd: "cpack -G DEB" + chdir: "/home/{{ games_user }}/sunshine/build" + +- name: install sunshine from deb + apt: + deb: "/home/{{ games_user }}/sunshine/build/cpack_artifacts/Sunshine.deb" + +- name: restart sunshine + become: yes + become_user: "{{ games_user }}" + systemd: + scope: user + name: sunshine + state: restarted + +- name: decompress and extract firmware + unarchive: + src: "/tmp/linux-firmware-20221109.tar.gz" + dest: "/tmp/" + remote_src: yes + +- name: copy all files from amdgpu to /lib/firmware/amdgpu/ + copy: + src: /tmp/linux-firmware-20221109/amdgpu + dest: /lib/firmware + remote_src: yes + owner: root + group: root + mode: "0644" + +- name: update initramfs + command: + cmd: "update-initramfs -u" + +- name: reboot system + reboot: diff --git a/roles/services/game_server/tasks/main.yml b/roles/services/game_server/tasks/main.yml new file mode 100644 index 0000000..f2b12bd --- /dev/null +++ b/roles/services/game_server/tasks/main.yml @@ -0,0 +1,223 @@ +- name: enable contrib and non-free repos + apt_repository: + repo: deb https://deb.debian.org/debian bookworm main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb https://security.debian.org/debian-security bookworm-security main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb https://deb.debian.org/debian bookworm-updates main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb https://deb.debian.org/debian bookworm-backports main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb-src https://deb.debian.org/debian bookworm main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free +- name: enable contrib and non-free repos + apt_repository: + repo: deb-src https://deb.debian.org/debian bookworm-updates main contrib non-free + +- name: enable contrib and non-free repos + apt_repository: + repo: deb-src https://deb.debian.org/debian bookworm-backports main contrib non-free + +- name: update repos + apt: + update_cache: yes + register: apt_upgrade + retries: 100 + until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) + +- name: install packages + package: + name: "{{ game_server_packages }}" + state: latest + +- name: create games user + user: + name: "{{ games_user }}" + create_home: yes + +- name: add user to sudo group + user: + name: "{{ games_user }}" + groups: sudo + append: yes + +- name: add user to ssl-cert group + user: + name: "{{ games_user }}" + groups: ssl-cert + append: yes + +- name: set authorized ssh key + authorized_key: + user: "{{ games_user }}" + state: present + key: "{{ lookup('file', 'data/common/id_rsa.pub') }}" + +- name: clone sunshine repo + become: yes + become_user: "{{ games_user }}" + git: + repo: "{{ sunshine_repo }}" + dest: "/home/{{ games_user }}/sunshine" + version: "{{ sunshine_version }}" + recursive: yes + force: yes + register: sunshine_repo + notify: + - create sunshine build dir + - run npm install + 
- build sunshine - cmake + - build sunshine - make + - build sunshine deb package + - install sunshine from deb + - restart sunshine + +- name: install sunshine packages + package: + name: "{{ sunshine_packages }}" + state: latest + +- meta: flush_handlers + +- name: add user to input group + user: + name: "{{ games_user }}" + groups: input + append: yes + +- name: set sunshine udev rules + lineinfile: + path: /etc/udev/rules.d/85-sunshine-input.rules + insertbefore: EOF + line: KERNEL=="uinput", GROUP="input", MODE="0660", OPTIONS+="static_node=uinput" + owner: root + group: root + mode: "0644" + create: yes + +- name: install backports kernel + apt: + name: linux-image-amd64 + default_release: bookworm-backports + state: latest + update_cache: yes + +- name: update-pciids + changed_when: false + command: + cmd: "update-pciids" + +- name: check if needed firmware has already been installed + stat: + path: /lib/firmware/amdgpu/dimgrey_cavefish_sos.bin + register: bin + +- name: manually download latest firmware for amdgpu from kernel source tree + when: not bin.stat.exists + get_url: + url: "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/snapshot/linux-firmware-20221109.tar.gz" + dest: "/tmp/linux-firmware-20221109.tar.gz" + register: firmware + notify: + - decompress and extract firmware + - copy all files from amdgpu to /lib/firmware/amdgpu/ + - update initramfs + - reboot system + +- name: allow sunshine ports + ufw: + rule: allow + proto: tcp + port: 47984 + +- name: allow sunshine ports + ufw: + rule: allow + proto: tcp + port: 47989 + +- name: allow sunshine ports + ufw: + rule: allow + proto: tcp + port: 47990 + +- name: allow sunshine ports + ufw: + rule: allow + proto: udp + port: 47998 + +- name: allow sunshine ports + ufw: + rule: allow + proto: udp + port: 47999 + +- name: allow sunshine ports + ufw: + rule: allow + proto: tcp + port: 48010 + +- name: allow sunshine ports + ufw: + rule: allow + proto: udp + port: 48000 + +- name: allow sunshine ports + ufw: + rule: allow + proto: udp + port: 48002 + +- name: check if i386 architecture is already enabled + args: + executable: /bin/bash + shell: | + set -eo pipefail + dpkg --print-foreign-architectures | grep i386 + register: i386_check + changed_when: false + failed_when: i386_check.rc > 1 + +- name: add i386 architecture + when: i386_check.rc == 1 + command: + cmd: "dpkg --add-architecture i386" + +- name: update repos + when: i386_check.rc == 1 + apt: + update_cache: yes + register: apt_upgrade + retries: 100 + until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) + +- name: install steam and related packages + package: + name: "{{ steam_packages }}" + +- name: copy lightdm config + copy: + src: "{{ lightdm_config }}" + dest: /etc/lightdm/lightdm.conf + owner: root + group: root + mode: "0644" + +- name: copy xfce xinit config + copy: + src: "{{ xfce_xinit }}" + dest: /etc/xdg/xfce4/xinitrc + owner: root + group: root + mode: "0755"
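# The eight allow-rules above share one shape; an equivalent loop form (a
# sketch using the same ufw module and the same Sunshine port list) would keep
# the whole set in a single task:
- name: allow sunshine ports (loop form, sketch)
  ufw:
    rule: allow
    proto: "{{ item.proto }}"
    port: "{{ item.port }}"
  loop:
    - { proto: 'tcp', port: '47984' }
    - { proto: 'tcp', port: '47989' }
    - { proto: 'tcp', port: '47990' }
    - { proto: 'tcp', port: '48010' }
    - { proto: 'udp', port: '47998' }
    - { proto: 'udp', port: '47999' }
    - { proto: 'udp', port: '48000' }
    - { proto: 'udp', port: '48002' }

diff --git a/roles/services/jenkins/handlers/main.yml b/roles/services/jenkins/handlers/main.yml new file mode 100644 index 0000000..92f0084 --- /dev/null +++ b/roles/services/jenkins/handlers/main.yml @@ -0,0 +1,13 @@ +- name: update repos + apt: + update_cache: yes + +- name: restart nginx + service: + name: nginx + state: restarted + +- name: restart jenkins + service: + name: jenkins + state: restarted diff --git a/roles/services/jenkins/tasks/main.yml b/roles/services/jenkins/tasks/main.yml new file mode 100644 index 0000000..29dbb28 --- /dev/null +++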
b/roles/services/jenkins/tasks/main.yml @@ -0,0 +1,184 @@ +- name: install extrepo + package: + name: extrepo + state: latest + +- name: add jenkins repo + register: result + changed_when: result.stdout | regex_search("skipped") | bool + notify: update repos + command: + cmd: extrepo enable jenkins + creates: /etc/apt/sources.list.d/extrepo_jenkins.sources + +- meta: flush_handlers + +- name: update jenkins repo data + changed_when: false + command: + cmd: extrepo update jenkins + +- name: install packages + package: + name: "{{ jenkins_packages }}" + +- name: generate ssh key for jenkins user + user: + name: jenkins + generate_ssh_key: yes + +- name: get jenkins user ssh key + changed_when: false + command: cat /var/lib/jenkins/.ssh/id_rsa.pub + register: pubkey + +- name: create jenkins user in freeipa + freeipa.ansible_freeipa.ipauser: + ipaadmin_principal: + ipaadmin_password: "{{ ipafulladmin_password }}" + name: jenkins + passwordexpiration: "2050-01-01" + first: jenkins + last: ci + sshpubkey: "{{ pubkey.stdout }}" + +- name: create jenkins_admin group in freeipa + freeipa.ansible_freeipa.ipagroup: + ipaadmin_password: "{{ ipafulladmin_password }}" + name: jenkins_admin + +- name: add user jenkins to jenkins_admin group in freeipa + freeipa.ansible_freeipa.ipagroup: + ipaadmin_password: "{{ ipafulladmin_password }}" + name: jenkins_admin + action: member + user: + - jenkins + +- name: create sudo rule to allow jenkins to execute on all without password + freeipa.ansible_freeipa.ipasudorule: + ipaadmin_password: "{{ ipafulladmin_password }}" + name: jenkins_rule + sudooption: "!authenticate" + group: jenkins_admin + hostcategory: all + cmdcategory: all + runasusercategory: all + runasgroupcategory: all + +- name: deploy nginx configuration + copy: + src: "{{ jenkins_nginx_config }}" + dest: /etc/nginx/sites-available/jenkins.conf + owner: root + group: root + mode: '0644' + register: nginx_config + notify: restart nginx + +- name: create cert/key dir + file: + state: directory + path: "/etc/letsencrypt/live/{{ services_domain }}" + owner: root + group: root + mode: "0755" + +- name: remove existing private key file + file: + path: "/etc/letsencrypt/live/{{ services_domain }}/privkey.pem" + state: absent + +- name: write private key to file + lineinfile: + path: "/etc/letsencrypt/live/{{ services_domain }}/privkey.pem" + line: "{{ nginx_key }}" + insertbefore: EOF + create: yes + +- name: deploy cert + copy: + src: "{{ nginx_cert }}" + dest: "/etc/letsencrypt/live/{{ services_domain }}/fullchain.pem" + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/jenkins.conf + dest: /etc/nginx/sites-enabled/jenkins.conf + owner: root + group: root + state: link + +- name: allow http (80/tcp) traffic + ufw: + rule: allow + port: '80' + proto: tcp + +- name: allow https (443/tcp) traffic + ufw: + rule: allow + port: '443' + proto: tcp + +- name: install ansible plugin + jenkins_plugin: + url_username: "{{ jenkins_username }}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: ansible + +- name: install gitea plugin + jenkins_plugin: + url_username: "{{ jenkins_username }}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: gitea + +- name: install openid login plugin + jenkins_plugin: + url_username: "{{ jenkins_username }}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: oic-auth + +- name: install prometheus plugin + jenkins_plugin: + url_username: "{{ jenkins_username 
}}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: prometheus + +- name: install casc plugin + jenkins_plugin: + url_username: "{{ jenkins_username }}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: configuration-as-code + +- name: install warnings-ng plugin + jenkins_plugin: + url_username: "{{ jenkins_username }}" + url_password: "{{ jenkins_apikey }}" + url: "{{ jenkins_url }}" + name: warnings-ng + +- name: deploy configuration as code file + register: casc_file + notify: restart jenkins + template: + src: "{{ jenkins_config }}" + dest: "/var/lib/jenkins/jenkins.yaml" + owner: jenkins + group: jenkins + mode: "0644" + +- name: enable jenkins + systemd: + daemon_reload: yes + enabled: yes + masked: no + name: jenkins diff --git a/roles/services/monitoring/grafana/defaults/main.yml b/roles/services/monitoring/grafana/defaults/main.yml new file mode 100644 index 0000000..c346e54 --- /dev/null +++ b/roles/services/monitoring/grafana/defaults/main.yml @@ -0,0 +1,5 @@ +grafana_package: + - grafana + - nginx +grafana_config: files/grafana_config/ +grafana_data: files/grafana.db diff --git a/roles/services/monitoring/grafana/handlers/main.yml b/roles/services/monitoring/grafana/handlers/main.yml new file mode 100644 index 0000000..8026c6d --- /dev/null +++ b/roles/services/monitoring/grafana/handlers/main.yml @@ -0,0 +1,13 @@ +- name: update repos + apt: + update_cache: yes + +- name: restart grafana + service: + name: grafana-server + state: restarted + +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/monitoring/grafana/tasks/main.yml b/roles/services/monitoring/grafana/tasks/main.yml new file mode 100644 index 0000000..e9f824e --- /dev/null +++ b/roles/services/monitoring/grafana/tasks/main.yml @@ -0,0 +1,125 @@ +- name: install extrepo + package: + name: extrepo + state: latest + +- name: add Grafana repo + register: result + changed_when: result.stdout | regex_search("skipped") | bool + notify: update repos + command: + cmd: extrepo enable grafana + creates: /etc/apt/sources.list.d/extrepo_grafana.sources + +- meta: flush_handlers + +- name: update Grafana repo + changed_when: false + command: + cmd: extrepo update grafana + +- name: install grafana + package: + name: "{{ grafana_package }}" + +- name: deploy grafana config + notify: restart grafana + template: + src: "{{ grafana_config }}" + dest: /etc/grafana/grafana.ini + owner: root + group: grafana + mode: '0640' + +- name: deploy nginx configuration + notify: restart nginx + copy: + src: "{{ grafana_nginx_config }}" + dest: /etc/nginx/sites-available/grafana.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + notify: restart nginx + file: + src: /etc/nginx/sites-available/grafana.conf + dest: /etc/nginx/sites-enabled/grafana.conf + owner: root + group: root + state: link + +- name: allow http (80/tcp) traffic + ufw: + rule: allow + port: '80' + proto: tcp + +- name: allow https (443/tcp) traffic + ufw: + rule: allow + port: '443' + proto: tcp + +- name: enable grafana + systemd: + daemon_reload: yes + enabled: yes + masked: no + name: grafana-server + +- meta: flush_handlers + +- name: add grafana user + ignore_errors: yes + community.grafana.grafana_user: + name: "{{ grafana_admin }}" + email: "{{ grafana_email }}" + url: "{{ grafana_url }}" + login: "{{ grafana_admin }}" + password: "{{ grafana_password }}" + is_admin: true + state: present + +- name: add prometheus datasource + 
community.grafana.grafana_datasource: + grafana_url: "{{ grafana_url }}" + grafana_user: "{{ grafana_admin }}" + grafana_password: "{{ grafana_password }}" + name: "Prometheus" + ds_type: prometheus + ds_url: "{{ prometheus_url }}" + access: proxy + +- name: add influxdb datasource + community.grafana.grafana_datasource: + grafana_url: "{{ grafana_url }}" + grafana_user: "{{ grafana_admin }}" + grafana_password: "{{ grafana_password }}" + name: "Proxmox InfluxDB" + ds_type: influxdb + ds_url: "{{ influxdb_url }}" + database: "{{ influx_database }}" + user: "{{ influx_user }}" + password: "{{ influx_password }}" + access: proxy + +- name: add loki datasource + community.grafana.grafana_datasource: + grafana_url: "{{ grafana_url }}" + grafana_user: "{{ grafana_admin }}" + grafana_password: "{{ grafana_password }}" + name: "Loki" + ds_type: loki + ds_url: "{{ loki_url }}" + access: proxy + +- name: import main custom dashboard + delegate_to: localhost + become: no + community.grafana.grafana_dashboard: + grafana_url: "{{ grafana_url }}" + grafana_user: "{{ grafana_admin }}" + grafana_password: "{{ grafana_password }}" + path: "{{ grafana_dashboard_main }}" + overwrite: yes diff --git a/roles/services/monitoring/influxdb/defaults/main.yml b/roles/services/monitoring/influxdb/defaults/main.yml new file mode 100644 index 0000000..180ad8e --- /dev/null +++ b/roles/services/monitoring/influxdb/defaults/main.yml @@ -0,0 +1,6 @@ +influxdb_packages: + - influxdb + - influxdb-client + +influx_config: files/influxdb.conf +influx_data: files/influx_data/ diff --git a/roles/services/monitoring/influxdb/handlers/main.yml b/roles/services/monitoring/influxdb/handlers/main.yml new file mode 100644 index 0000000..765a040 --- /dev/null +++ b/roles/services/monitoring/influxdb/handlers/main.yml @@ -0,0 +1,4 @@ +- name: restart influxdb + service: + name: influxdb + state: restarted diff --git a/roles/services/monitoring/influxdb/tasks/main.yml b/roles/services/monitoring/influxdb/tasks/main.yml new file mode 100644 index 0000000..06d6e86 --- /dev/null +++ b/roles/services/monitoring/influxdb/tasks/main.yml @@ -0,0 +1,19 @@ +- name: install packages + package: + name: "{{ influxdb_packages }}" + state: latest + +- name: copy config + notify: restart influxdb + copy: + src: "{{ influx_config }}" + dest: /etc/influxdb/influxdb.conf + owner: root + group: root + mode: '0644' + +- name: enable influxdb + systemd: + name: influxdb + enabled: yes + masked: no diff --git a/roles/services/monitoring/loki/handlers/main.yml b/roles/services/monitoring/loki/handlers/main.yml new file mode 100644 index 0000000..e70412f --- /dev/null +++ b/roles/services/monitoring/loki/handlers/main.yml @@ -0,0 +1,8 @@ +- name: update repos + apt: + update_cache: yes + +- name: restart nginx + service: + name: nginx + state: restarted diff --git a/roles/services/monitoring/loki/tasks/main.yml b/roles/services/monitoring/loki/tasks/main.yml new file mode 100644 index 0000000..31a7375 --- /dev/null +++ b/roles/services/monitoring/loki/tasks/main.yml @@ -0,0 +1,80 @@ +- name: install extrepo + package: + name: extrepo + state: latest + +- name: add Grafana repo + register: result + changed_when: result.stdout | regex_search("skipped") | bool + notify: update repos + command: + cmd: extrepo enable grafana + creates: /etc/apt/sources.list.d/extrepo_grafana.sources + +- meta: flush_handlers + +- name: update Grafana repo + changed_when: false + command: + cmd: extrepo update grafana + +- name: install loki + package: + name: loki + state: latest +
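# The loki_config file deployed by the next task is not part of this commit; as
# a point of reference, a minimal single-node filesystem configuration of the
# kind Loki 2.x expects looks roughly like the sketch below (every value here
# is illustrative, not the repo's actual config):
auth_enabled: false
server:
  http_listen_port: 3100
common:
  path_prefix: /var/lib/loki
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory
  storage:
    filesystem:
      chunks_directory: /var/lib/loki/chunks
      rules_directory: /var/lib/loki/rules
schema_config:
  configs:
    - from: "2023-01-01"
      store: boltdb-shipper
      object_store: filesystem
      schema: v12
      index:
        prefix: index_
        period: 24h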
+- name: deploy loki configuration + copy: + src: "{{ loki_config }}" + dest: /etc/loki/config.yml + owner: root + group: root + mode: '0644' + +- name: deploy nginx configuration + copy: + src: "{{ loki_nginx_config }}" + dest: /etc/nginx/sites-available/loki.conf + owner: root + group: root + mode: '0644' + register: nginxconfig + notify: restart nginx + +- name: symlink site + file: + src: /etc/nginx/sites-available/loki.conf + dest: /etc/nginx/sites-enabled/loki.conf + owner: root + group: root + state: link + +- name: allow http (80/tcp) traffic + ufw: + rule: allow + port: '80' + proto: tcp + +- name: allow https (443/tcp) traffic + ufw: + rule: allow + port: '443' + proto: tcp + +- name: allow loki log (3100/tcp) traffic + ufw: + rule: allow + port: '3100' + proto: tcp + +- name: enable loki + systemd: + daemon_reload: yes + enabled: yes + masked: no + name: loki + +- name: restart loki + systemd: + name: loki + state: restarted diff --git a/roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml b/roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml new file mode 100644 index 0000000..e69de29 diff --git a/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml new file mode 100644 index 0000000..9d2b8a5 --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml @@ -0,0 +1,4 @@ +nginx_exporter_debian_package: prometheus-nginx-exporter +nginx_exporter_fedora_package: golang-github-prometheus-node-exporter +prometheus_server_ip: 192.168.88.32 +nginx_exporter_port: '9113' diff --git a/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml new file mode 100644 index 0000000..fe9a90d --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml @@ -0,0 +1,9 @@ +- name: restart nginx + service: + name: nginx + state: restarted + +- name: restart nginx-exporter + service: + name: prometheus-nginx-exporter + state: restarted diff --git a/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml new file mode 100644 index 0000000..819f71e --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml @@ -0,0 +1,44 @@ +- name: install package (Debian) + when: ansible_facts['distribution'] == "Debian" + package: + name: "{{ nginx_exporter_debian_package }}" + +- name: allow port + ufw: + rule: allow + direction: in + proto: tcp + src: "{{ prometheus_server_ip }}" + to_port: "{{ nginx_exporter_port }}" + +- name: copy defaults file + notify: restart nginx-exporter + copy: + src: "{{ nginx_exporter_defaults }}" + dest: /etc/default/prometheus-nginx-exporter + owner: root + group: root + mode: '0644' + +- name: deploy nginx configuration + notify: restart nginx + copy: + src: "{{ nginx_exporter_config }}" + dest: /etc/nginx/sites-available/metrics.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/metrics.conf + dest: /etc/nginx/sites-enabled/metrics.conf + owner: root + group: root + state: link + +- name: enable service + systemd: + name: prometheus-nginx-exporter + enabled: yes + masked: no
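# Only the Debian branch is installed above, although the defaults define
# nginx_exporter_fedora_package (its current value mirrors the node_exporter
# package name and may need verification). If Fedora hosts were in scope, the
# node_exporter role's pattern would carry over; a sketch:
- name: install package (Fedora, sketch)
  when: ansible_facts['distribution'] == "Fedora"
  package:
    name: "{{ nginx_exporter_fedora_package }}"

diff --git a/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml b/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml new file mode 100644 index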
0000000..e4ff351 --- /dev/null +++ b/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml @@ -0,0 +1,4 @@ +node_exporter_debian_package: prometheus-node-exporter +node_exporter_fedora_package: golang-github-prometheus-node-exporter +prometheus_server_ip: 192.168.88.32 +node_exporter_port: '9100' diff --git a/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml b/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml new file mode 100644 index 0000000..6bbcc08 --- /dev/null +++ b/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml @@ -0,0 +1,28 @@ +- name: install package (Debian) + when: ansible_facts['distribution'] == "Debian" + package: + name: "{{ node_exporter_debian_package }}" + +- name: install package (Fedora) + when: ansible_facts['distribution'] == "Fedora" + package: + name: "{{ node_exporter_fedora_package }}" + +- name: allow port + ufw: + rule: allow + direction: in + proto: tcp + src: "{{ prometheus_server_ip }}" + to_port: "{{ node_exporter_port }}" + +- name: enable service + systemd: + name: prometheus-node-exporter + enabled: yes + masked: no + +- name: restart service + service: + name: prometheus-node-exporter + state: restarted diff --git a/roles/services/monitoring/prometheus/server/defaults/main.yml b/roles/services/monitoring/prometheus/server/defaults/main.yml new file mode 100644 index 0000000..696e7cc --- /dev/null +++ b/roles/services/monitoring/prometheus/server/defaults/main.yml @@ -0,0 +1,6 @@ +prometheus_package: prometheus +management_ip: 192.168.88.254 +grafana_server_ip: 192.168.88.21 +prometheus_port: '9090' +prometheus_config: files/prometheus.yml +prometheus_defaults: files/prometheus diff --git a/roles/services/monitoring/prometheus/server/tasks/main.yml b/roles/services/monitoring/prometheus/server/tasks/main.yml new file mode 100644 index 0000000..06ecc10 --- /dev/null +++ b/roles/services/monitoring/prometheus/server/tasks/main.yml @@ -0,0 +1,79 @@ +- name: install package + package: + name: "{{ prometheus_package }}" + +- name: allow access to metrics from grafana + ufw: + rule: allow + direction: in + proto: tcp + src: "{{ grafana_server_ip }}" + to_port: "{{ prometheus_port }}" + +- name: allow access to metrics from management + ufw: + rule: allow + direction: in + proto: tcp + src: "{{ management_ip }}" + to_port: "{{ prometheus_port }}" + +- name: copy config file + copy: + src: "{{ prometheus_config }}" + dest: /etc/prometheus/prometheus.yml + owner: root + group: root + mode: '0644' + +- name: copy defaults file + copy: + src: "{{ prometheus_defaults }}" + dest: /etc/default/prometheus + owner: root + group: root + mode: '0644' + +- name: enable service + systemd: + name: prometheus + enabled: yes + masked: no + +- name: restart service + service: + name: prometheus + state: restarted + +- name: deploy nginx configuration + copy: + src: "{{ prometheus_nginx_config }}" + dest: /etc/nginx/sites-available/prometheus.conf + owner: root + group: root + mode: '0644' + +- name: symlink site + file: + src: /etc/nginx/sites-available/prometheus.conf + dest: /etc/nginx/sites-enabled/prometheus.conf + owner: root + group: root + state: link + +- name: allow http (80/tcp) traffic + ufw: + rule: allow + port: '80' + proto: tcp + +- name: allow https (443/tcp) traffic + ufw: + rule: allow + port: '443' + proto: tcp + +- name: restart nginx + service: + name: nginx + state: restarted
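# The prometheus.yml shipped by "copy config file" above is not included in the
# commit; a minimal scrape_configs sketch consistent with the exporter ports
# configured in these roles (9100 node, 9113 nginx, 9617 pihole). All target
# addresses are illustrative:
scrape_configs:
  - job_name: 'node'
    static_configs:
      - targets: ['192.168.88.21:9100']   # illustrative target
  - job_name: 'nginx'
    static_configs:
      - targets: ['192.168.88.21:9113']   # illustrative target
  - job_name: 'pihole'
    static_configs:
      - targets: ['192.168.88.30:9617']   # illustrative target

diff --git a/roles/services/monitoring/promtail/handlers/main.yml b/roles/services/monitoring/promtail/handlers/main.yml new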
file mode 100644 index 0000000..97ea7d3 --- /dev/null +++ b/roles/services/monitoring/promtail/handlers/main.yml @@ -0,0 +1,39 @@ +- name: update repos - debian + apt: + update_cache: yes + +- name: update repos - fedora + dnf: + name: "*" + state: latest + +- name: build loki-docker-driver plugin for private repo + become: yes + become_user: "{{ docker_username }}" + environment: + LOKI_DOCKER_DRIVER: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver" + community.general.make: + chdir: "{{ docker_home }}/plugins/loki" + target: docker-driver-push + +- name: restart rootless docker + become: yes + become_user: "{{ docker_username }}" + systemd: + name: docker + enabled: yes + state: restarted + scope: user + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + +- name: restart docker + service: + name: docker + state: restarted + +- name: restart promtail + when: promtail_config.changed + service: + name: promtail + state: restarted diff --git a/roles/services/monitoring/promtail/tasks/main.yml b/roles/services/monitoring/promtail/tasks/main.yml new file mode 100644 index 0000000..f8b28cc --- /dev/null +++ b/roles/services/monitoring/promtail/tasks/main.yml @@ -0,0 +1,151 @@ +- name: install extrepo + when: ansible_facts['distribution'] == 'Debian' + package: + name: extrepo + state: latest + +- name: add grafana repo | debian + when: ansible_facts['distribution'] == 'Debian' + register: result + changed_when: result.stdout | regex_search("skipped") | bool + notify: update repos - debian + command: + cmd: extrepo enable grafana + creates: /etc/apt/sources.list.d/extrepo_grafana.sources + +- meta: flush_handlers + +- name: update grafana extrepo data | debian + when: ansible_facts['distribution'] == 'Debian' + changed_when: false + command: + cmd: extrepo update grafana + +- name: add Grafana repo | fedora + when: ansible_facts['distribution'] == 'Fedora' + notify: update repos - fedora + yum_repository: + name: grafana + file: grafana + description: "Grafana OSS Repo" + baseurl: "https://rpm.grafana.com" + repo_gpgcheck: yes + enabled: yes + gpgcheck: yes + gpgkey: https://rpm.grafana.com/gpg.key + sslverify: yes + sslcacert: /etc/pki/tls/certs/ca-bundle.crt + exclude: "*beta*" + +- name: install promtail + package: + name: promtail + state: latest + +- name: add promtail to adm group for log access (debian) + when: ansible_facts['distribution'] == 'Debian' + user: + name: promtail + groups: adm + append: yes + +- name: add promtail to systemd-journal group for journal access + user: + name: promtail + groups: systemd-journal + append: yes + +- name: create docker plugin directory + when: "'docker_hosts' in group_names" + become: yes + become_user: "{{ docker_username }}" + file: + path: "{{ docker_home }}/plugins" + state: directory + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: "0755" + +- name: clone loki repo + when: "'docker_hosts' in group_names" + become: yes + become_user: "{{ docker_username }}" + git: + repo: "{{ loki_repo }}" + dest: "{{ docker_home }}/plugins/loki" + version: "{{ loki_version }}" + register: repo + notify: build loki-docker-driver plugin for private repo + +- meta: flush_handlers + +- name: login to docker registry + when: "'docker_hosts' in group_names" + become: yes + become_user: "{{ docker_username }}" + environment: + XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" + docker_login: + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + registry_url: "{{ docker_registry_url }}" + username:
"{{ docker_registry_username }}" + password: "{{ docker_registry_password }}" + +# docker driver rootless + +- name: enable loki-docker-driver plugin + when: "'docker_hosts' in group_names" + become: yes + become_user: "{{ docker_username }}" + notify: restart rootless docker + community.docker.docker_plugin: + plugin_name: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver:main" + state: enable + docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" + alias: loki + +- name: deploy docker config + when: "'docker_hosts' in group_names" + notify: restart rootless docker + copy: + src: "{{ docker_config }}" + dest: "{{ docker_home }}/.config/docker/daemon.json" + owner: "{{ docker_username }}" + group: "{{ docker_username }}" + mode: '0644' + +# docker driver root + +- name: enable loki-docker-driver plugin + when: "'docker_hosts' in group_names" + notify: restart docker + community.docker.docker_plugin: + plugin_name: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver:main" + state: enable + alias: loki + +- name: deploy docker config + when: "'docker_hosts' in group_names" + notify: restart docker + copy: + src: "{{ docker_config }}" + dest: /etc/docker/daemon.json + owner: root + group: root + mode: '0644' + +- name: deploy promtail configuration + notify: restart promtail + copy: + src: "{{ promtail_config }}" + dest: /etc/promtail/config.yml + owner: root + group: root + mode: '0644' + +- name: enable promtail + systemd: + daemon_reload: yes + enabled: yes + masked: no + name: promtail diff --git a/roles/services/msmtp_mta/tasks/main.yml b/roles/services/msmtp_mta/tasks/main.yml new file mode 100644 index 0000000..4958acc --- /dev/null +++ b/roles/services/msmtp_mta/tasks/main.yml @@ -0,0 +1,11 @@ +- name: install msmtp packages + package: + name: "{{ msmtp_mta_packages }}" + +- name: copy msmtp config file + copy: + src: "{{ msmtp_mta_config }}" + dest: /etc/msmtprc + owner: root + group: msmtp + mode: '0640' diff --git a/roles/services/pihole/handlers/main.yml b/roles/services/pihole/handlers/main.yml new file mode 100644 index 0000000..9c1d311 --- /dev/null +++ b/roles/services/pihole/handlers/main.yml @@ -0,0 +1,14 @@ +- name: restart unbound + service: + name: unbound + state: restarted + +- name: restart lighttpd + service: + name: lighttpd + state: restarted + +- name: restart ftl + service: + name: pihole-FTL + state: restarted diff --git a/roles/services/pihole/tasks/main.yml b/roles/services/pihole/tasks/main.yml new file mode 100644 index 0000000..3f3abde --- /dev/null +++ b/roles/services/pihole/tasks/main.yml @@ -0,0 +1,80 @@ +- name: install packages + package: + name: "{{ pihole_packages }}" + +- name: clone pihole repository + git: + repo: https://github.com/pi-hole/pi-hole.git + dest: /tmp/pi-hole + version: v5.17.1 + depth: 1 + +- name: create configuration directory + file: + path: /etc/pihole + state: directory + owner: root + group: root + mode: '0755' + +- name: copy setupVars.conf + copy: + src: "{{ pihole_setupvars }}" + dest: /etc/pihole/setupVars.conf + owner: root + group: root + mode: '0644' + +- name: copy pihole unbound configuration + notify: restart unbound + copy: + src: "{{ pihole_unboundconf }}" + dest: /etc/unbound/unbound.conf.d/pihole.conf + owner: root + group: root + mode: '0644' + +- name: run installation script + command: + cmd: "/bin/bash '/tmp/pi-hole/automated install/basic-install.sh' --unattended" + creates: /etc/pihole/install.log + ignore_errors: yes + notify: + - restart lighttpd 
+
+- name: change pihole admin password
+  register: result
+  changed_when: result.rc == 0
+  command:
+    cmd: "pihole -a -p {{ pihole_password }}"
+
+- name: initialize gravity
+  register: result
+  changed_when: result.rc == 0
+  command:
+    cmd: "pihole -g"
+
+- name: allow http (80/tcp) traffic
+  ufw:
+    rule: allow
+    port: '80'
+    proto: tcp
+
+- name: allow https (443/tcp) traffic
+  ufw:
+    rule: allow
+    port: '443'
+    proto: tcp
+
+- name: allow dns (53/udp) traffic
+  ufw:
+    rule: allow
+    port: '53'
+    proto: udp
+
+- name: allow dns (53/tcp) traffic
+  ufw:
+    rule: allow
+    port: '53'
+    proto: tcp
diff --git a/roles/services/ssh/tasks/main.yml b/roles/services/ssh/tasks/main.yml
new file mode 100644
index 0000000..d2cabab
--- /dev/null
+++ b/roles/services/ssh/tasks/main.yml
@@ -0,0 +1,46 @@
+- name: explicitly only allow pubkey auth
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?AuthenticationMethods.*"
+    line: "AuthenticationMethods publickey"
+
+- name: disable root ssh login
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?PermitRootLogin"
+    line: "PermitRootLogin no"
+
+- name: enable publickey authentication
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?PubkeyAuthentication.*"
+    line: "PubkeyAuthentication yes"
+
+- name: disable password authentication
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?PasswordAuthentication.*"
+    line: "PasswordAuthentication no"
+
+- name: disable challenge response
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?ChallengeResponseAuthentication.*"
+    line: "ChallengeResponseAuthentication no"
+
+- name: disable pam
+  lineinfile:
+    path: /etc/ssh/sshd_config
+    regexp: "^#?UsePAM.*"
+    line: "UsePAM no"
+
+- name: ensure sshd is enabled
+  systemd:
+    name: sshd
+    enabled: yes
+    masked: no
+
+- name: restart sshd
+  service:
+    name: sshd
+    state: restarted
diff --git a/roles/services/unattended_upgrades/tasks/main.yml b/roles/services/unattended_upgrades/tasks/main.yml
new file mode 100644
index 0000000..bad3c02
--- /dev/null
+++ b/roles/services/unattended_upgrades/tasks/main.yml
@@ -0,0 +1,67 @@
+- name: install packages
+  package:
+    name: "{{ unattended_upgrades_packages }}"
+    state: latest
+
+- name: set apt update timer schedule
+  lineinfile:
+    path: /etc/systemd/system/timers.target.wants/apt-daily.timer
+    regexp: "OnCalendar.*"
+    line: "OnCalendar=*-*-* 0,4,8,12,16,20:00"
+
+- name: set apt update timer random delay
+  lineinfile:
+    path: /etc/systemd/system/timers.target.wants/apt-daily.timer
+    regexp: "RandomizedDelaySec.*"
+    line: "RandomizedDelaySec=10m"
+
+- name: set apt upgrade timer schedule
+  lineinfile:
+    path: /etc/systemd/system/timers.target.wants/apt-daily-upgrade.timer
+    regexp: "OnCalendar.*"
+    line: "OnCalendar=*-*-* 0,4,8,12,16,20:30"
+
+- name: set apt upgrade timer random delay
+  lineinfile:
+    path: /etc/systemd/system/timers.target.wants/apt-daily-upgrade.timer
+    regexp: "RandomizedDelaySec.*"
+    line: "RandomizedDelaySec=5m"
+
+- name: enable periodic package list updates
+  lineinfile:
+    path: /etc/apt/apt.conf.d/20auto-upgrades
+    regexp: "APT::Periodic::Update.*"
+    line: 'APT::Periodic::Update-Package-Lists "always";'
+
+- name: enable periodic unattended upgrades
+  lineinfile:
+    path: /etc/apt/apt.conf.d/20auto-upgrades
+    regexp: "APT::Periodic::Unattended.*"
+    line: 'APT::Periodic::Unattended-Upgrade "always";'
+
+- name: configure unattended upgrades mail recipient
+  lineinfile:
+    path: /etc/apt/apt.conf.d/50unattended-upgrades
+    regexp: ".*Unattended-Upgrade::Mail.*"
+    line: 'Unattended-Upgrade::Mail "{{ uu_mail_to }}";'
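+
+# The Sender task below has no regexp on purpose: lineinfile then only
+# checks for the exact Sender line and inserts it after the Mail line on
+# the first run, keeping the task idempotent. MailReport accepts "always",
+# "only-on-error", and "on-change".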
+- name: configure unattended upgrades mail sender
+  lineinfile:
+    path: /etc/apt/apt.conf.d/50unattended-upgrades
+    insertafter: 'Unattended-Upgrade::Mail "{{ uu_mail_to }}";'
+    line: 'Unattended-Upgrade::Sender "{{ uu_mail_from }}";'
+
+- name: configure unattended upgrades mail report frequency
+  lineinfile:
+    path: /etc/apt/apt.conf.d/50unattended-upgrades
+    regexp: ".*Unattended-Upgrade::MailReport.*"
+    line: 'Unattended-Upgrade::MailReport "always";'
+
+- name: restart unattended-upgrades service
+  service:
+    name: unattended-upgrades
+    state: restarted
--
cgit v1.2.3