author     Sam Chudnick <sam@chudnick.com>    2023-06-25 09:52:36 -0400
committer  Sam Chudnick <sam@chudnick.com>    2023-06-25 09:52:36 -0400
commit     95b73daa36b23565a8566f71f9b202d3459b685f (patch)
tree       cb17b021be70e7868d0ec235a761f0ecdc80f3f2 /roles
Diffstat (limited to 'roles')
101 files changed, 5716 insertions, 0 deletions
diff --git a/roles/linux_base/defaults/main.yml b/roles/linux_base/defaults/main.yml
new file mode 100644
index 0000000..3fb0cb5
--- /dev/null
+++ b/roles/linux_base/defaults/main.yml
@@ -0,0 +1 @@
domain: "home.local"
diff --git a/roles/linux_base/handlers/main.yml b/roles/linux_base/handlers/main.yml
new file mode 100644
index 0000000..0065ae9
--- /dev/null
+++ b/roles/linux_base/handlers/main.yml
@@ -0,0 +1,16 @@
- name: update and upgrade - debian
  when: ansible_facts['distribution'] == 'Debian'
  become: yes
  apt:
    name: "*"
    state: latest
    update_cache: yes
  register: apt_upgrade
  retries: 100
  until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg)

- name: update and upgrade - fedora
  when: ansible_facts['distribution'] == 'Fedora'
  dnf:
    name: "*"
    state: latest
diff --git a/roles/linux_base/tasks/main.yml b/roles/linux_base/tasks/main.yml
new file mode 100644
index 0000000..ef523ef
--- /dev/null
+++ b/roles/linux_base/tasks/main.yml
@@ -0,0 +1,57 @@
- name: remove cloud config managed /etc/hosts
  lineinfile:
    path: /etc/cloud/cloud.cfg
    regexp: ".*update_etc_hosts.*"
    state: absent

- name: set fully qualified hostname
  notify:
    - update and upgrade - debian
    - update and upgrade - fedora
  hostname:
    name: "{{ ansible_hostname }}.{{ domain }}"

- name: use https repos - debian
  when: ansible_facts['distribution'] == 'Debian'
  replace:
    path: /etc/apt/sources.list
    regexp: "http://"
    replace: "https://"

- name: install packages
  package:
    name: "{{ base_packages }}"
    state: latest

- name: allow ssh
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    rule: allow
    name: ssh

- name: reload ufw
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    state: reloaded

- name: enable ufw
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    state: enabled

- name: default deny incoming
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    default: deny
    direction: incoming

- name: default allow outgoing
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    default: allow
    direction: outgoing

- name: reload ufw
  when: ansible_facts['hostname'] != 'proxmox'
  ufw:
    state: reloaded
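
For reference, and not part of this commit: a minimal playbook sketch showing how the linux_base role and its handlers might be applied. The host group and the base_packages list are assumptions.

# hypothetical playbook; the package list is a placeholder
- hosts: all
  become: yes
  vars:
    domain: "home.local"
    base_packages:
      - vim
      - ufw
      - qemu-guest-agent
  roles:
    - linux_base
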
diff --git a/roles/proxmox/cloudinit_guest/defaults/main.yml b/roles/proxmox/cloudinit_guest/defaults/main.yml
new file mode 100644
index 0000000..a562ff3
--- /dev/null
+++ b/roles/proxmox/cloudinit_guest/defaults/main.yml
@@ -0,0 +1,7 @@
vm_onboot: yes
vm_agent: yes
vm_bridge: vmbr0
vm_full_clone: yes
memory_size: 512
cpu_cores: 1
cpu_sockets: 1
diff --git a/roles/proxmox/cloudinit_guest/tasks/main.yml b/roles/proxmox/cloudinit_guest/tasks/main.yml
new file mode 100644
index 0000000..ab958dc
--- /dev/null
+++ b/roles/proxmox/cloudinit_guest/tasks/main.yml
@@ -0,0 +1,80 @@
- name: check if id already exists
  stat:
    path: "/etc/pve/qemu-server/{{ ci_base_id }}.conf"
  register: stat_result

- meta: end_play
  when: stat_result.stat.exists

- name: install packages
  package:
    name:
      - python3-pip
      - python3-requests

- name: ensure latest version of proxmoxer is installed
  become: yes
  become_user: "{{ proxmox_username }}"
  pip:
    name: proxmoxer==2.0.0

- name: remove any existing api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]

- name: create api token
  register: api_token
  changed_when: api_token.rc == 0
  args:
    executable: /bin/bash
  shell: |
    set -eo pipefail
    pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2

- name: clone template and create guest
  become: yes
  become_user: "{{ proxmox_username }}"
  community.general.proxmox_kvm:
    api_host: proxmox.home.local
    api_user: "{{ proxmox_api_user }}"
    api_token_id: "ansible"
    api_token_secret: "{{ api_token.stdout }}"
    node: proxmox
    full: "{{ vm_full_clone }}"
    clone: arbitrary
    vmid: "{{ template_id }}"
    newid: "{{ vm_id }}"
    name: "{{ vm_name }}"
    memory: "{{ memory_size }}"
    sockets: "{{ cpu_sockets }}"
    cores: "{{ cpu_cores }}"
    bios: "{{ bios_type }}"
    ipconfig:
      ipconfig0: "ip={{ ip_addr }},gw={{ gateway }}"
    net:
      net0: "virtio,bridge={{ vm_bridge }},tag={{ vm_vlan }}"
    nameservers: "{{ nameserver }}"
    onboot: "{{ vm_onboot }}"
    agent: "{{ vm_agent }}"
    state: present

- name: start vm
  become: yes
  become_user: "{{ proxmox_username }}"
  community.general.proxmox_kvm:
    api_host: proxmox.home.local
    api_user: "{{ proxmox_api_user }}"
    api_token_id: "ansible"
    api_token_secret: "{{ api_token.stdout }}"
    node: proxmox
    vmid: "{{ vm_id }}"
    state: started

- name: remove api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]
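
For reference, and not part of this commit: a hedged sketch of how the cloudinit_guest role might be invoked against the Proxmox node. Every value shown (IDs, addresses, VLAN, BIOS type) is a placeholder assumption.

# hypothetical invocation; adjust to the actual inventory and template
- hosts: proxmox
  become: yes
  vars:
    template_id: 1000
    vm_id: 2001
    vm_name: web01
    vm_vlan: 10
    ip_addr: 192.168.10.21/24
    gateway: 192.168.10.1
    nameserver: 192.168.10.1
    bios_type: seabios
  roles:
    - proxmox/cloudinit_guest
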
diff --git a/roles/proxmox/debian_cloudinit/defaults/main.yml b/roles/proxmox/debian_cloudinit/defaults/main.yml
new file mode 100644
index 0000000..dfebf34
--- /dev/null
+++ b/roles/proxmox/debian_cloudinit/defaults/main.yml
@@ -0,0 +1,8 @@
ci_target_dir: "/home/{{ ci_user }}"
ci_memory_size: 512
ci_base_id: 1000
ci_disk_size: "10G"
ci_storage: "local-lvm"
ci_user: "initadmin"
ssh_key_local: /home/sam/.ssh/id_rsa.pub
ssh_key_dest: /home/vmadmin/ci_sshkey
diff --git a/roles/proxmox/debian_cloudinit/tasks/main.yml b/roles/proxmox/debian_cloudinit/tasks/main.yml
new file mode 100644
index 0000000..8ed7dfd
--- /dev/null
+++ b/roles/proxmox/debian_cloudinit/tasks/main.yml
@@ -0,0 +1,115 @@
- name: check if id already exists
  stat:
    path: "/etc/pve/qemu-server/{{ ci_base_id }}.conf"
  register: stat_result

- meta: end_play
  when: stat_result.stat.exists

- name: install packages
  package:
    name:
      - python3-pip
      - python3-requests

- name: ensure latest version of proxmoxer is installed
  become: yes
  become_user: "{{ proxmox_username }}"
  pip:
    name: proxmoxer==2.0.0

- name: download the hashes
  get_url:
    url: "https://cloud.debian.org/images/cloud/bookworm/latest/SHA512SUMS"
    dest: "{{ ci_target_dir }}"

- name: get the hash
  changed_when: false
  args:
    executable: /bin/bash
  shell: |
    set -eo pipefail
    grep debian-12-genericcloud-amd64.qcow2 {{ ci_target_dir }}/SHA512SUMS | cut -d ' ' -f 1
  register: sha512sum

- name: download the cloud image
  get_url:
    url: "https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2"
    dest: "{{ ci_target_dir }}"
    checksum: "sha512:{{ sha512sum.stdout }}"

- name: remove any existing api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]

- name: create api token
  register: api_token
  changed_when: api_token.rc == 0
  args:
    executable: /bin/bash
  shell: |
    set -eo pipefail
    pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2

- name: create vm
  become: yes
  become_user: "{{ proxmox_username }}"
  community.general.proxmox_kvm:
    api_host: proxmox.home.local
    api_user: "{{ proxmox_api_user }}"
    api_token_id: "ansible"
    api_token_secret: "{{ api_token.stdout }}"
    node: proxmox
    # basic settings
    vmid: "{{ ci_base_id }}"
    memory: "{{ ci_memory_size }}"
    sockets: "{{ cpu_sockets }}"
    cores: "{{ cpu_cores }}"
    bios: "{{ bios_type }}"
    agent: "{{ vm_agent }}"
    state: "present"
    # display settings
    serial:
      "serial0": "socket"
    vga: "serial0"
    # disks and boot settings
    scsihw: "virtio-scsi-pci"
    ide:
      ide2: "{{ ci_storage }}:cloudinit"
    boot: "c"
    bootdisk: "scsi0"
    onboot: "{{ vm_onboot }}"
    # cloud-init
    citype: "nocloud"
    ciuser: "{{ ci_user }}"
    cipassword: "{{ ci_password }}"
    sshkeys: "{{ ci_sshkey }}"
    # network
    net:
      net0: "virtio,bridge={{ ci_bridge }},tag={{ ci_vlan }}"
    nameservers: "{{ nameserver }}"
    template: "yes"

- name: import the cloud image
  changed_when: false
  command:
    cmd: "qm importdisk {{ ci_base_id }} {{ ci_target_dir }}/debian-12-genericcloud-amd64.qcow2 {{ ci_storage }}"
    creates: "/dev/pve/vm-{{ ci_base_id }}-disk-0"

- name: attach the cloud image as a new disk
  changed_when: false
  command:
    cmd: "qm set {{ ci_base_id }} --scsi0 {{ ci_storage }}:vm-{{ ci_base_id }}-disk-0"

- name: resize disk to standard size
  changed_when: false
  command:
    cmd: "qm resize {{ ci_base_id }} scsi0 {{ ci_disk_size }}"

- name: remove api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]
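
For reference, and not part of this commit: a hedged sketch of a play that would build the Debian cloud-init template with this role. The group name, vault references, and network values are assumptions.

# hypothetical playbook; values are placeholders
- hosts: proxmox
  become: yes
  vars:
    ci_password: "{{ vault_ci_password }}"
    ci_sshkey: "{{ lookup('file', 'files/id_rsa.pub') }}"
    ci_bridge: vmbr0
    ci_vlan: 10
    nameserver: 192.168.10.1
    cpu_sockets: 1
    cpu_cores: 1
    bios_type: seabios
    vm_agent: yes
    vm_onboot: no
  roles:
    - proxmox/debian_cloudinit
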
diff --git a/roles/proxmox/fedora_cloudinit/defaults/main.yml b/roles/proxmox/fedora_cloudinit/defaults/main.yml
new file mode 100644
index 0000000..fb44657
--- /dev/null
+++ b/roles/proxmox/fedora_cloudinit/defaults/main.yml
@@ -0,0 +1,8 @@
ci_target_dir: "/home/{{ ci_user }}"
ci_memory_size: 512
ci_base_id: 1001
ci_storage: "local-lvm"
ci_disk_size: "10G"
ci_user: "initadmin"
ssh_key_local: files/id_rsa.pub
ssh_key_dest: /tmp/ci_sshkey
diff --git a/roles/proxmox/fedora_cloudinit/tasks/main.yml b/roles/proxmox/fedora_cloudinit/tasks/main.yml
new file mode 100644
index 0000000..61ed185
--- /dev/null
+++ b/roles/proxmox/fedora_cloudinit/tasks/main.yml
@@ -0,0 +1,122 @@
- name: download the hashes
  get_url:
    url: "https://getfedora.org/static/checksums/36/images/Fedora-Cloud-36-1.5-x86_64-CHECKSUM"
    dest: "{{ ci_target_dir }}"

- name: install gpg
  package:
    name: gnupg
    state: latest

- name: download the GPG key
  get_url:
    url: "https://getfedora.org/static/fedora.gpg"
    dest: "{{ ci_target_dir }}"

- name: import gpg key
  changed_when: false
  args:
    executable: /bin/bash
  shell: |
    set -eo pipefail
    cat {{ ci_target_dir }}/fedora.gpg | gpg --import

- name: verify checksum file
  command:
    cmd: "gpg --verify {{ ci_target_dir }}/Fedora-Cloud-36-1.5-x86_64-CHECKSUM"
  register: result
  changed_when: false
  failed_when: result.rc > 0

- name: fail if unable to gpg verify checksums
  fail:
    msg: "failed to verify the checksums"
  when: result.rc > 0

- name: get the hash
  shell:
    cmd: "grep 'qcow2)' {{ ci_target_dir }}/Fedora-Cloud-36-1.5-x86_64-CHECKSUM | cut -d '=' -f 2 | tr -d ' '"
  changed_when: false
  register: sha256sum

- name: download the cloud image
  get_url:
    url: "https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2"
    dest: "{{ ci_target_dir }}"
    checksum: "sha256:{{ sha256sum.stdout }}"

- name: remove any existing api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]

- name: create api token
  register: api_token
  changed_when: api_token.rc == 0
  args:
    executable: /bin/bash
  shell: |
    set -eo pipefail
    pveum user token add vmadmin@pam ansible --privsep 0 --output-format yaml | grep value | cut -d ' ' -f 2

- name: create vm
  become: yes
  become_user: "{{ proxmox_username }}"
  community.general.proxmox_kvm:
    api_host: proxmox.home.local
    api_user: "{{ proxmox_api_user }}"
    api_token_id: "ansible"
    api_token_secret: "{{ api_token.stdout }}"
    node: proxmox
    # basic settings
    vmid: "{{ ci_base_id }}"
    memory: "{{ ci_memory_size }}"
    sockets: "{{ cpu_sockets }}"
    cores: "{{ cpu_cores }}"
    bios: "{{ bios_type }}"
    agent: "{{ vm_agent }}"
    state: "present"
    # display settings
    serial:
      "serial0": "socket"
    vga: "serial0"
    # disks and boot settings
    scsihw: "virtio-scsi-pci"
    ide:
      ide2: "{{ ci_storage }}:cloudinit"
    boot: "c"
    bootdisk: "scsi0"
    onboot: "{{ vm_onboot }}"
    # cloud-init
    citype: "nocloud"
    ciuser: "{{ ci_user }}"
    cipassword: "{{ ci_password }}"
    sshkeys: "{{ ci_sshkey }}"
    # network
    net:
      net0: "virtio,bridge={{ ci_bridge }},tag={{ ci_vlan }}"
    nameservers: "{{ nameserver }}"
    template: "yes"

- name: import the cloud image
  changed_when: false
  command:
    cmd: "qm importdisk {{ ci_base_id }} {{ ci_target_dir }}/Fedora-Cloud-Base-36-1.5.x86_64.qcow2 {{ ci_storage }}"
    creates: "/dev/pve/vm-{{ ci_base_id }}-disk-0"

- name: attach the cloud image as a new disk
  changed_when: false
  command:
    cmd: "qm set {{ ci_base_id }} --scsi0 {{ ci_storage }}:vm-{{ ci_base_id }}-disk-0"

- name: resize disk to standard size
  changed_when: false
  command:
    cmd: "qm resize {{ ci_base_id }} scsi0 {{ ci_disk_size }}"

- name: remove api token
  command: "pveum user token remove vmadmin@pam ansible"
  register: result
  changed_when: result.rc == 0
  failed_when: result.rc not in [0, 255]
diff --git a/roles/proxmox/proxmox_backup_server/tasks/main.yml b/roles/proxmox/proxmox_backup_server/tasks/main.yml
new file mode 100644
index 0000000..3e91a19
--- /dev/null
+++ b/roles/proxmox/proxmox_backup_server/tasks/main.yml
@@ -0,0 +1,42 @@
- name: add proxmox backup repo
  apt_repository:
    repo: deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription
    state: present
    update_cache: yes

- name: install proxmox backup server and client
  package:
    name:
      - proxmox-backup-server
      - proxmox-backup-client

- name: create datastore
  command:
    cmd: "proxmox-backup-manager datastore create {{ pbs_datastore }} {{ pbs_datastore_path }} --keep-last {{ pbs_keep_last }} --keep-daily {{ pbs_keep_daily }} --keep-weekly {{ pbs_keep_weekly }} --keep-monthly {{ pbs_keep_monthly }} --keep-yearly {{ pbs_keep_yearly }}"
  register: result
  changed_when: false
  failed_when: result.rc not in [0, 255]

- name: create backup admin
  command:
    cmd: "proxmox-backup-manager user create {{ pbs_admin }} --password {{ pbs_admin_password }}"
  register: result
  changed_when: false
  failed_when: result.rc not in [0, 255]

- name: assign permissions for backup admin
  changed_when: false
  command:
    cmd: "proxmox-backup-manager acl update / Admin --auth-id {{ pbs_admin }}"

- name: create backup user
  command:
    cmd: "proxmox-backup-manager user create {{ pbs_user }} --password {{ pbs_password }}"
  register: result
  failed_when: result.rc not in [0, 255]
  changed_when: false

- name: assign permissions for backup user
  changed_when: false
  command:
    cmd: "proxmox-backup-manager acl update / DatastoreBackup --auth-id {{ pbs_user }}"
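
For context, and not part of this commit: the role expects the pbs_* variables to be supplied elsewhere (for example in group_vars). A hedged sketch with placeholder values:

# hypothetical group_vars entries; every value is an assumption
pbs_datastore: backups
pbs_datastore_path: /mnt/datastore/backups
pbs_keep_last: 3
pbs_keep_daily: 7
pbs_keep_weekly: 4
pbs_keep_monthly: 6
pbs_keep_yearly: 1
pbs_admin: backupadmin@pbs
pbs_admin_password: "{{ vault_pbs_admin_password }}"
pbs_user: backupclient@pbs
pbs_password: "{{ vault_pbs_password }}"
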
diff --git a/roles/proxmox/pve_backup/tasks/main.yml b/roles/proxmox/pve_backup/tasks/main.yml
new file mode 100644
index 0000000..eba51d9
--- /dev/null
+++ b/roles/proxmox/pve_backup/tasks/main.yml
@@ -0,0 +1,17 @@
- name: create cron job for root backup of proxmox ve
  cron:
    name: "proxmox / backup"
    cron_file: backup
    hour: "23"
    minute: "0"
    user: root
    job: "PBS_PASSWORD='{{ pbs_password }}' PBS_FINGERPRINT={{ pbs_fingerprint }} proxmox-backup-client backup root.pxar:/ --repository {{ pbs_user }}@{{ pbs_host }}:{{ pbs_datastore }}"

- name: create cron job for /etc/pve backup of proxmox ve
  cron:
    name: "proxmox /etc/pve backup"
    cron_file: backup
    hour: "23"
    minute: "0"
    user: root
    job: "PBS_PASSWORD='{{ pbs_password }}' PBS_FINGERPRINT={{ pbs_fingerprint }} proxmox-backup-client backup pve.pxar:/etc/pve --repository {{ pbs_user }}@{{ pbs_host }}:{{ pbs_datastore }}"
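
For context, and not part of this commit: these cron jobs also rely on connection details for the backup server. A hedged sketch of those variables with placeholder values:

# hypothetical group_vars entries; every value is an assumption
pbs_host: pbs.home.local
pbs_user: backupclient@pbs
pbs_datastore: backups
pbs_password: "{{ vault_pbs_password }}"
pbs_fingerprint: "{{ vault_pbs_fingerprint }}"  # PBS API certificate fingerprint
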
diff --git a/roles/proxmox/system/defaults/main.yml b/roles/proxmox/system/defaults/main.yml
new file mode 100644
index 0000000..0091ea1
--- /dev/null
+++ b/roles/proxmox/system/defaults/main.yml
@@ -0,0 +1,8 @@
username: vmadmin
ssh_public_key: changeme
oath_key: changeme
raid_id: "0"
raid_level: "1"
raid_devices: "/dev/sda1 /dev/sdb1"
raid_name: "prometheus:0"
diff --git a/roles/proxmox/system/tasks/main.yml b/roles/proxmox/system/tasks/main.yml
new file mode 100644
index 0000000..ac84900
--- /dev/null
+++ b/roles/proxmox/system/tasks/main.yml
@@ -0,0 +1,30 @@
---
- name: remove enterprise repo
  file:
    path: /etc/apt/sources.list.d/pve-enterprise.list
    state: absent

- name: add proxmox no subscription repo
  apt_repository:
    repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription

- name: create non-root user
  user:
    name: "{{ proxmox_username }}"
    groups:
      - sudo
    shell: /bin/bash

- name: give passwordless sudo to sudo group
  lineinfile:
    path: /etc/sudoers
    state: present
    regexp: '^%sudo'
    line: '%sudo ALL=(ALL) NOPASSWD: ALL'
    validate: '/usr/sbin/visudo -cf %s'

- name: deploy ssh public key
  authorized_key:
    user: "{{ proxmox_username }}"
    state: present
    key: "{{ lookup('file', 'data/common/id_rsa.pub') }}"
diff --git a/roles/proxmox/system/tasks/proxmox_repo.yml b/roles/proxmox/system/tasks/proxmox_repo.yml
new file mode 100644
index 0000000..bf2508d
--- /dev/null
+++ b/roles/proxmox/system/tasks/proxmox_repo.yml
@@ -0,0 +1,8 @@
- name: remove enterprise repo
  file:
    path: /etc/apt/sources.list.d/pve-enterprise.list
    state: absent

- name: add proxmox no subscription repo
  apt_repository:
    repo: deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription
diff --git a/roles/proxmox/system/tasks/user.yml b/roles/proxmox/system/tasks/user.yml
new file mode 100644
index 0000000..2ba337a
--- /dev/null
+++ b/roles/proxmox/system/tasks/user.yml
@@ -0,0 +1,28 @@
- name: create non-root user
  user:
    name: "{{ username }}"
    password: "{{ password | password_hash('sha512') }}"
    groups:
      - sudo
    shell: /bin/bash
    update_password: on_create
  register: newuser

- name: ensure primary user group exists
  group:
    name: "{{ username }}"
    state: present

- name: give passwordless sudo to sudo group
  lineinfile:
    path: /etc/sudoers
    state: present
    regexp: '^%sudo'
    line: '%sudo ALL=(ALL) NOPASSWD: ALL'
    validate: '/usr/sbin/visudo -cf %s'

- name: deploy ssh public key
  authorized_key:
    user: "{{ username }}"
    state: present
    key: "{{ ssh_public_key }}"
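
For reference, and not part of this commit: a hedged sketch of how user.yml might be pulled in from another task list; the variable values are placeholders.

# hypothetical wrapper task
- name: create an additional login account
  include_tasks: user.yml
  vars:
    username: vmadmin
    password: "{{ vault_vmadmin_password }}"
    ssh_public_key: "{{ lookup('file', 'files/id_rsa.pub') }}"
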
diff --git a/roles/services/chronyd/handlers/main.yml b/roles/services/chronyd/handlers/main.yml
new file mode 100644
index 0000000..7e6f687
--- /dev/null
+++ b/roles/services/chronyd/handlers/main.yml
@@ -0,0 +1,4 @@
- name: restart chronyd
  service:
    name: chronyd
    state: restarted
diff --git a/roles/services/chronyd/tasks/main.yml b/roles/services/chronyd/tasks/main.yml
new file mode 100644
index 0000000..73fdc28
--- /dev/null
+++ b/roles/services/chronyd/tasks/main.yml
@@ -0,0 +1,30 @@
- name: install packages
  package:
    name: chrony
    state: latest

- name: deploy chrony configuration
  when: ansible_facts['distribution'] == 'Debian'
  notify: restart chronyd
  copy:
    src: "{{ chrony_config }}"
    dest: /etc/chrony/chrony.conf
    owner: root
    group: root
    mode: '0644'

- name: deploy chrony configuration
  when: ansible_facts['distribution'] == 'Fedora'
  notify: restart chronyd
  copy:
    src: "{{ chrony_config }}"
    dest: /etc/chrony.conf
    owner: root
    group: root
    mode: '0644'

- name: make sure chronyd is enabled
  systemd:
    name: chronyd
    enabled: yes
    masked: no
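
For reference, and not part of this commit: a minimal playbook sketch applying the chronyd role; the host group and the chrony_config path are assumptions.

# hypothetical playbook
- hosts: all
  become: yes
  vars:
    chrony_config: files/chrony/chrony.conf
  roles:
    - services/chronyd
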
diff --git a/roles/services/containers/arr_stack/handlers/main.yml b/roles/services/containers/arr_stack/handlers/main.yml
new file mode 100644
index 0000000..5463835
--- /dev/null
+++ b/roles/services/containers/arr_stack/handlers/main.yml
@@ -0,0 +1,4 @@
- name: restart nginx
  service:
    name: nginx
    state: restarted
diff --git a/roles/services/containers/arr_stack/tasks/gluetun.yml b/roles/services/containers/arr_stack/tasks/gluetun.yml
new file mode 100644
index 0000000..e47d55a
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/gluetun.yml
@@ -0,0 +1,105 @@
- name: set image fact
  set_fact:
    image: qmcgaw/gluetun:v3.34.3

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create gluetun directory
  file:
    path: "{{ docker_home }}/gluetun"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create gluetun data directory
  file:
    path: "{{ docker_home }}/gluetun/data"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push gluetun image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create gluetun docker network
  docker_network:
    name: "{{ gluetun_network_name }}"
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    driver: bridge
    ipam_config:
      - subnet: "{{ gluetun_subnet }}"
        gateway: "{{ gluetun_gateway }}"

- name: create and deploy gluetun container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "gluetun"
    hostname: "gluetun"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    capabilities:
      - net_admin
    devices:
      - "/dev/net/tun:/dev/net/tun"
    purge_networks: yes
    networks:
      - name: "{{ gluetun_network_name }}"
        ipv4_address: "{{ gluetun_ipv4 }}"
    ports:
      - "127.0.0.1:{{ qbittorrent_external_port }}:{{ qbittorrent_external_port }}"
      - "127.0.0.1:{{ sonarr_external_port }}:8989"
      - "127.0.0.1:{{ radarr_external_port }}:7878"
      - "127.0.0.1:{{ lidarr_external_port }}:8686"
      - "127.0.0.1:{{ readarr_external_port }}:8787"
      - "127.0.0.1:{{ prowlarr_external_port }}:9696"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "VPN_SERVICE_PROVIDER": "mullvad"
      "VPN_TYPE": "wireguard"
      "WIREGUARD_PRIVATE_KEY": "{{ wireguard_privkey }}"
      "WIREGUARD_ADDRESSES": "{{ wireguard_addrs }}"
      "SERVER_CITIES": "{{ gluetun_cities }}"
      "DOT_PROVIDERS": "quad9"
      "BLOCK_MALICIOUS": "on"
      "BLOCK_SURVEILLANCE": "on"
      "BLOCK_ADS": "on"
      "HEALTH_TARGET_ADDRESS": "www.debian.org:443"
    volumes:
      - "{{ docker_home }}/gluetun/data:/gluetun"
diff --git a/roles/services/containers/arr_stack/tasks/lidarr.yml b/roles/services/containers/arr_stack/tasks/lidarr.yml
new file mode 100644
index 0000000..1f70437
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/lidarr.yml
@@ -0,0 +1,93 @@
- name: set image fact
  set_fact:
    image: linuxserver/lidarr:1.2.6-nightly

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create lidarr directory
  file:
    path: "{{ docker_home }}/lidarr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create lidarr config directory
  file:
    path: "{{ docker_home }}/lidarr/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push lidarr image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy lidarr container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "lidarr"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/lidarr/config:/config"
      - "{{ docker_home }}/arr/data:/data"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ lidarr_nginx_config }}"
    dest: /etc/nginx/sites-available/lidarr.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/lidarr.conf
    dest: /etc/nginx/sites-enabled/lidarr.conf
    owner: root
    group: root
    state: link
diff --git a/roles/services/containers/arr_stack/tasks/main.yml b/roles/services/containers/arr_stack/tasks/main.yml
new file mode 100644
index 0000000..ee27384
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/main.yml
@@ -0,0 +1,130 @@
- name: create arr directory structure
  file:
    path: "{{ docker_home }}/arr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr directory structure
  file:
    path: "{{ docker_home }}/arr/data"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'

- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/torrents"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/torrents/movies"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/torrents/music"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/torrents/books"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/torrents/tv"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'

- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/usenet"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/usenet/movies"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/usenet/music"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/usenet/books"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/usenet/tv"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'

- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/media"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/media/movies"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/media/music"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/media/books"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
- name: create arr/data directory structure
  file:
    path: "{{ docker_home }}/arr/data/media/tv"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'

- include_tasks: gluetun.yml
- include_tasks: qbittorrent.yml
- include_tasks: sonarr.yml
- include_tasks: radarr.yml
- include_tasks: lidarr.yml
- include_tasks: readarr.yml
- include_tasks: prowlarr.yml
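
A hedged aside, not part of this commit: the repeated directory tasks above could also be collapsed into a single looped task; a sketch of that pattern, reusing the same variables (the file module creates missing parent directories when state is directory):

- name: create arr directory tree (loop form)
  file:
    path: "{{ docker_home }}/arr/data/{{ item }}"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0775'
  loop:
    - torrents/movies
    - torrents/music
    - torrents/books
    - torrents/tv
    - usenet/movies
    - usenet/music
    - usenet/books
    - usenet/tv
    - media/movies
    - media/music
    - media/books
    - media/tv
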
diff --git a/roles/services/containers/arr_stack/tasks/prowlarr.yml b/roles/services/containers/arr_stack/tasks/prowlarr.yml
new file mode 100644
index 0000000..53f1a45
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/prowlarr.yml
@@ -0,0 +1,92 @@
- name: set image fact
  set_fact:
    image: linuxserver/prowlarr:1.6.2-nightly

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create prowlarr directory
  file:
    path: "{{ docker_home }}/prowlarr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create prowlarr config directory
  file:
    path: "{{ docker_home }}/prowlarr/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push prowlarr image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy prowlarr container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "prowlarr"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/prowlarr/config:/config"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ prowlarr_nginx_config }}"
    dest: /etc/nginx/sites-available/prowlarr.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/prowlarr.conf
    dest: /etc/nginx/sites-enabled/prowlarr.conf
    owner: root
    group: root
    state: link
diff --git a/roles/services/containers/arr_stack/tasks/qbittorrent.yml b/roles/services/containers/arr_stack/tasks/qbittorrent.yml
new file mode 100644
index 0000000..25e554f
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/qbittorrent.yml
@@ -0,0 +1,94 @@
- name: set image fact
  set_fact:
    image: linuxserver/qbittorrent:4.5.4

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create qbittorrent directory
  file:
    path: "{{ docker_home }}/qbittorrent"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create qbittorrent config directory
  file:
    path: "{{ docker_home }}/qbittorrent/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push qbittorrent image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy qbittorrent container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "qbittorrent"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "WEBUI_PORT": "{{ qbittorrent_external_port }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/qbittorrent/config:/config"
      - "{{ docker_home }}/arr/data:/data"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ qbittorrent_nginx_config }}"
    dest: /etc/nginx/sites-available/qbittorrent.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/qbittorrent.conf
    dest: /etc/nginx/sites-enabled/qbittorrent.conf
    owner: root
    group: root
    state: link
diff --git a/roles/services/containers/arr_stack/tasks/radarr.yml b/roles/services/containers/arr_stack/tasks/radarr.yml
new file mode 100644
index 0000000..2e98c47
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/radarr.yml
@@ -0,0 +1,93 @@
- name: set image fact
  set_fact:
    image: linuxserver/radarr:4.6.4-nightly

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create radarr directory
  file:
    path: "{{ docker_home }}/radarr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create radarr config directory
  file:
    path: "{{ docker_home }}/radarr/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push radarr image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy radarr container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "radarr"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/radarr/config:/config"
      - "{{ docker_home }}/arr/data:/data"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ radarr_nginx_config }}"
    dest: /etc/nginx/sites-available/radarr.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/radarr.conf
    dest: /etc/nginx/sites-enabled/radarr.conf
    owner: root
    group: root
    state: link
diff --git a/roles/services/containers/arr_stack/tasks/readarr.yml b/roles/services/containers/arr_stack/tasks/readarr.yml
new file mode 100644
index 0000000..bd8b2ec
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/readarr.yml
@@ -0,0 +1,93 @@
- name: set image fact
  set_fact:
    image: linuxserver/readarr:0.2.0-nightly

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create readarr directory
  file:
    path: "{{ docker_home }}/readarr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create readarr config directory
  file:
    path: "{{ docker_home }}/readarr/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push readarr image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy readarr container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "readarr"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/readarr/config:/config"
      - "{{ docker_home }}/arr/data:/data"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ readarr_nginx_config }}"
    dest: /etc/nginx/sites-available/readarr.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/readarr.conf
    dest: /etc/nginx/sites-enabled/readarr.conf
    owner: root
    group: root
    state: link
diff --git a/roles/services/containers/arr_stack/tasks/sonarr.yml b/roles/services/containers/arr_stack/tasks/sonarr.yml
new file mode 100644
index 0000000..ac712ba
--- /dev/null
+++ b/roles/services/containers/arr_stack/tasks/sonarr.yml
@@ -0,0 +1,93 @@
- name: set image fact
  set_fact:
    image: linuxserver/sonarr:develop-version-4.0.0.433

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create sonarr directory
  file:
    path: "{{ docker_home }}/sonarr"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create sonarr config directory
  file:
    path: "{{ docker_home }}/sonarr/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push sonarr image
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ image }}"
    repository: "{{ custom_registry }}/{{ repo_tag }}"
    push: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    source: pull
    force_source: yes

- name: create and deploy sonarr container
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "sonarr"
    image: "{{ custom_registry }}/{{ repo_tag }}"
    recreate: yes
    pull: yes
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    network_mode: "container:gluetun"
    state: 'started'
    comparisons:
      '*': strict
    restart_policy: unless-stopped
    env:
      "TZ": "{{ timezone }}"
      "PUID": "0"
      "PGID": "0"
    volumes:
      - "{{ docker_home }}/sonarr/config:/config"
      - "{{ docker_home }}/arr/data:/data"

- name: deploy nginx configuration
  notify: restart nginx
  register: nginx_config
  template:
    src: "{{ sonarr_nginx_config }}"
    dest: /etc/nginx/sites-available/sonarr.conf
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: /etc/nginx/sites-available/sonarr.conf
    dest: /etc/nginx/sites-enabled/sonarr.conf
    owner: root
    group: root
    state: link
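
For context, and not part of this commit: each of the *arr task files above expects a path to an nginx site template plus an external port, and the rendered sites would typically proxy to the matching 127.0.0.1 port published on the gluetun container. A hedged sketch of those variables with placeholder paths:

# hypothetical variable values; the template paths are assumptions
sonarr_nginx_config: templates/arr/sonarr.conf.j2
radarr_nginx_config: templates/arr/radarr.conf.j2
lidarr_nginx_config: templates/arr/lidarr.conf.j2
readarr_nginx_config: templates/arr/readarr.conf.j2
prowlarr_nginx_config: templates/arr/prowlarr.conf.j2
qbittorrent_nginx_config: templates/arr/qbittorrent.conf.j2
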
diff --git a/roles/services/containers/authelia/handlers/main.yml b/roles/services/containers/authelia/handlers/main.yml
new file mode 100644
index 0000000..5463835
--- /dev/null
+++ b/roles/services/containers/authelia/handlers/main.yml
@@ -0,0 +1,4 @@
- name: restart nginx
  service:
    name: nginx
    state: restarted
diff --git a/roles/services/containers/authelia/tasks/main.yml b/roles/services/containers/authelia/tasks/main.yml
new file mode 100644
index 0000000..c6bb337
--- /dev/null
+++ b/roles/services/containers/authelia/tasks/main.yml
@@ -0,0 +1,283 @@
- name: set image fact
  set_fact:
    image: authelia/authelia:master

- name: set other facts
  vars:
    array: "{{ image.split('/', 1) }}"
  set_fact:
    repo_tag: "{{ array.1 }}"
    custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}"

- name: create authelia directory
  file:
    path: "{{ docker_home }}/authelia"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create authelia config directory
  file:
    path: "{{ docker_home }}/authelia/config"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create authelia secrets directory
  file:
    path: "{{ docker_home }}/authelia/secrets"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: create redis data directory
  file:
    path: "{{ docker_home }}/authelia/redis_data"
    state: directory
    owner: "{{ docker_username }}"
    group: "{{ docker_username }}"
    mode: '0755'

- name: place authelia config in proper location
  copy:
    src: "{{ authelia_config }}"
    dest: "{{ docker_home }}/authelia/config/configuration.yml"
    owner: root
    group: docker
    mode: '0644'

# nginx snippets

- name: copy proxy.conf snippet
  copy:
    src: "{{ authelia_proxy_snippet }}"
    dest: "/etc/nginx/snippets/proxy.conf"
    owner: root
    group: root
    mode: '0644'

- name: copy authelia-location.conf snippet
  copy:
    src: "{{ authelia_location_snippet }}"
    dest: "/etc/nginx/snippets/authelia-location.conf"
    owner: root
    group: root
    mode: '0644'

- name: copy authelia-authrequest.conf snippet
  copy:
    src: "{{ authelia_request_snippet }}"
    dest: "/etc/nginx/snippets/authelia-authrequest.conf"
    owner: root
    group: root
    mode: '0644'

# authelia secrets

- name: create jwt_secret file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/jwt_secret"
    insertbefore: BOF
    line: "{{ authelia_jwt_secret }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: create session_secret file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/session_secret"
    insertbefore: BOF
    line: "{{ authelia_session_secret }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: create encryption_key file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/encryption_key"
    insertbefore: BOF
    line: "{{ authelia_encryption_key }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: create oidc_hmac file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/oidc_hmac"
    insertbefore: BOF
    line: "{{ authelia_oidc_hmac }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: remove existing cert file
  file:
    path: "{{ docker_home }}/authelia/secrets/oidc_cert"
    state: absent

- name: create oidc_cert file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/oidc_cert"
    insertbefore: BOF
    line: "{{ authelia_oidc_cert }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: remove existing key file
  file:
    path: "{{ docker_home }}/authelia/secrets/oidc_key"
    state: absent

- name: create oidc_key file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/oidc_key"
    insertbefore: BOF
    line: "{{ authelia_oidc_key }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: create smtp_password file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/smtp_password"
    insertbefore: BOF
    line: "{{ authelia_smtp_password }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: create ldap_password file
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/ldap_password"
    insertbefore: BOF
    line: "{{ authelia_ldap_password }}"
    owner: root
    group: root
    mode: '0644'
    create: yes

- name: login to docker registry
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_login:
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    registry_url: "{{ docker_registry_url }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"

- name: pull and push authelia image
183 | become: yes | ||
184 | become_user: "{{ docker_username }}" | ||
185 | environment: | ||
186 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
187 | docker_image: | ||
188 | name: "{{ image }}" | ||
189 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
190 | push: yes | ||
191 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
192 | source: pull | ||
193 | force_source: yes | ||
194 | |||
195 | - name: create authelia docker network | ||
196 | docker_network: | ||
197 | name: "{{ authelia_network_name }}" | ||
198 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
199 | driver: bridge | ||
200 | ipam_config: | ||
201 | - subnet: "{{ authelia_subnet }}" | ||
202 | gateway: "{{ authelia_gateway }}" | ||
203 | |||
204 | - name: create and deploy authelia container | ||
205 | become: yes | ||
206 | become_user: "{{ docker_username }}" | ||
207 | environment: | ||
208 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
209 | docker_container: | ||
210 | name: "authelia" | ||
211 | hostname: "authelia" | ||
212 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
213 | recreate: yes | ||
214 | pull: yes | ||
215 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
216 | purge_networks: yes | ||
217 | networks: | ||
218 | - name: "{{ authelia_network_name }}" | ||
219 | ipv4_address: "{{ authelia_ipv4 }}" | ||
220 | ports: | ||
221 | - "127.0.0.1:9091:9091" | ||
222 | - "127.0.0.1:9959:9959" | ||
223 | state: 'started' | ||
224 | comparisons: | ||
225 | '*': strict | ||
226 | restart_policy: unless-stopped | ||
227 | env: | ||
228 | "TZ": "{{ timezone }}" | ||
229 | "AUTHELIA_JWT_SECRET_FILE": "/secrets/jwt_secret" | ||
230 | "AUTHELIA_SESSION_SECRET_FILE": "/secrets/session_secret" | ||
231 | "AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE": "/secrets/encryption_key" | ||
232 | "AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE": "/secrets/oidc_hmac" | ||
233 | "AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_CERTIFICATE_CHAIN_FILE": "/secrets/oidc_cert" | ||
234 | "AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE": "/secrets/oidc_key" | ||
235 | "AUTHELIA_NOTIFIER_SMTP_PASSWORD_FILE": "/secrets/smtp_password" | ||
236 | "AUTHELIA_AUTHENTICATION_BACKEND_LDAP_PASSWORD_FILE": "/secrets/ldap_password" | ||
237 | volumes: | ||
238 | - "{{ docker_home }}/authelia/config:/config" | ||
239 | - "{{ docker_home }}/authelia/secrets:/secrets" | ||
240 | |||
241 | |||
242 | - name: create and deploy redis container | ||
243 | become: yes | ||
244 | become_user: "{{ docker_username }}" | ||
245 | environment: | ||
246 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
247 | docker_container: | ||
248 | name: "redis_authelia" | ||
249 | hostname: "redis_authelia" | ||
250 | image: redis:alpine | ||
251 | state: 'started' | ||
252 | recreate: yes | ||
253 | pull: yes | ||
254 | restart_policy: unless-stopped | ||
255 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
256 | purge_networks: yes | ||
257 | networks: | ||
258 | - name: "{{ authelia_network_name }}" | ||
259 | ipv4_address: "{{ redis_authelia_ipv4 }}" | ||
260 | volumes: | ||
261 | - "{{ docker_home }}/authelia/redis_data:/data" | ||
262 | exposed_ports: | ||
263 | - '6379' | ||
264 | env: | ||
265 | "TZ": "{{ timezone }}" | ||
266 | |||
267 | - name: deploy nginx configuration | ||
268 | notify: restart nginx | ||
269 | register: nginx_config | ||
270 | copy: | ||
271 | src: "{{ authelia_nginx_config }}" | ||
272 | dest: /etc/nginx/sites-available/authelia.conf | ||
273 | owner: root | ||
274 | group: root | ||
275 | mode: '0644' | ||
276 | |||
277 | - name: symlink site | ||
278 | file: | ||
279 | src: /etc/nginx/sites-available/authelia.conf | ||
280 | dest: /etc/nginx/sites-enabled/authelia.conf | ||
281 | owner: root | ||
282 | group: root | ||
283 | state: link | ||
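The eight secret-file tasks above repeat the same lineinfile pattern and differ only in file name and variable. A possible refactor, not what the role currently does, is a single looped task; the sketch below keeps the role's 0644 mode, since the rootless container reads these root-owned files through the world-readable bit (tightening to 0600 would also require changing ownership to the docker user), and it leaves out the two OIDC files that the role deletes and recreates. no_log keeps the secret values out of play output.

- name: create authelia secret files (sketch of a looped equivalent)
  loop:
    - { file: jwt_secret,     value: "{{ authelia_jwt_secret }}" }
    - { file: session_secret, value: "{{ authelia_session_secret }}" }
    - { file: encryption_key, value: "{{ authelia_encryption_key }}" }
    - { file: oidc_hmac,      value: "{{ authelia_oidc_hmac }}" }
    - { file: smtp_password,  value: "{{ authelia_smtp_password }}" }
    - { file: ldap_password,  value: "{{ authelia_ldap_password }}" }
  loop_control:
    label: "{{ item.file }}"   # avoid echoing secret values in task labels
  no_log: true
  lineinfile:
    path: "{{ docker_home }}/authelia/secrets/{{ item.file }}"
    insertbefore: BOF
    line: "{{ item.value }}"
    owner: root
    group: root
    mode: '0644'               # same mode as the role; see note above
    create: yes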
diff --git a/roles/services/containers/bookstack/handlers/main.yml b/roles/services/containers/bookstack/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/bookstack/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/bookstack/tasks/main.yml b/roles/services/containers/bookstack/tasks/main.yml new file mode 100644 index 0000000..3965143 --- /dev/null +++ b/roles/services/containers/bookstack/tasks/main.yml | |||
@@ -0,0 +1,118 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: linuxserver/bookstack:version-v23.05 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create bookstack directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/bookstack" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/bookstack/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: create bookstack docker network | ||
29 | become: yes | ||
30 | become_user: "{{ docker_username }}" | ||
31 | docker_network: | ||
32 | name: "{{ bookstack_network_name }}" | ||
33 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
34 | driver: bridge | ||
35 | ipam_config: | ||
36 | - subnet: "{{ bookstack_subnet }}" | ||
37 | gateway: "{{ bookstack_gateway }}" | ||
38 | |||
39 | - name: create and deploy bookstack db | ||
40 | become: yes | ||
41 | become_user: "{{ docker_username }}" | ||
42 | environment: | ||
43 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
44 | docker_container: | ||
45 | name: "bookstack-db" | ||
46 | hostname: "bookstack-db" | ||
47 | image: linuxserver/mariadb:10.11.4 | ||
48 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
49 | purge_networks: yes | ||
50 | networks: | ||
51 | - name: "{{ bookstack_network_name }}" | ||
52 | ipv4_address: "{{ bookstack_db_ipv4 }}" | ||
53 | volumes: | ||
54 | - "{{ docker_home }}/bookstack/data:/config" | ||
55 | env: | ||
56 | "TZ": "{{ timezone }}" | ||
57 | "MYSQL_ROOT_PASSWORD": "{{ bookstack_mysql_root_password }}" | ||
58 | "MYSQL_DATABASE": "bookstack" | ||
59 | "MYSQL_USER": "bookstack" | ||
60 | "MYSQL_PASSWORD": "{{ bookstack_mysql_password }}" | ||
61 | state: 'started' | ||
62 | recreate: yes | ||
63 | restart_policy: unless-stopped | ||
64 | |||
65 | - name: create and deploy bookstack container | ||
66 | become: yes | ||
67 | become_user: "{{ docker_username }}" | ||
68 | environment: | ||
69 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
70 | docker_container: | ||
71 | name: "bookstack" | ||
72 | hostname: "bookstack" | ||
73 | image: "{{ image }}" | ||
74 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
75 | purge_networks: yes | ||
76 | networks: | ||
77 | - name: "{{ bookstack_network_name }}" | ||
78 | ipv4_address: "{{ bookstack_ipv4 }}" | ||
79 | ports: | ||
80 | - "127.0.0.1:{{ bookstack_external_port }}:80" | ||
81 | volumes: | ||
82 | - "{{ docker_home }}/bookstack/data:/config" | ||
83 | env: | ||
84 | "DB_HOST": "bookstack-db" | ||
85 | "DB_PORT": "3306" | ||
86 | "DB_USER": "bookstack" | ||
87 | "DB_PASS": "{{ bookstack_mysql_password }}" | ||
88 | "DB_DATABASE": "bookstack" | ||
89 | "APP_URL": "https://{{ bookstack_server_name }}" | ||
90 | "AUTH_METHOD": "oidc" | ||
91 | "OIDC_NAME": "SSO" | ||
92 | "OIDC_DISPLAY_NAME_CLAIMS": "name" | ||
93 | "OIDC_CLIENT_ID": "bookstack" | ||
94 | "OIDC_CLIENT_SECRET": "{{ bookstack_oidc_secret }}" | ||
95 | "OIDC_ISSUER": "{{ oidc_issuer }}" | ||
96 | "OIDC_ISSUER_DISCOVER": "true" | ||
97 | "APP_DEFAULT_DARK_MODE": "true" | ||
98 | #"OIDC_DUMP_USER_DETAILS": "true" | ||
99 | state: 'started' | ||
100 | recreate: yes | ||
101 | restart_policy: unless-stopped | ||
102 | |||
103 | - name: deploy nginx configuration | ||
104 | notify: restart nginx | ||
105 | template: | ||
106 | src: "{{ bookstack_nginx_config }}" | ||
107 | dest: /etc/nginx/sites-available/bookstack.conf | ||
108 | owner: root | ||
109 | group: root | ||
110 | mode: '0644' | ||
111 | |||
112 | - name: symlink site | ||
113 | file: | ||
114 | src: /etc/nginx/sites-available/bookstack.conf | ||
115 | dest: /etc/nginx/sites-enabled/bookstack.conf | ||
116 | owner: root | ||
117 | group: root | ||
118 | state: link | ||
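The "deploy nginx configuration" plus "symlink site" pair appears in every container role in this commit with only the site name changing. If that duplication ever becomes a burden, the pair could live in one shared task file included per role. The file name nginx_site.yml and the site_name/site_src variables below are hypothetical and not part of this repository; template also handles plain (non-templated) config files, so it covers the roles that currently use copy.

# hypothetical shared task file: nginx_site.yml
- name: deploy nginx configuration
  notify: restart nginx
  template:
    src: "{{ site_src }}"
    dest: "/etc/nginx/sites-available/{{ site_name }}.conf"
    owner: root
    group: root
    mode: '0644'

- name: symlink site
  file:
    src: "/etc/nginx/sites-available/{{ site_name }}.conf"
    dest: "/etc/nginx/sites-enabled/{{ site_name }}.conf"
    owner: root
    group: root
    state: link

# per-role usage (sketch)
- name: deploy bookstack nginx site
  include_tasks: nginx_site.yml
  vars:
    site_name: bookstack
    site_src: "{{ bookstack_nginx_config }}"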
diff --git a/roles/services/containers/cadvisor/handlers/main.yml b/roles/services/containers/cadvisor/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/cadvisor/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/cadvisor/tasks/main.yml b/roles/services/containers/cadvisor/tasks/main.yml new file mode 100644 index 0000000..cc30cdb --- /dev/null +++ b/roles/services/containers/cadvisor/tasks/main.yml | |||
@@ -0,0 +1,90 @@ | |||
1 | - name: create cadvisor directory | ||
2 | file: | ||
3 | path: "{{ docker_home }}/cadvisor" | ||
4 | state: directory | ||
5 | owner: "{{ docker_username }}" | ||
6 | group: "{{ docker_username }}" | ||
7 | mode: '0755' | ||
8 | |||
9 | - name: login to docker registry | ||
10 | become: yes | ||
11 | become_user: "{{ docker_username }}" | ||
12 | environment: | ||
13 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
14 | docker_login: | ||
15 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
16 | registry_url: "{{ docker_registry_url }}" | ||
17 | username: "{{ docker_registry_username }}" | ||
18 | password: "{{ docker_registry_password }}" | ||
19 | |||
20 | - name: build cadvisor image | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_image: | ||
26 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/cadvisor:latest" | ||
27 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
28 | build: | ||
29 | path: /srv/docker/cadvisor/src | ||
30 | dockerfile: deploy/Dockerfile | ||
31 | source: build | ||
32 | push: yes | ||
33 | |||
34 | - name: create cadvisor docker network | ||
35 | become: yes | ||
36 | become_user: "{{ docker_username }}" | ||
37 | environment: | ||
38 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
39 | docker_network: | ||
40 | name: "{{ cadvisor_network_name }}" | ||
41 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
42 | driver: bridge | ||
43 | ipam_config: | ||
44 | - subnet: "{{ cadvisor_subnet }}" | ||
45 | gateway: "{{ cadvisor_gateway }}" | ||
46 | |||
47 | - name: create and deploy cadvisor container | ||
48 | become: yes | ||
49 | become_user: "{{ docker_username }}" | ||
50 | environment: | ||
51 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
52 | docker_container: | ||
53 | name: "cadvisor" | ||
54 | hostname: "cadvisor" | ||
55 | image: "{{ docker_registry_url }}/{{ docker_registry_username }}/cadvisor:latest" | ||
56 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
57 | purge_networks: yes | ||
58 | networks: | ||
59 | - name: "{{ cadvisor_network_name }}" | ||
60 | ipv4_address: "{{ cadvisor_ipv4 }}" | ||
61 | ports: | ||
62 | - "127.0.0.1:{{ cadvisor_external_port }}:8080" | ||
63 | state: 'started' | ||
64 | comparisons: | ||
65 | '*': strict | ||
66 | restart_policy: unless-stopped | ||
67 | volumes: | ||
68 | - "/:/rootfs:ro" | ||
69 | - "/run/user/{{ docker_uid }}:/var/run:ro" | ||
70 | - "/sys:/sys:ro" | ||
71 | - "{{ docker_home }}/.local/share/docker:/var/lib/docker:ro" | ||
72 | - "/dev/disk:/dev/disk:ro" | ||
73 | |||
74 | - name: deploy nginx configuration | ||
75 | notify: restart nginx | ||
76 | register: nginx_config | ||
77 | copy: | ||
78 | src: "{{ cadvisor_nginx_config }}" | ||
79 | dest: /etc/nginx/sites-available/cadvisor.conf | ||
80 | owner: root | ||
81 | group: root | ||
82 | mode: '0644' | ||
83 | |||
84 | - name: symlink site | ||
85 | file: | ||
86 | src: /etc/nginx/sites-available/cadvisor.conf | ||
87 | dest: /etc/nginx/sites-enabled/cadvisor.conf | ||
88 | owner: root | ||
89 | group: root | ||
90 | state: link | ||
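Every task in this role talks to the per-user rootless Docker socket, and the container inspects that daemon by mounting /run/user/{{ docker_uid }} as /var/run and the user's .local/share/docker as /var/lib/docker. A small preflight check, sketched below and not present in the role, fails fast when the user's daemon is not running (for example when lingering is disabled for that account).

- name: check that the rootless docker socket exists (sketch)
  stat:
    path: "/run/user/{{ docker_uid }}/docker.sock"
  register: rootless_sock

- name: fail early when the rootless daemon is not up (sketch)
  assert:
    that: rootless_sock.stat.exists   # absent socket means the user's docker service is down
    fail_msg: "no rootless docker socket for uid {{ docker_uid }}"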
diff --git a/roles/services/containers/drawio/handlers/main.yml b/roles/services/containers/drawio/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/drawio/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/drawio/tasks/main.yml b/roles/services/containers/drawio/tasks/main.yml new file mode 100644 index 0000000..27bbefd --- /dev/null +++ b/roles/services/containers/drawio/tasks/main.yml | |||
@@ -0,0 +1,149 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: jgraph/drawio:21.5.0 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create drawio directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/drawio" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create drawio fonts directory | ||
21 | file: | ||
22 | path: /usr/share/fonts/drawio | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: login to docker registry | ||
29 | become: yes | ||
30 | become_user: "{{ docker_username }}" | ||
31 | environment: | ||
32 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
33 | docker_login: | ||
34 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
35 | registry_url: "{{ docker_registry_url }}" | ||
36 | username: "{{ docker_registry_username }}" | ||
37 | password: "{{ docker_registry_password }}" | ||
38 | |||
39 | - name: get drawio image | ||
40 | become: yes | ||
41 | become_user: "{{ docker_username }}" | ||
42 | environment: | ||
43 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
44 | docker_image: | ||
45 | name: "{{ image }}" | ||
46 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
47 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
48 | source: pull | ||
49 | force_source: yes | ||
50 | push: yes | ||
51 | |||
52 | - name: get export-server image | ||
53 | become: yes | ||
54 | become_user: "{{ docker_username }}" | ||
55 | environment: | ||
56 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
57 | docker_image: | ||
58 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/image-export:latest" | ||
59 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
60 | source: pull | ||
61 | force_source: yes | ||
62 | push: yes | ||
63 | |||
64 | - name: create drawio docker network | ||
65 | become: yes | ||
66 | become_user: "{{ docker_username }}" | ||
67 | docker_network: | ||
68 | name: "{{ drawio_network_name }}" | ||
69 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
70 | driver: bridge | ||
71 | ipam_config: | ||
72 | - subnet: "{{ drawio_subnet }}" | ||
73 | gateway: "{{ drawio_gateway }}" | ||
74 | |||
75 | - name: create and deploy drawio export-server | ||
76 | become: yes | ||
77 | become_user: "{{ docker_username }}" | ||
78 | environment: | ||
79 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
80 | docker_container: | ||
81 | name: "image-export" | ||
82 | image: "{{ docker_registry_url }}/{{ docker_registry_username }}/image-export:latest" | ||
83 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
84 | pull: yes | ||
85 | exposed_ports: | ||
86 | - '8000' | ||
87 | purge_networks: yes | ||
88 | networks: | ||
89 | - name: "{{ drawio_network_name }}" | ||
90 | ipv4_address: "{{ drawio_export_ipv4 }}" | ||
91 | volumes: | ||
92 | - fonts_volume:/usr/share/fonts/drawio | ||
93 | env: | ||
94 | DRAWIO_BASE_URL: "{{ drawio_base_url }}" | ||
95 | cap_drop: | ||
96 | - all | ||
97 | hostname: "image-export" | ||
98 | restart_policy: unless-stopped | ||
99 | state: 'started' | ||
100 | recreate: yes | ||
101 | |||
102 | - name: create and deploy drawio | ||
103 | become: yes | ||
104 | become_user: "{{ docker_username }}" | ||
105 | environment: | ||
106 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
107 | docker_container: | ||
108 | name: "drawio" | ||
109 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
110 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
111 | pull: yes | ||
112 | purge_networks: yes | ||
113 | networks: | ||
114 | - name: "{{ drawio_network_name }}" | ||
115 | ipv4_address: "{{ drawio_ipv4 }}" | ||
116 | ports: | ||
117 | - "127.0.0.1:8443:8443" | ||
118 | - "127.0.0.1:8400:8080" | ||
119 | links: | ||
120 | - image-export:image-export | ||
121 | env: | ||
122 | DRAWIO_SELF_CONTAINED: "1" | ||
123 | PLANTUML_URL: "http://plantuml-server:8080/" | ||
124 | EXPORT_URL: "http://image-export:8000/" | ||
125 | DRAWIO_PUSHER_MODE: "2" | ||
126 | cap_drop: | ||
127 | - all | ||
128 | hostname: "drawio" | ||
129 | restart_policy: unless-stopped | ||
130 | state: 'started' | ||
131 | recreate: yes | ||
132 | |||
133 | - name: deploy nginx configuration | ||
134 | notify: restart nginx | ||
135 | register: nginx_config | ||
136 | copy: | ||
137 | src: "{{ drawio_nginx_config }}" | ||
138 | dest: /etc/nginx/sites-available/drawio.conf | ||
139 | owner: root | ||
140 | group: root | ||
141 | mode: '0644' | ||
142 | |||
143 | - name: symlink site | ||
144 | file: | ||
145 | src: /etc/nginx/sites-available/drawio.conf | ||
146 | dest: /etc/nginx/sites-enabled/drawio.conf | ||
147 | owner: root | ||
148 | group: root | ||
149 | state: link | ||
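The drawio container points PLANTUML_URL at http://plantuml-server:8080/, but nothing in this role creates a container with that name, so PlantUML rendering presumably only works if that service is deployed elsewhere on the same network. For reference, a companion container along the lines of the sketch below would satisfy that hostname; the upstream image name is an assumption and not something this commit uses.

- name: create and deploy plantuml server (hypothetical companion, sketch)
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_container:
    name: "plantuml-server"
    hostname: "plantuml-server"
    image: plantuml/plantuml-server:jetty   # assumption: upstream image, not in this repo
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    purge_networks: yes
    networks:
      - name: "{{ drawio_network_name }}"
    restart_policy: unless-stopped
    state: 'started'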
diff --git a/roles/services/containers/firefly/handlers/main.yml b/roles/services/containers/firefly/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/firefly/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/firefly/tasks/main.yml b/roles/services/containers/firefly/tasks/main.yml new file mode 100644 index 0000000..ab389e2 --- /dev/null +++ b/roles/services/containers/firefly/tasks/main.yml | |||
@@ -0,0 +1,172 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: fireflyiii/core:version-6.0.13 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create firefly directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/firefly" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/firefly/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: create db directory | ||
29 | file: | ||
30 | path: "{{ docker_home }}/firefly/db" | ||
31 | state: directory | ||
32 | owner: "{{ docker_username }}" | ||
33 | group: "{{ docker_username }}" | ||
34 | mode: '0755' | ||
35 | |||
36 | - name: create firefly docker network | ||
37 | become: yes | ||
38 | become_user: "{{ docker_username }}" | ||
39 | docker_network: | ||
40 | name: "{{ firefly_network_name }}" | ||
41 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
42 | driver: bridge | ||
43 | ipam_config: | ||
44 | - subnet: "{{ firefly_subnet }}" | ||
45 | gateway: "{{ firefly_gateway }}" | ||
46 | |||
47 | - name: create and deploy firefly db | ||
48 | become: yes | ||
49 | become_user: "{{ docker_username }}" | ||
50 | environment: | ||
51 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
52 | docker_container: | ||
53 | name: "firefly-db" | ||
54 | hostname: "firefly-db" | ||
55 | image: postgres:alpine | ||
56 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
57 | purge_networks: yes | ||
58 | networks: | ||
59 | - name: "{{ firefly_network_name }}" | ||
60 | ipv4_address: "{{ firefly_db_ipv4 }}" | ||
61 | volumes: | ||
62 | - "{{ docker_home }}/firefly/data:/var/lib/postgresql/data" | ||
63 | env: | ||
64 | "POSTGRES_USER": "{{ firefly_postgres_user }}" | ||
65 | "POSTGRES_PASSWORD": "{{ firefly_postgres_password }}" | ||
66 | "POSTGRES_DB": "{{ firefly_postgres_db }}" | ||
67 | state: 'started' | ||
68 | recreate: yes | ||
69 | restart_policy: unless-stopped | ||
70 | |||
71 | - name: create and deploy firefly container | ||
72 | become: yes | ||
73 | become_user: "{{ docker_username }}" | ||
74 | environment: | ||
75 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
76 | docker_container: | ||
77 | name: "firefly" | ||
78 | hostname: "firefly" | ||
79 | image: "{{ image }}" | ||
80 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
81 | purge_networks: yes | ||
82 | networks: | ||
83 | - name: "{{ firefly_network_name }}" | ||
84 | ipv4_address: "{{ firefly_ipv4 }}" | ||
85 | ports: | ||
86 | - "127.0.0.1:{{ firefly_external_port }}:8080" | ||
87 | volumes: | ||
88 | - "{{ docker_home }}/firefly/upload:/var/www/html/storage/upload" | ||
89 | env: | ||
90 | "TZ": "{{ timezone }}" | ||
91 | "APP_KEY": "{{ firefly_app_key }}" | ||
92 | "STATIC_CRON_TOKEN": "{{ firefly_cron_token }}" | ||
93 | "DB_HOST": "firefly-db" | ||
94 | "DB_PORT": "5432" | ||
95 | "DB_CONNECTION": "pgsql" | ||
96 | "DB_USERNAME": "{{ firefly_postgres_user }}" | ||
97 | "DB_PASSWORD": "{{ firefly_postgres_password }}" | ||
98 | "DB_DATABASE": "{{ firefly_postgres_db }}" | ||
99 | "AUTHENTICATION_GUARD": "remote_user_guard" | ||
100 | "AUTHENTICATION_GUARD_HEADER": "HTTP_REMOTE_USER" | ||
101 | "AUTHENTICATION_GUARD_EMAIL": "HTTP_REMOTE_EMAIL" | ||
102 | "APP_URL": "https://{{ firefly_server_name }}" | ||
103 | "TRUSTED_PROXIES": "*" | ||
104 | state: 'started' | ||
105 | recreate: yes | ||
106 | restart_policy: unless-stopped | ||
107 | |||
108 | - name: create and deploy firefly importer container | ||
109 | become: yes | ||
110 | become_user: "{{ docker_username }}" | ||
111 | environment: | ||
112 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
113 | docker_container: | ||
114 | name: "firefly-importer" | ||
115 | hostname: "firefly-importer" | ||
116 | image: "fireflyiii/data-importer:version-1.3.0" | ||
117 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
118 | purge_networks: yes | ||
119 | networks: | ||
120 | - name: "{{ firefly_network_name }}" | ||
121 | ipv4_address: "{{ firefly_importer_ipv4 }}" | ||
122 | ports: | ||
123 | - "127.0.0.1:{{ firefly_importer_external_port }}:8080" | ||
124 | env: | ||
125 | "TZ": "{{ timezone }}" | ||
126 | "FIREFLY_III_URL": "http://firefly:8080" | ||
127 | "FIREFLY_III_ACCESS_TOKEN": "{{ firefly_access_token }}" | ||
128 | "VANITY_URL": "https://{{ firefly_server_name }}" | ||
129 | "TRUSTED_PROXIES": "*" | ||
130 | state: 'started' | ||
131 | recreate: yes | ||
132 | restart_policy: unless-stopped | ||
133 | |||
134 | - name: create and deploy firefly cron container | ||
135 | become: yes | ||
136 | become_user: "{{ docker_username }}" | ||
137 | environment: | ||
138 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
139 | docker_container: | ||
140 | name: "firefly-cron" | ||
141 | hostname: "firefly-cron" | ||
142 | image: alpine | ||
143 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
144 | purge_networks: yes | ||
145 | networks: | ||
146 | - name: "{{ firefly_network_name }}" | ||
147 | ipv4_address: "{{ firefly_cron_ipv4 }}" | ||
148 | env: | ||
149 | "POSTGRES_USER": "{{ firefly_postgres_user }}" | ||
150 | "POSTGRES_PASSWORD": "{{ firefly_postgres_password }}" | ||
151 | "POSTGRES_DB": "{{ firefly_postgres_db }}" | ||
152 | command: 'sh -c "echo \"0 3 * * * wget -qO- http://firefly:8080/api/v1/cron/{{ firefly_cron_token }}\" | crontab - && crond -f -L /dev/stdout"' | ||
153 | state: 'started' | ||
154 | recreate: yes | ||
155 | restart_policy: unless-stopped | ||
156 | |||
157 | - name: deploy nginx configuration | ||
158 | notify: restart nginx | ||
159 | template: | ||
160 | src: "{{ firefly_nginx_config }}" | ||
161 | dest: /etc/nginx/sites-available/firefly.conf | ||
162 | owner: root | ||
163 | group: root | ||
164 | mode: '0644' | ||
165 | |||
166 | - name: symlink site | ||
167 | file: | ||
168 | src: /etc/nginx/sites-available/firefly.conf | ||
169 | dest: /etc/nginx/sites-enabled/firefly.conf | ||
170 | owner: root | ||
171 | group: root | ||
172 | state: link | ||
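The firefly-cron container exists only to hit the Firefly III cron endpoint once a day at 03:00 using the STATIC_CRON_TOKEN set above; the POSTGRES_* variables it receives are not needed for that. Because the application is also published on 127.0.0.1:{{ firefly_external_port }}, an equivalent host-side job could be managed with the cron module. The sketch below is an alternative for comparison, not a change to the role.

- name: call the firefly-iii cron endpoint daily (host-side sketch)
  cron:
    name: "firefly-iii cron"
    minute: "0"
    hour: "3"
    user: "{{ docker_username }}"
    job: "wget -qO- http://127.0.0.1:{{ firefly_external_port }}/api/v1/cron/{{ firefly_cron_token }}"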
diff --git a/roles/services/containers/freshrss/handlers/main.yml b/roles/services/containers/freshrss/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/freshrss/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/freshrss/tasks/main.yml b/roles/services/containers/freshrss/tasks/main.yml new file mode 100644 index 0000000..26109b3 --- /dev/null +++ b/roles/services/containers/freshrss/tasks/main.yml | |||
@@ -0,0 +1,101 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: freshrss/freshrss:1.21.0 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create freshrss directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/freshrss" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get freshrss image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | name: "{{ image }}" | ||
38 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
39 | push: yes | ||
40 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
41 | source: pull | ||
42 | force_source: yes | ||
43 | |||
44 | - name: create freshrss data directory | ||
45 | file: | ||
46 | path: "{{ docker_home }}/freshrss/data" | ||
47 | state: directory | ||
48 | owner: "{{ docker_username }}" | ||
49 | group: "{{ docker_username }}" | ||
50 | mode: '0755' | ||
51 | |||
52 | - name: create freshrss docker network | ||
53 | docker_network: | ||
54 | name: "{{ freshrss_network_name }}" | ||
55 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
56 | driver: bridge | ||
57 | ipam_config: | ||
58 | - subnet: "{{ freshrss_subnet }}" | ||
59 | gateway: "{{ freshrss_gateway }}" | ||
60 | |||
61 | - name: create and deploy freshrss container | ||
62 | become: yes | ||
63 | become_user: "{{ docker_username }}" | ||
64 | environment: | ||
65 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
66 | docker_container: | ||
67 | name: "freshrss" | ||
68 | hostname: "freshrss" | ||
69 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
70 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
71 | purge_networks: yes | ||
72 | networks: | ||
73 | - name: "{{ freshrss_network_name }}" | ||
74 | ipv4_address: "{{ freshrss_ipv4 }}" | ||
75 | ports: | ||
76 | - "127.0.0.1:8090:80" | ||
77 | state: 'started' | ||
78 | recreate: yes | ||
79 | restart_policy: unless-stopped | ||
80 | volumes: | ||
81 | - "{{ docker_home }}/freshrss/data:/var/www/FreshRSS/data" | ||
82 | env: | ||
83 | "CRON_MIN": "0,15,30,45" | ||
84 | |||
85 | - name: deploy nginx configuration | ||
86 | notify: restart nginx | ||
87 | register: nginx_config | ||
88 | copy: | ||
89 | src: "{{ freshrss_nginx_config }}" | ||
90 | dest: /etc/nginx/sites-available/freshrss.conf | ||
91 | owner: root | ||
92 | group: root | ||
93 | mode: '0644' | ||
94 | |||
95 | - name: symlink site | ||
96 | file: | ||
97 | src: /etc/nginx/sites-available/freshrss.conf | ||
98 | dest: /etc/nginx/sites-enabled/freshrss.conf | ||
99 | owner: root | ||
100 | group: root | ||
101 | state: link | ||
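Unlike the login, image, and container tasks, the "create freshrss docker network" task runs without become_user or the XDG_RUNTIME_DIR environment while still pointing docker_host at the per-user socket; the same is true of the authelia and homer network tasks, and the gitea role omits become_user and docker_host from its registry, image, network, and container tasks entirely. This works when the play user can reach the rootless socket, but the consistent rootless form used elsewhere in the commit looks like the sketch below.

- name: create freshrss docker network (sketch of the consistent rootless form)
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_network:
    name: "{{ freshrss_network_name }}"
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    driver: bridge
    ipam_config:
      - subnet: "{{ freshrss_subnet }}"
        gateway: "{{ freshrss_gateway }}"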
diff --git a/roles/services/containers/gitea/handlers/main.yml b/roles/services/containers/gitea/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/gitea/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/gitea/tasks/main.yml b/roles/services/containers/gitea/tasks/main.yml new file mode 100644 index 0000000..fecec5e --- /dev/null +++ b/roles/services/containers/gitea/tasks/main.yml | |||
@@ -0,0 +1,171 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: gitea/gitea:1.19.3 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create gitea directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/gitea" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | environment: | ||
23 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
24 | docker_login: | ||
25 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
26 | registry_url: "{{ docker_registry_url }}" | ||
27 | username: "{{ docker_registry_username }}" | ||
28 | password: "{{ docker_registry_password }}" | ||
29 | |||
30 | - name: get gitea image | ||
31 | become: yes | ||
32 | docker_image: | ||
33 | name: "{{ image }}" | ||
34 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
35 | push: yes | ||
36 | source: pull | ||
37 | force_source: yes | ||
38 | |||
39 | - name: create git user on host | ||
40 | user: | ||
41 | name: "git" | ||
42 | uid: "{{ gitea_git_uid }}" | ||
43 | create_home: yes | ||
44 | generate_ssh_key: yes | ||
45 | shell: /bin/bash | ||
46 | |||
47 | - name: get git user public key | ||
48 | command: cat /home/git/.ssh/id_rsa.pub | ||
49 | register: pubkey | ||
50 | changed_when: false | ||
51 | |||
52 | - name: add git user public key to git user's authorized_keys file | ||
53 | authorized_key: | ||
54 | user: git | ||
55 | key: "{{ pubkey.stdout }}" | ||
56 | |||
57 | - name: create fake host gitea | ||
58 | blockinfile: | ||
59 | path: /usr/local/bin/gitea | ||
60 | create: yes | ||
61 | owner: root | ||
62 | group: root | ||
63 | mode: '0755' | ||
64 | block: | | ||
65 | #!/bin/sh | ||
66 | ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" | ||
67 | |||
68 | - name: create gitea data directory | ||
69 | file: | ||
70 | path: "{{ docker_home }}/gitea/data" | ||
71 | state: directory | ||
72 | owner: "{{ gitea_git_uid }}" | ||
73 | group: "{{ gitea_git_uid }}" | ||
74 | mode: '0755' | ||
75 | |||
76 | - name: create gitea config directory | ||
77 | file: | ||
78 | path: "{{ docker_home }}/gitea/config" | ||
79 | state: directory | ||
80 | owner: "{{ gitea_git_uid }}" | ||
81 | group: "{{ gitea_git_uid }}" | ||
82 | mode: '0755' | ||
83 | |||
84 | - name: copy gitea config file | ||
85 | copy: | ||
86 | src: "{{ gitea_config }}" | ||
87 | dest: "{{ docker_home }}/gitea/config/app.ini" | ||
88 | owner: "{{ gitea_git_uid }}" | ||
89 | group: "{{ gitea_git_uid }}" | ||
90 | mode: '0644' | ||
91 | |||
92 | - name: change gitea internal token | ||
93 | lineinfile: | ||
94 | path: "{{ docker_home }}/gitea/config/app.ini" | ||
95 | regexp: "^INTERNAL_TOKEN" | ||
96 | line: "INTERNAL_TOKEN = {{ gitea_internal_token }}" | ||
97 | |||
98 | - name: change gitea lfs jwt secret | ||
99 | lineinfile: | ||
100 | path: "{{ docker_home }}/gitea/config/app.ini" | ||
101 | regexp: "^LFS_JWT_SECRET" | ||
102 | line: "LFS_JWT_SECRET = {{ gitea_lfs_jwt_secret }}" | ||
103 | |||
104 | - name: set permissions on gitea data | ||
105 | file: | ||
106 | path: "{{ docker_home }}/gitea/data/" | ||
107 | owner: "{{ gitea_git_uid }}" | ||
108 | group: "{{ gitea_git_uid }}" | ||
109 | mode: u=rwX,g=rX,o=rX | ||
110 | recurse: yes | ||
111 | |||
112 | - name: set permissions on gitea config | ||
113 | file: | ||
114 | path: "{{ docker_home }}/gitea/config/" | ||
115 | owner: "{{ gitea_git_uid }}" | ||
116 | group: "{{ gitea_git_uid }}" | ||
117 | mode: u=rwX,g=rX,o=rX | ||
118 | recurse: yes | ||
119 | |||
120 | - name: create gitea docker network | ||
121 | docker_network: | ||
122 | name: "{{ gitea_network_name }}" | ||
123 | driver: bridge | ||
124 | ipam_config: | ||
125 | - subnet: "{{ gitea_subnet }}" | ||
126 | gateway: "{{ gitea_gateway }}" | ||
127 | |||
128 | - name: create and deploy gitea container | ||
129 | become: yes | ||
130 | docker_container: | ||
131 | name: "gitea" | ||
132 | hostname: "gitea" | ||
133 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
134 | purge_networks: yes | ||
135 | networks: | ||
136 | - name: "{{ gitea_network_name }}" | ||
137 | ipv4_address: "{{ gitea_ipv4 }}" | ||
138 | ports: | ||
139 | - "127.0.0.1:{{ gitea_external_port }}:3000" | ||
140 | - "127.0.0.1:2222:22" | ||
141 | state: 'started' | ||
142 | comparisons: | ||
143 | '*': strict | ||
144 | restart_policy: unless-stopped | ||
145 | env: | ||
146 | "USER_UID": "{{ gitea_git_uid }}" | ||
147 | "USER_GID": "{{ gitea_git_uid }}" | ||
148 | volumes: | ||
149 | - "{{ docker_home }}/gitea/data:/data" | ||
150 | - "{{ docker_home }}/gitea/config:/data/gitea/conf" | ||
151 | - "/home/git/.ssh/:/data/git/.ssh" | ||
152 | - "/etc/timezone:/etc/timezone:ro" | ||
153 | - "/etc/localtime:/etc/localtime:ro" | ||
154 | |||
155 | - name: deploy nginx configuration | ||
156 | notify: restart nginx | ||
157 | register: nginx_config | ||
158 | copy: | ||
159 | src: "{{ gitea_nginx_config }}" | ||
160 | dest: /etc/nginx/sites-available/gitea.conf | ||
161 | owner: root | ||
162 | group: root | ||
163 | mode: '0644' | ||
164 | |||
165 | - name: symlink site | ||
166 | file: | ||
167 | src: /etc/nginx/sites-available/gitea.conf | ||
168 | dest: /etc/nginx/sites-enabled/gitea.conf | ||
169 | owner: root | ||
170 | group: root | ||
171 | state: link | ||
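The /usr/local/bin/gitea shim is the host side of Gitea's SSH passthrough: Gitea writes authorized_keys entries (shared with the host through the /home/git/.ssh volume) whose command= points at this path, so an incoming ssh to git@host re-executes the real gitea binary inside the container over the published port 2222, carrying SSH_ORIGINAL_COMMAND along. One hedged observation: blockinfile places its managed-block marker above the #!/bin/sh line, which still works only because the shell falls back to interpreting the file when the kernel refuses to exec it; a copy task with content: would keep the shebang on line one. A simple readiness check before relying on the shim might look like the sketch below.

- name: wait for the container sshd behind the gitea shim (sketch)
  wait_for:
    host: 127.0.0.1
    port: 2222
    timeout: 60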
diff --git a/roles/services/containers/home_assistant/handlers/main.yml b/roles/services/containers/home_assistant/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/home_assistant/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/home_assistant/tasks/main.yml b/roles/services/containers/home_assistant/tasks/main.yml new file mode 100644 index 0000000..b44c529 --- /dev/null +++ b/roles/services/containers/home_assistant/tasks/main.yml | |||
@@ -0,0 +1,86 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: homeassistant/home-assistant:2023.6.3 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create home_assistant directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/home_assistant" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create config directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/home_assistant/config" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: deploy configuration | ||
29 | copy: | ||
30 | src: "{{ home_assistant_config }}" | ||
31 | dest: "{{ docker_home }}/home_assistant/config/configuration.yaml" | ||
32 | owner: "{{ docker_username }}" | ||
33 | group: "{{ docker_username }}" | ||
34 | mode: '0644' | ||
35 | |||
36 | - name: create home_assistant network | ||
37 | become: yes | ||
38 | become_user: "{{ docker_username }}" | ||
39 | docker_network: | ||
40 | name: "{{ home_assistant_network_name }}" | ||
41 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
42 | driver: bridge | ||
43 | ipam_config: | ||
44 | - subnet: "{{ home_assistant_subnet }}" | ||
45 | gateway: "{{ home_assistant_gateway }}" | ||
46 | |||
47 | - name: create and deploy home_assistant container | ||
48 | become: yes | ||
49 | become_user: "{{ docker_username }}" | ||
50 | environment: | ||
51 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
52 | docker_container: | ||
53 | name: "home_assistant" | ||
54 | hostname: "home_assistant" | ||
55 | image: "{{ image }}" | ||
56 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
57 | purge_networks: yes | ||
58 | networks: | ||
59 | - name: "{{ home_assistant_network_name }}" | ||
60 | ipv4_address: "{{ home_assistant_ipv4 }}" | ||
61 | ports: | ||
62 | - "127.0.0.1:{{ home_assistant_external_port }}:8123" | ||
63 | volumes: | ||
64 | - "{{ docker_home }}/home_assistant/config:/config" | ||
65 | env: | ||
66 | "TZ": "{{ timezone }}" | ||
67 | state: 'started' | ||
68 | recreate: yes | ||
69 | restart_policy: unless-stopped | ||
70 | |||
71 | - name: deploy nginx configuration | ||
72 | notify: restart nginx | ||
73 | template: | ||
74 | src: "{{ home_assistant_nginx_config }}" | ||
75 | dest: /etc/nginx/sites-available/home_assistant.conf | ||
76 | owner: root | ||
77 | group: root | ||
78 | mode: '0644' | ||
79 | |||
80 | - name: symlink site | ||
81 | file: | ||
82 | src: /etc/nginx/sites-available/home_assistant.conf | ||
83 | dest: /etc/nginx/sites-enabled/home_assistant.conf | ||
84 | owner: root | ||
85 | group: root | ||
86 | state: link | ||
diff --git a/roles/services/containers/homer/handlers/main.yml b/roles/services/containers/homer/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/homer/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/homer/tasks/main.yml b/roles/services/containers/homer/tasks/main.yml new file mode 100644 index 0000000..b646d12 --- /dev/null +++ b/roles/services/containers/homer/tasks/main.yml | |||
@@ -0,0 +1,122 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: b4bz/homer:v23.05.1 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create homer directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/homer" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get homer image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | name: "{{ image }}" | ||
38 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
39 | push: yes | ||
40 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
41 | source: pull | ||
42 | force_source: yes | ||
43 | |||
44 | - name: create homer assets directory | ||
45 | file: | ||
46 | path: "{{ docker_home }}/homer/assets" | ||
47 | state: directory | ||
48 | owner: "{{ docker_username }}" | ||
49 | group: "{{ docker_username }}" | ||
50 | mode: '0755' | ||
51 | |||
52 | - name: synchronize homer assets | ||
53 | synchronize: | ||
54 | src: "{{ homer_assets_dir }}" | ||
55 | dest: "{{ docker_home }}/homer/assets/" | ||
56 | delete: yes | ||
57 | |||
58 | - name: set permissions on homer assets | ||
59 | file: | ||
60 | path: "{{ docker_home }}/homer/assets/" | ||
61 | owner: "{{ docker_username }}" | ||
62 | group: "{{ docker_username }}" | ||
63 | mode: u=rwX,g=rX,o=rX | ||
64 | recurse: yes | ||
65 | |||
66 | - name: set permissions on homer assets directory | ||
67 | file: | ||
68 | path: "{{ docker_home }}/homer/assets/" | ||
69 | state: directory | ||
70 | owner: "{{ docker_username }}" | ||
71 | group: "{{ docker_username }}" | ||
72 | mode: '0755' | ||
73 | recurse: no | ||
74 | |||
75 | - name: create homer docker network | ||
76 | docker_network: | ||
77 | name: "{{ homer_network_name }}" | ||
78 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
79 | driver: bridge | ||
80 | ipam_config: | ||
81 | - subnet: "{{ homer_subnet }}" | ||
82 | gateway: "{{ homer_gateway }}" | ||
83 | |||
84 | - name: create and deploy homer container | ||
85 | become: yes | ||
86 | become_user: "{{ docker_username }}" | ||
87 | environment: | ||
88 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
89 | docker_container: | ||
90 | name: "homer" | ||
91 | hostname: "homer" | ||
92 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
93 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
94 | purge_networks: yes | ||
95 | networks: | ||
96 | - name: "{{ homer_network_name }}" | ||
97 | ipv4_address: "{{ homer_ipv4 }}" | ||
98 | ports: | ||
99 | - "127.0.0.1:8001:8080" | ||
100 | state: 'started' | ||
101 | recreate: yes | ||
102 | restart_policy: unless-stopped | ||
103 | volumes: | ||
104 | - "{{ docker_home }}/homer/assets:/www/assets" | ||
105 | |||
106 | - name: deploy nginx configuration | ||
107 | notify: restart nginx | ||
108 | register: nginx_config | ||
109 | copy: | ||
110 | src: "{{ homer_nginx_config }}" | ||
111 | dest: /etc/nginx/sites-available/homer.conf | ||
112 | owner: root | ||
113 | group: root | ||
114 | mode: '0644' | ||
115 | |||
116 | - name: symlink site | ||
117 | file: | ||
118 | src: /etc/nginx/sites-available/homer.conf | ||
119 | dest: /etc/nginx/sites-enabled/homer.conf | ||
120 | owner: root | ||
121 | group: root | ||
122 | state: link | ||
diff --git a/roles/services/containers/invidious/handlers/main.yml b/roles/services/containers/invidious/handlers/main.yml new file mode 100644 index 0000000..a3a5d0b --- /dev/null +++ b/roles/services/containers/invidious/handlers/main.yml | |||
@@ -0,0 +1,29 @@ | |||
1 | - name: login to docker registry | ||
2 | become: yes | ||
3 | become_user: "{{ docker_username }}" | ||
4 | environment: | ||
5 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
6 | docker_login: | ||
7 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
8 | registry_url: "{{ docker_registry_url }}" | ||
9 | username: "{{ docker_registry_username }}" | ||
10 | password: "{{ docker_registry_password }}" | ||
11 | |||
12 | - name: build invidious image | ||
13 | become: yes | ||
14 | become_user: "{{ docker_username }}" | ||
15 | environment: | ||
16 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
17 | docker_image: | ||
18 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/invidious:latest" | ||
19 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
20 | build: | ||
21 | path: /srv/docker/invidious/src | ||
22 | dockerfile: docker/Dockerfile | ||
23 | source: build | ||
24 | push: yes | ||
25 | |||
26 | - name: restart nginx | ||
27 | service: | ||
28 | name: nginx | ||
29 | state: restarted | ||
diff --git a/roles/services/containers/invidious/tasks/main.yml b/roles/services/containers/invidious/tasks/main.yml new file mode 100644 index 0000000..6bff0e2 --- /dev/null +++ b/roles/services/containers/invidious/tasks/main.yml | |||
@@ -0,0 +1,124 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: gitea.chudnick.com/sam/invidious:latest | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create invidious directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/invidious" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create postgres data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/invidious/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: clone invidious repo | ||
29 | become: yes | ||
30 | become_user: "{{ docker_username }}" | ||
31 | notify: | ||
32 | - login to docker registry | ||
33 | - build invidious image | ||
34 | git: | ||
35 | repo: "{{ invidious_repo }}" | ||
36 | dest: "{{ docker_home }}/invidious/src" | ||
37 | version: "master" | ||
38 | |||
39 | - meta: flush_handlers | ||
40 | |||
41 | - name: create invidious docker network | ||
42 | become: yes | ||
43 | become_user: "{{ docker_username }}" | ||
44 | docker_network: | ||
45 | name: "{{ invidious_network_name }}" | ||
46 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
47 | driver: bridge | ||
48 | ipam_config: | ||
49 | - subnet: "{{ invidious_subnet }}" | ||
50 | gateway: "{{ invidious_gateway }}" | ||
51 | |||
52 | - name: create and deploy invidious db | ||
53 | become: yes | ||
54 | become_user: "{{ docker_username }}" | ||
55 | environment: | ||
56 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
57 | docker_container: | ||
58 | name: "invidious-db" | ||
59 | hostname: "invidious-db" | ||
60 | image: postgres:13 | ||
61 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
62 | purge_networks: yes | ||
63 | networks: | ||
64 | - name: "{{ invidious_network_name }}" | ||
65 | ipv4_address: "{{ invidious_db_ipv4 }}" | ||
66 | volumes: | ||
67 | - "{{ docker_home }}/invidious/data:/var/lib/postgresql/data" | ||
68 | - "{{ docker_home }}/invidious/src/config/sql:/config/sql" | ||
69 | - "{{ docker_home }}/invidious/src/docker/init-invidious-db.sh:/docker-entrypoint-initdb.d/init-invidious-db.sh" | ||
70 | env: | ||
71 | "POSTGRES_DB": "invidious" | ||
72 | "POSTGRES_USER": "invidious" | ||
73 | "POSTGRES_PASSWORD": "{{ invidious_postgres_password }}" | ||
74 | state: 'started' | ||
75 | recreate: yes | ||
76 | restart_policy: unless-stopped | ||
77 | |||
78 | - name: create and deploy invidious container | ||
79 | become: yes | ||
80 | become_user: "{{ docker_username }}" | ||
81 | environment: | ||
82 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
83 | docker_container: | ||
84 | name: "invidious" | ||
85 | hostname: "invidious" | ||
86 | image: "{{ image }}" | ||
87 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
88 | purge_networks: yes | ||
89 | env: | ||
90 | "dbname": "invidious" | ||
91 | "user": "invidious" | ||
92 | "password": "{{ invidious_postgres_password }}" | ||
93 | "host": "invidious-db" | ||
94 | "port": "5432" | ||
95 | "check_tables": "true" | ||
96 | "https_only": "true" | ||
97 | "hsts": "true" | ||
98 | "domain": "{{ invidious_server_name }}" | ||
99 | "dark_mode": "dark" | ||
100 | networks: | ||
101 | - name: "{{ invidious_network_name }}" | ||
102 | ipv4_address: "{{ invidious_ipv4 }}" | ||
103 | ports: | ||
104 | - "127.0.0.1:{{ invidious_external_port }}:3000" | ||
105 | state: 'started' | ||
106 | recreate: yes | ||
107 | restart_policy: unless-stopped | ||
108 | |||
109 | - name: deploy nginx configuration | ||
110 | notify: restart nginx | ||
111 | template: | ||
112 | src: "{{ invidious_nginx_config }}" | ||
113 | dest: /etc/nginx/sites-available/invidious.conf | ||
114 | owner: root | ||
115 | group: root | ||
116 | mode: '0644' | ||
117 | |||
118 | - name: symlink site | ||
119 | file: | ||
120 | src: /etc/nginx/sites-available/invidious.conf | ||
121 | dest: /etc/nginx/sites-enabled/invidious.conf | ||
122 | owner: root | ||
123 | group: root | ||
124 | state: link | ||
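This role builds its own image: cloning the repository notifies the registry-login and image-build handlers, and meta: flush_handlers forces them to run before the database and application containers are deployed, so a new commit always yields a fresh image. An inline alternative is to register the git result and rebuild only when the checkout changed, sketched below assuming docker_home resolves to /srv/docker as the handler's literal build path suggests; the behaviour is the same, rebuild on new commits only.

- name: clone invidious repo
  become: yes
  become_user: "{{ docker_username }}"
  git:
    repo: "{{ invidious_repo }}"
    dest: "{{ docker_home }}/invidious/src"
    version: master
  register: invidious_src

- name: build and push the invidious image when the checkout changed (sketch)
  when: invidious_src is changed
  become: yes
  become_user: "{{ docker_username }}"
  environment:
    XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
  docker_image:
    name: "{{ docker_registry_url }}/{{ docker_registry_username }}/invidious:latest"
    docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
    build:
      path: "{{ docker_home }}/invidious/src"
      dockerfile: docker/Dockerfile
    source: build
    force_source: yes
    push: yes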
diff --git a/roles/services/containers/jellyfin/handlers/main.yml b/roles/services/containers/jellyfin/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/jellyfin/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/jellyfin/tasks/main.yml b/roles/services/containers/jellyfin/tasks/main.yml new file mode 100644 index 0000000..c7a424d --- /dev/null +++ b/roles/services/containers/jellyfin/tasks/main.yml | |||
@@ -0,0 +1,159 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: jellyfin/jellyfin:10.8.10 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create jellyfin directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/jellyfin" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get jellyfin image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | name: "{{ image }}" | ||
38 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
39 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
40 | source: pull | ||
41 | force_source: yes | ||
42 | push: yes | ||
43 | |||
44 | - name: create jellyfin config directory | ||
45 | file: | ||
46 | path: "{{ docker_home }}/jellyfin/config" | ||
47 | state: directory | ||
48 | owner: "{{ docker_username }}" | ||
49 | group: "{{ docker_username }}" | ||
50 | mode: '0755' | ||
51 | |||
52 | - name: create jellyfin cache directory | ||
53 | file: | ||
54 | path: "{{ docker_home }}/jellyfin/cache" | ||
55 | state: directory | ||
56 | owner: "{{ docker_username }}" | ||
57 | group: "{{ docker_username }}" | ||
58 | mode: '0755' | ||
59 | |||
60 | - name: create jellyfin media directory | ||
61 | file: | ||
62 | path: "{{ docker_home }}/jellyfin/media" | ||
63 | state: directory | ||
64 | group: "{{ docker_username }}" | ||
65 | mode: '0755' | ||
66 | |||
67 | - name: copy jellyfin config | ||
68 | synchronize: | ||
69 | src: "{{ jellyfin_config }}" | ||
70 | dest: "{{ docker_home }}/jellyfin/config" | ||
71 | |||
72 | - name: copy jellyfin media | ||
73 | synchronize: | ||
74 | src: "{{ jellyfin_media }}" | ||
75 | dest: "{{ docker_home }}/jellyfin/media" | ||
76 | ignore_errors: yes | ||
77 | |||
78 | - name: copy jellyfin web config | ||
79 | copy: | ||
80 | src: "{{ jellyfin_web_config }}" | ||
81 | dest: "{{ docker_home }}/jellyfin/web-config.json" | ||
82 | owner: "{{ docker_username }}" | ||
83 | group: "{{ docker_username }}" | ||
84 | mode: '0644' | ||
85 | |||
86 | - name: set config permissions | ||
87 | file: | ||
88 | path: "{{ docker_home }}/jellyfin/config" | ||
89 | owner: "{{ docker_username }}" | ||
90 | group: "{{ docker_username }}" | ||
91 | mode: '0755' | ||
92 | recurse: yes | ||
93 | |||
94 | - name: set media permissions | ||
95 | file: | ||
96 | path: "{{ docker_home }}/jellyfin/media" | ||
97 | owner: "{{ docker_username }}" | ||
98 | group: "{{ docker_username }}" | ||
99 | mode: '0755' | ||
100 | recurse: yes | ||
101 | |||
102 | - name: create jellyfin docker network | ||
103 | become: yes | ||
104 | become_user: "{{ docker_username }}" | ||
105 | docker_network: | ||
106 | name: "{{ jellyfin_network_name }}" | ||
107 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
108 | driver: bridge | ||
109 | ipam_config: | ||
110 | - subnet: "{{ jellyfin_subnet }}" | ||
111 | gateway: "{{ jellyfin_gateway }}" | ||
112 | |||
113 | - name: create and deploy jellyfin container | ||
114 | become: yes | ||
115 | become_user: "{{ docker_username }}" | ||
116 | environment: | ||
117 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
118 | docker_container: | ||
119 | name: "jellyfin" | ||
120 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
121 | pull: yes | ||
122 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
123 | purge_networks: yes | ||
124 | networks: | ||
125 | - name: "{{ jellyfin_network_name }}" | ||
126 | ipv4_address: "{{ jellyfin_ipv4 }}" | ||
127 | ports: | ||
128 | - "127.0.0.1:8096:8096" | ||
129 | volumes: | ||
130 | - "{{ docker_home }}/jellyfin/config:/config" | ||
131 | - "{{ docker_home }}/jellyfin/cache:/cache" | ||
132 | - "{{ docker_home }}/arr/data/media:/media:ro" | ||
133 | - "{{ docker_home }}/jellyfin/web-config.json:/jellyfin/jellyfin-web/config.json" | ||
134 | env: | ||
135 | JELLYFIN_PublishedServerUrl: "{{ jellyfin_url }}" | ||
136 | cap_drop: | ||
137 | - all | ||
138 | hostname: "jellyfin" | ||
139 | restart_policy: unless-stopped | ||
140 | state: 'started' | ||
141 | recreate: yes | ||
142 | |||
143 | - name: deploy nginx configuration | ||
144 | notify: restart nginx | ||
145 | register: nginx_config | ||
146 | copy: | ||
147 | src: "{{ jellyfin_nginx_config }}" | ||
148 | dest: /etc/nginx/sites-available/jellyfin.conf | ||
149 | owner: root | ||
150 | group: root | ||
151 | mode: '0644' | ||
152 | |||
153 | - name: symlink site | ||
154 | file: | ||
155 | src: /etc/nginx/sites-available/jellyfin.conf | ||
156 | dest: /etc/nginx/sites-enabled/jellyfin.conf | ||
157 | owner: root | ||
158 | group: root | ||
159 | state: link | ||
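Editor's note: the jellyfin tasks above pull their network layout and source paths from role/group variables defined elsewhere in the repository, not in this commit. A hypothetical group_vars sketch of the names this role expects — every value below is a placeholder, not taken from the actual inventory:

    # group_vars for the docker host -- illustrative values only
    jellyfin_network_name: jellyfin_net
    jellyfin_subnet: 172.25.20.0/24          # placeholder subnet
    jellyfin_gateway: 172.25.20.1
    jellyfin_ipv4: 172.25.20.2
    jellyfin_url: "https://jellyfin.example.com"
    jellyfin_config: data/jellyfin/config/
    jellyfin_media: data/jellyfin/media/
    jellyfin_web_config: data/jellyfin/web-config.json
    jellyfin_nginx_config: data/jellyfin/jellyfin.conf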
diff --git a/roles/services/containers/kanboard/handlers/main.yml b/roles/services/containers/kanboard/handlers/main.yml new file mode 100644 index 0000000..de5dcb6 --- /dev/null +++ b/roles/services/containers/kanboard/handlers/main.yml | |||
@@ -0,0 +1,18 @@ | |||
1 | - name: build pywttr-docker image | ||
2 | become: yes | ||
3 | become_user: "{{ docker_username }}" | ||
4 | environment: | ||
5 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
6 | docker_image: | ||
7 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/pywttr-docker:latest" | ||
8 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
9 | build: | ||
10 | path: /srv/docker/pywttr-docker/src | ||
11 | source: build | ||
12 | push: yes | ||
13 | force_source: yes | ||
14 | |||
15 | - name: restart nginx | ||
16 | service: | ||
17 | name: nginx | ||
18 | state: restarted | ||
diff --git a/roles/services/containers/kanboard/tasks/main.yml b/roles/services/containers/kanboard/tasks/main.yml new file mode 100644 index 0000000..1efc16e --- /dev/null +++ b/roles/services/containers/kanboard/tasks/main.yml | |||
@@ -0,0 +1,93 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: kanboard/kanboard:v1.2.30 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create kanboard directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/kanboard" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/kanboard/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: deploy custom configuration | ||
29 | copy: | ||
30 | src: "{{ kanboard_config }}" | ||
31 | dest: "{{ docker_home }}/kanboard/data/config.php" | ||
32 | owner: "{{ docker_username }}" | ||
33 | group: "{{ docker_username }}" | ||
34 | mode: '0644' | ||
35 | |||
36 | - name: create plugins directory | ||
37 | file: | ||
38 | path: "{{ docker_home }}/kanboard/plugins" | ||
39 | state: directory | ||
40 | owner: "{{ docker_username }}" | ||
41 | group: "{{ docker_username }}" | ||
42 | mode: '0755' | ||
43 | |||
44 | - name: create kanboard network | ||
45 | become: yes | ||
46 | become_user: "{{ docker_username }}" | ||
47 | docker_network: | ||
48 | name: "{{ kanboard_network_name }}" | ||
49 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
50 | driver: bridge | ||
51 | ipam_config: | ||
52 | - subnet: "{{ kanboard_subnet }}" | ||
53 | gateway: "{{ kanboard_gateway }}" | ||
54 | |||
55 | - name: create and deploy kanboard container | ||
56 | become: yes | ||
57 | become_user: "{{ docker_username }}" | ||
58 | environment: | ||
59 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
60 | docker_container: | ||
61 | name: "kanboard" | ||
62 | hostname: "kanboard" | ||
63 | image: "{{ image }}" | ||
64 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
65 | purge_networks: yes | ||
66 | networks: | ||
67 | - name: "{{ kanboard_network_name }}" | ||
68 | ipv4_address: "{{ kanboard_ipv4 }}" | ||
69 | ports: | ||
70 | - "127.0.0.1:{{ kanboard_external_port }}:80" | ||
71 | volumes: | ||
72 | - "{{ docker_home }}/kanboard/data:/var/www/app/data" | ||
73 | - "{{ docker_home }}/kanboard/plugins:/var/www/app/plugins" | ||
74 | state: 'started' | ||
75 | recreate: yes | ||
76 | restart_policy: unless-stopped | ||
77 | |||
78 | - name: deploy nginx configuration | ||
79 | notify: restart nginx | ||
80 | template: | ||
81 | src: "{{ kanboard_nginx_config }}" | ||
82 | dest: /etc/nginx/sites-available/kanboard.conf | ||
83 | owner: root | ||
84 | group: root | ||
85 | mode: '0644' | ||
86 | |||
87 | - name: symlink site | ||
88 | file: | ||
89 | src: /etc/nginx/sites-available/kanboard.conf | ||
90 | dest: /etc/nginx/sites-enabled/kanboard.conf | ||
91 | owner: root | ||
92 | group: root | ||
93 | state: link | ||
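Editor's note: like the other container roles in this commit, kanboard assumes the rootless-Docker facts (docker_username, docker_uid, docker_home) and registry credentials are already in scope. A rough sketch of a play that could apply it — the host group, vault file name, and role ordering are assumptions for illustration, not taken from this repository:

    # sketch only; names are hypothetical
    - hosts: docker_hosts
      become: yes
      vars_files:
        - vault/registry.yml          # docker_registry_url / _username / _password
      roles:
        - services/docker_rootless
        - services/containers/kanboard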
diff --git a/roles/services/containers/navidrome/handlers/main.yml b/roles/services/containers/navidrome/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/navidrome/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/navidrome/tasks/main.yml b/roles/services/containers/navidrome/tasks/main.yml new file mode 100644 index 0000000..e95e849 --- /dev/null +++ b/roles/services/containers/navidrome/tasks/main.yml | |||
@@ -0,0 +1,117 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: deluan/navidrome:0.49.2 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create navidrome directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/navidrome" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create navidrome data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/navidrome/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: create navidrome music directory | ||
29 | file: | ||
30 | path: "{{ docker_home }}/navidrome/music" | ||
31 | state: directory | ||
32 | owner: "{{ docker_username }}" | ||
33 | group: "{{ docker_username }}" | ||
34 | mode: '0755' | ||
35 | |||
36 | - name: login to docker registry | ||
37 | become: yes | ||
38 | become_user: "{{ docker_username }}" | ||
39 | environment: | ||
40 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
41 | docker_login: | ||
42 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
43 | registry_url: "{{ docker_registry_url }}" | ||
44 | username: "{{ docker_registry_username }}" | ||
45 | password: "{{ docker_registry_password }}" | ||
46 | |||
47 | - name: pull and push navidrome image | ||
48 | become: yes | ||
49 | become_user: "{{ docker_username }}" | ||
50 | environment: | ||
51 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
52 | docker_image: | ||
53 | name: "{{ image }}" | ||
54 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
55 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
56 | source: pull | ||
57 | force_source: yes | ||
58 | push: yes | ||
59 | |||
60 | - name: create navidrome docker network | ||
61 | docker_network: | ||
62 | name: "{{ navidrome_network_name }}" | ||
63 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
64 | driver: bridge | ||
65 | ipam_config: | ||
66 | - subnet: "{{ navidrome_subnet }}" | ||
67 | gateway: "{{ navidrome_gateway }}" | ||
68 | |||
69 | - name: create and deploy navidrome container | ||
70 | become: yes | ||
71 | become_user: "{{ docker_username }}" | ||
72 | environment: | ||
73 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
74 | docker_container: | ||
75 | name: "navidrome" | ||
76 | hostname: "navidrome" | ||
77 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
78 | pull: yes | ||
79 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
80 | purge_networks: yes | ||
81 | networks: | ||
82 | - name: "{{ navidrome_network_name }}" | ||
83 | ipv4_address: "{{ navidrome_ipv4 }}" | ||
84 | ports: | ||
85 | - "127.0.0.1:4533:4533" | ||
86 | state: 'started' | ||
87 | recreate: yes | ||
88 | restart_policy: unless-stopped | ||
89 | env: | ||
90 | "ND_AUTHREQUEST_LIMIT": "2" | ||
91 | "ND_PASSWORDENCRYPTIONKEY": "{{ navidrome_encryptionkey }}" | ||
92 | "ND_LASTFM_ENABLED": "false" | ||
93 | "ND_PROMETHEUS_ENABLED": "true" | ||
94 | "ND_PROMETHEUS_METRICSPATH": "/metrics" | ||
95 | "ND_REVERSEPROXYWHITELIST": "172.25.5.0/24" | ||
96 | "ND_LOGLEVEL": "debug" | ||
97 | volumes: | ||
98 | - "{{ docker_home }}/navidrome/data:/data" | ||
99 | - "{{ docker_home }}/arr/data/media/music:/music:ro" | ||
100 | |||
101 | - name: deploy nginx configuration | ||
102 | notify: restart nginx | ||
103 | register: nginx_config | ||
104 | copy: | ||
105 | src: "{{ navidrome_nginx_config }}" | ||
106 | dest: /etc/nginx/sites-available/navidrome.conf | ||
107 | owner: root | ||
108 | group: root | ||
109 | mode: '0644' | ||
110 | |||
111 | - name: symlink site | ||
112 | file: | ||
113 | src: /etc/nginx/sites-available/navidrome.conf | ||
114 | dest: /etc/nginx/sites-enabled/navidrome.conf | ||
115 | owner: root | ||
116 | group: root | ||
117 | state: link | ||
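Editor's note: because the container is published only on 127.0.0.1:4533 behind nginx, a smoke test after deployment has to run on the host itself. A minimal sketch using the uri module against Navidrome's /ping health endpoint — this task is not part of the commit:

    - name: wait for navidrome to respond
      uri:
        url: "http://127.0.0.1:4533/ping"
        status_code: 200
      register: navidrome_ping
      retries: 10
      delay: 5
      until: navidrome_ping is success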
diff --git a/roles/services/containers/nextcloud/handlers/main.yml b/roles/services/containers/nextcloud/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/nextcloud/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/nextcloud/tasks/main.yml b/roles/services/containers/nextcloud/tasks/main.yml new file mode 100644 index 0000000..fbd4a76 --- /dev/null +++ b/roles/services/containers/nextcloud/tasks/main.yml | |||
@@ -0,0 +1,184 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: nextcloud:27.0.0-apache | ||
4 | |||
5 | - name: set other facts | ||
6 | set_fact: | ||
7 | repo_tag: "{{ image }}" | ||
8 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
9 | |||
10 | - name: create nextcloud directory | ||
11 | file: | ||
12 | path: "{{ docker_home }}/nextcloud" | ||
13 | state: directory | ||
14 | owner: "{{ docker_username }}" | ||
15 | group: "{{ docker_username }}" | ||
16 | mode: '0755' | ||
17 | |||
18 | - name: create nextcloud app directory | ||
19 | file: | ||
20 | path: "{{ docker_home }}/nextcloud/app/" | ||
21 | state: directory | ||
22 | owner: "{{ docker_username }}" | ||
23 | group: "{{ docker_username }}" | ||
24 | mode: '0755' | ||
25 | |||
26 | - name: create nextcloud data directory | ||
27 | file: | ||
28 | path: "{{ docker_home }}/nextcloud/data/" | ||
29 | state: directory | ||
30 | owner: "{{ docker_username }}" | ||
31 | group: "{{ docker_username }}" | ||
32 | mode: '0755' | ||
33 | |||
34 | - name: login to docker registry | ||
35 | become: yes | ||
36 | become_user: "{{ docker_username }}" | ||
37 | environment: | ||
38 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
39 | docker_login: | ||
40 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
41 | registry_url: "{{ docker_registry_url }}" | ||
42 | username: "{{ docker_registry_username }}" | ||
43 | password: "{{ docker_registry_password }}" | ||
44 | |||
45 | - name: pull and push nextcloud image | ||
46 | become: yes | ||
47 | become_user: "{{ docker_username }}" | ||
48 | environment: | ||
49 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
50 | docker_image: | ||
51 | name: "{{ image }}" | ||
52 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
53 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
54 | source: pull | ||
55 | force_source: yes | ||
56 | push: yes | ||
57 | |||
58 | - name: create nextcloud docker network | ||
59 | docker_network: | ||
60 | name: "{{ nextcloud_network_name }}" | ||
61 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
62 | driver: bridge | ||
63 | ipam_config: | ||
64 | - subnet: "{{ nextcloud_subnet }}" | ||
65 | gateway: "{{ nextcloud_gateway }}" | ||
66 | |||
67 | - name: create and deploy postgres container | ||
68 | become: yes | ||
69 | become_user: "{{ docker_username }}" | ||
70 | environment: | ||
71 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
72 | docker_container: | ||
73 | name: "nextcloud-postgres" | ||
74 | hostname: "nextcloud-postgres" | ||
75 | image: "postgres:alpine" | ||
76 | pull: yes | ||
77 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
78 | purge_networks: yes | ||
79 | networks: | ||
80 | - name: "{{ nextcloud_network_name }}" | ||
81 | ipv4_address: "{{ nextcloud_postgres_ipv4 }}" | ||
82 | state: 'started' | ||
83 | comparisons: | ||
84 | '*': strict | ||
85 | restart_policy: unless-stopped | ||
86 | env: | ||
87 | "POSTGRES_USER": "{{ nextcloud_postgres_user }}" | ||
88 | "POSTGRES_PASSWORD": "{{ nextcloud_postgres_password }}" | ||
89 | "POSTGRES_DB": "{{ nextcloud_postgres_db }}" | ||
90 | volumes: | ||
91 | - "{{ docker_home }}/nextcloud/data:/var/lib/postgresql/data" | ||
92 | |||
93 | - name: create and deploy redis container | ||
94 | become: yes | ||
95 | become_user: "{{ docker_username }}" | ||
96 | environment: | ||
97 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
98 | docker_container: | ||
99 | name: "nextcloud-redis" | ||
100 | hostname: "nextcloud-redis" | ||
101 | image: "redis:alpine" | ||
102 | pull: yes | ||
103 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
104 | purge_networks: yes | ||
105 | networks: | ||
106 | - name: "{{ nextcloud_network_name }}" | ||
107 | ipv4_address: "{{ nextcloud_redis_ipv4 }}" | ||
108 | state: 'started' | ||
109 | comparisons: | ||
110 | '*': strict | ||
111 | restart_policy: unless-stopped | ||
112 | |||
113 | - name: create and deploy nextcloud container | ||
114 | become: yes | ||
115 | become_user: "{{ docker_username }}" | ||
116 | environment: | ||
117 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
118 | docker_container: | ||
119 | name: "nextcloud" | ||
120 | hostname: "nextcloud" | ||
121 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
122 | pull: yes | ||
123 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
124 | purge_networks: yes | ||
125 | networks: | ||
126 | - name: "{{ nextcloud_network_name }}" | ||
127 | ipv4_address: "{{ nextcloud_ipv4 }}" | ||
128 | ports: | ||
129 | - "127.0.0.1:{{ nextcloud_external_port }}:80" | ||
130 | state: 'started' | ||
131 | comparisons: | ||
132 | '*': strict | ||
133 | restart_policy: unless-stopped | ||
134 | env: | ||
135 | "POSTGRES_USER": "{{ nextcloud_postgres_user }}" | ||
136 | "POSTGRES_PASSWORD": "{{ nextcloud_postgres_password }}" | ||
137 | "POSTGRES_DB": "{{ nextcloud_postgres_db }}" | ||
138 | "POSTGRES_HOST": "nextcloud-postgres" | ||
139 | "REDIS_HOST": "nextcloud-redis" | ||
140 | "NEXTCLOUD_ADMIN_USER": "{{ nextcloud_admin }}" | ||
141 | "NEXTCLOUD_ADMIN_PASSWORD": "{{ nextcloud_admin_password }}" | ||
142 | "NEXTCLOUD_TRUSTED_DOMAINS": "{{ nextcloud_trusted_domains }}" | ||
143 | volumes: | ||
144 | - "{{ docker_home }}/nextcloud/app:/var/www/html" | ||
145 | |||
146 | - name: create and deploy nextcloud cron container | ||
147 | become: yes | ||
148 | become_user: "{{ docker_username }}" | ||
149 | environment: | ||
150 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
151 | docker_container: | ||
152 | name: "nextcloud-cron" | ||
153 | hostname: "nextcloud-cron" | ||
154 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
155 | entrypoint: "/cron.sh" | ||
156 | pull: yes | ||
157 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
158 | purge_networks: yes | ||
159 | networks: | ||
160 | - name: "{{ nextcloud_network_name }}" | ||
161 | ipv4_address: "{{ nextcloud_cron_ipv4 }}" | ||
162 | state: 'started' | ||
163 | recreate: yes | ||
164 | restart_policy: unless-stopped | ||
165 | volumes: | ||
166 | - "{{ docker_home }}/nextcloud/app:/var/www/html" | ||
167 | |||
168 | - name: deploy nginx configuration | ||
169 | notify: restart nginx | ||
170 | register: nginx_config | ||
171 | copy: | ||
172 | src: "{{ nextcloud_nginx_config }}" | ||
173 | dest: /etc/nginx/sites-available/nextcloud.conf | ||
174 | owner: root | ||
175 | group: root | ||
176 | mode: '0644' | ||
177 | |||
178 | - name: symlink site | ||
179 | file: | ||
180 | src: /etc/nginx/sites-available/nextcloud.conf | ||
181 | dest: /etc/nginx/sites-enabled/nextcloud.conf | ||
182 | owner: root | ||
183 | group: root | ||
184 | state: link | ||
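Editor's note: the nextcloud-cron container covers background jobs, but one-off occ commands still have to be executed inside the main container. A hedged sketch of how that could be driven from Ansible, assuming the community.docker collection is installed; the occ command shown is only an example:

    - name: run an occ command inside the nextcloud container
      become: yes
      become_user: "{{ docker_username }}"
      environment:
        XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
      community.docker.docker_container_exec:
        docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
        container: nextcloud
        user: www-data
        chdir: /var/www/html
        command: php occ maintenance:mode --off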
diff --git a/roles/services/containers/photoprism/defaults/main.yml b/roles/services/containers/photoprism/defaults/main.yml new file mode 100644 index 0000000..ceca8c3 --- /dev/null +++ b/roles/services/containers/photoprism/defaults/main.yml | |||
@@ -0,0 +1,10 @@ | |||
1 | photoprism_admin_user: "admin" | ||
2 | photoprism_auth_mode: "password" | ||
3 | photoprism_site_url: "https://photos.chudnick.com" | ||
4 | photoprism_external_port: 2342 | ||
5 | photoprism_nginx_config: data/photoprism/photoprism.conf | ||
6 | photoprism_network_name: photoprism_net | ||
7 | photoprism_subnet: 172.25.15.0/24 | ||
8 | photoprism_gateway: 172.25.15.1 | ||
9 | photoprism_ipv4: 172.25.15.2 | ||
10 | nextcloud_external_port: 8006 | ||
diff --git a/roles/services/containers/photoprism/handlers/main.yml b/roles/services/containers/photoprism/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/photoprism/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/photoprism/tasks/main.yml b/roles/services/containers/photoprism/tasks/main.yml new file mode 100644 index 0000000..e6ac544 --- /dev/null +++ b/roles/services/containers/photoprism/tasks/main.yml | |||
@@ -0,0 +1,115 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: photoprism/photoprism:221118-jammy | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create photoprism directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/photoprism" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get photoprism image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | name: "{{ image }}" | ||
38 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
39 | push: yes | ||
40 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
41 | source: pull | ||
42 | force_source: yes | ||
43 | |||
44 | - name: create photoprism data directory | ||
45 | file: | ||
46 | path: "{{ docker_home }}/photoprism/data" | ||
47 | state: directory | ||
48 | owner: "{{ docker_username }}" | ||
49 | group: "{{ docker_username }}" | ||
50 | mode: '0755' | ||
51 | |||
52 | - name: create photoprism photos directory | ||
53 | file: | ||
54 | path: "{{ docker_home }}/photoprism/photos" | ||
55 | state: directory | ||
56 | owner: "{{ docker_username }}" | ||
57 | group: "{{ docker_username }}" | ||
58 | mode: '0755' | ||
59 | |||
60 | - name: create photoprism docker network | ||
61 | docker_network: | ||
62 | name: "{{ photoprism_network_name }}" | ||
63 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
64 | driver: bridge | ||
65 | ipam_config: | ||
66 | - subnet: "{{ photoprism_subnet }}" | ||
67 | gateway: "{{ photoprism_gateway }}" | ||
68 | |||
69 | - name: create and deploy photoprism container | ||
70 | become: yes | ||
71 | become_user: "{{ docker_username }}" | ||
72 | environment: | ||
73 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
74 | docker_container: | ||
75 | name: "photoprism" | ||
76 | hostname: "photoprism" | ||
77 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
78 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
79 | purge_networks: yes | ||
80 | networks: | ||
81 | - name: "{{ photoprism_network_name }}" | ||
82 | ipv4_address: "{{ photoprism_ipv4 }}" | ||
83 | ports: | ||
84 | - "127.0.0.1:{{ photoprism_external_port }}:2342" | ||
85 | state: 'started' | ||
86 | recreate: yes | ||
87 | restart_policy: unless-stopped | ||
88 | volumes: | ||
89 | - "{{ docker_home }}/photoprism/photos:/photoprism/originals" | ||
90 | - "{{ docker_home }}/photoprism/data:/photoprism/storage" | ||
91 | env: | ||
92 | "PHOTOPRISM_ADMIN_USER": "{{ photoprism_admin_user }}" | ||
93 | "PHOTOPRISM_ADMIN_PASSWORD": "{{ photoprism_admin_password }}" | ||
94 | "PHOTOPRISM_AUTH_MODE": "{{ photoprism_auth_mode }}" | ||
95 | "PHOTOPRISM_SITE_URL": "{{ photoprism_site_url }}" | ||
96 | "PHOTOPRISM_DATABASE_DRIVER": "sqlite" | ||
97 | "PHOTOPRISM_DISABLE_PLACES": "true" | ||
98 | |||
99 | - name: deploy nginx configuration | ||
100 | notify: restart nginx | ||
101 | register: nginx_config | ||
102 | copy: | ||
103 | src: "{{ photoprism_nginx_config }}" | ||
104 | dest: /etc/nginx/sites-available/photoprism.conf | ||
105 | owner: root | ||
106 | group: root | ||
107 | mode: '0644' | ||
108 | |||
109 | - name: symlink site | ||
110 | file: | ||
111 | src: /etc/nginx/sites-available/photoprism.conf | ||
112 | dest: /etc/nginx/sites-enabled/photoprism.conf | ||
113 | owner: root | ||
114 | group: root | ||
115 | state: link | ||
diff --git a/roles/services/containers/pihole_exporter/tasks/main.yml b/roles/services/containers/pihole_exporter/tasks/main.yml new file mode 100644 index 0000000..4c52dc7 --- /dev/null +++ b/roles/services/containers/pihole_exporter/tasks/main.yml | |||
@@ -0,0 +1,97 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: ekofr/pihole-exporter:v0.4.0 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create pihole_exporter directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/pihole_exporter" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get pihole_exporter image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | name: "{{ image }}" | ||
38 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
39 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
40 | source: pull | ||
41 | force_source: yes | ||
42 | push: yes | ||
43 | |||
44 | - name: create pihole_exporter docker network | ||
45 | become: yes | ||
46 | become_user: "{{ docker_username }}" | ||
47 | docker_network: | ||
48 | name: "{{ pihole_exporter_network_name }}" | ||
49 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
50 | driver: bridge | ||
51 | ipam_config: | ||
52 | - subnet: "{{ pihole_exporter_subnet }}" | ||
53 | gateway: "{{ pihole_exporter_gateway }}" | ||
54 | |||
55 | - name: create and deploy pihole_exporter container | ||
56 | become: yes | ||
57 | become_user: "{{ docker_username }}" | ||
58 | environment: | ||
59 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
60 | docker_container: | ||
61 | name: "pihole_exporter" | ||
62 | hostname: "pihole_exporter" | ||
63 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
64 | pull: yes | ||
65 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
66 | purge_networks: yes | ||
67 | networks: | ||
68 | - name: "{{ pihole_exporter_network_name }}" | ||
69 | ports: | ||
70 | - "127.0.0.1:9617:9617" | ||
71 | state: 'started' | ||
72 | recreate: yes | ||
73 | restart_policy: unless-stopped | ||
74 | env: | ||
75 | "PIHOLE_HOSTNAME": "{{ pihole_ip }}" | ||
76 | "PIHOLE_API_TOKEN": "{{ pihole_api_token }}" | ||
77 | "PORT": "{{ pihole_api_port }}" | ||
78 | cap_drop: | ||
79 | - all | ||
80 | |||
81 | - name: deploy nginx configuration | ||
82 | notify: restart nginx | ||
83 | register: nginx_config | ||
84 | copy: | ||
85 | src: "{{ pihole_exporter_nginx_config }}" | ||
86 | dest: /etc/nginx/sites-available/pihole-exporter.conf | ||
87 | owner: root | ||
88 | group: root | ||
89 | mode: '0644' | ||
90 | |||
91 | - name: symlink site | ||
92 | file: | ||
93 | src: /etc/nginx/sites-available/pihole-exporter.conf | ||
94 | dest: /etc/nginx/sites-enabled/pihole-exporter.conf | ||
95 | owner: root | ||
96 | group: root | ||
97 | state: link | ||
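Editor's note: this role notifies "restart nginx" without shipping its own handlers file, so it relies on that handler being defined elsewhere. On the monitoring side, the exporter published on 127.0.0.1:9617 (and proxied by the vhost above) needs a matching Prometheus scrape job; a minimal, hypothetical fragment with a placeholder target name:

    # prometheus.yml fragment -- target name and scheme are illustrative
    scrape_configs:
      - job_name: pihole
        scheme: https
        static_configs:
          - targets: ['pihole-exporter.example.com']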
diff --git a/roles/services/containers/pywttr_docker/handlers/main.yml b/roles/services/containers/pywttr_docker/handlers/main.yml new file mode 100644 index 0000000..de5dcb6 --- /dev/null +++ b/roles/services/containers/pywttr_docker/handlers/main.yml | |||
@@ -0,0 +1,18 @@ | |||
1 | - name: build pywttr-docker image | ||
2 | become: yes | ||
3 | become_user: "{{ docker_username }}" | ||
4 | environment: | ||
5 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
6 | docker_image: | ||
7 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/pywttr-docker:latest" | ||
8 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
9 | build: | ||
10 | path: /srv/docker/pywttr-docker/src | ||
11 | source: build | ||
12 | push: yes | ||
13 | force_source: yes | ||
14 | |||
15 | - name: restart nginx | ||
16 | service: | ||
17 | name: nginx | ||
18 | state: restarted | ||
diff --git a/roles/services/containers/pywttr_docker/tasks/main.yml b/roles/services/containers/pywttr_docker/tasks/main.yml new file mode 100644 index 0000000..45f7b2f --- /dev/null +++ b/roles/services/containers/pywttr_docker/tasks/main.yml | |||
@@ -0,0 +1,74 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: gitea.chudnick.com/sam/pywttr-docker:latest | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create pywttr-docker directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/pywttr-docker" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: clone pywttr-docker repository | ||
21 | notify: build pywttr-docker image | ||
22 | git: | ||
23 | repo: https://gitea.chudnick.com/sam/pywttr-docker | ||
24 | dest: "{{ docker_home }}/pywttr-docker/src" | ||
25 | |||
26 | - meta: flush_handlers | ||
27 | |||
28 | - name: create pywttr-docker network | ||
29 | become: yes | ||
30 | become_user: "{{ docker_username }}" | ||
31 | docker_network: | ||
32 | name: "{{ pywttr_docker_network_name }}" | ||
33 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
34 | driver: bridge | ||
35 | ipam_config: | ||
36 | - subnet: "{{ pywttr_docker_subnet }}" | ||
37 | gateway: "{{ pywttr_docker_gateway }}" | ||
38 | |||
39 | - name: create and deploy pywttr-docker container | ||
40 | become: yes | ||
41 | become_user: "{{ docker_username }}" | ||
42 | environment: | ||
43 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
44 | docker_container: | ||
45 | name: "pywttr-docker" | ||
46 | hostname: "pywttr-docker" | ||
47 | image: "{{ image }}" | ||
48 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
49 | purge_networks: yes | ||
50 | networks: | ||
51 | - name: "{{ pywttr_docker_network_name }}" | ||
52 | ipv4_address: "{{ pywttr_docker_ipv4 }}" | ||
53 | ports: | ||
54 | - "127.0.0.1:{{ pywttr_docker_external_port }}:8000" | ||
55 | state: 'started' | ||
56 | recreate: yes | ||
57 | restart_policy: unless-stopped | ||
58 | |||
59 | - name: deploy nginx configuration | ||
60 | notify: restart nginx | ||
61 | template: | ||
62 | src: "{{ pywttr_docker_nginx_config }}" | ||
63 | dest: /etc/nginx/sites-available/pywttr-docker.conf | ||
64 | owner: root | ||
65 | group: root | ||
66 | mode: '0644' | ||
67 | |||
68 | - name: symlink site | ||
69 | file: | ||
70 | src: /etc/nginx/sites-available/pywttr-docker.conf | ||
71 | dest: /etc/nginx/sites-enabled/pywttr-docker.conf | ||
72 | owner: root | ||
73 | group: root | ||
74 | state: link | ||
diff --git a/roles/services/containers/renovate/tasks/main.yml b/roles/services/containers/renovate/tasks/main.yml new file mode 100644 index 0000000..bbbfe11 --- /dev/null +++ b/roles/services/containers/renovate/tasks/main.yml | |||
@@ -0,0 +1,87 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: renovate/renovate:35.141.3-slim | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create renovate directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/renovate" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: create renovate docker network | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | docker_network: | ||
35 | name: "{{ renovate_network_name }}" | ||
36 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
37 | driver: bridge | ||
38 | ipam_config: | ||
39 | - subnet: "{{ renovate_subnet }}" | ||
40 | gateway: "{{ renovate_gateway }}" | ||
41 | |||
42 | - name: pull and push renovate image | ||
43 | become: yes | ||
44 | become_user: "{{ docker_username }}" | ||
45 | environment: | ||
46 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
47 | docker_image: | ||
48 | name: "{{ image }}" | ||
49 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
50 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
51 | source: pull | ||
52 | force_source: yes | ||
53 | push: yes | ||
54 | |||
55 | - name: create and deploy renovate container | ||
56 | become: yes | ||
57 | become_user: "{{ docker_username }}" | ||
58 | environment: | ||
59 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
60 | docker_container: | ||
61 | name: "renovate" | ||
62 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
63 | pull: yes | ||
64 | recreate: yes | ||
65 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
66 | purge_networks: yes | ||
67 | networks: | ||
68 | - name: "{{ renovate_network_name }}" | ||
69 | ipv4_address: "{{ renovate_ipv4 }}" | ||
70 | env: | ||
71 | "RENOVATE_ENDPOINT": "{{ renovate_endpoint }}" | ||
72 | "RENOVATE_PLATFORM": "gitea" | ||
73 | "RENOVATE_TOKEN": "{{ renovate_token }}" | ||
74 | "RENOVATE_AUTODISCOVER": "true" | ||
75 | "LOG_LEVEL": "debug" | ||
76 | "RENOVATE_GIT_AUTHOR": "{{ renovate_author }}" | ||
77 | restart_policy: "no" | ||
78 | state: 'started' | ||
79 | |||
80 | |||
81 | - name: create cron job to run renovate container daily | ||
82 | cron: | ||
83 | name: "run renovate" | ||
84 | job: "docker start renovate" | ||
85 | user: "{{ docker_username }}" | ||
86 | minute: "0" | ||
87 | hour: "6" | ||
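Editor's note: the cron entry starts the container with the plain docker CLI, which in a rootless setup only finds the daemon if DOCKER_HOST points at the user's socket when cron runs. If that is not already arranged elsewhere, one way to handle it is an extra cron environment entry — a sketch, not part of this commit:

    - name: expose the rootless docker socket to cron jobs
      cron:
        name: DOCKER_HOST
        env: yes
        value: "unix:///run/user/{{ docker_uid }}/docker.sock"
        user: "{{ docker_username }}"

The cron module writes this as an environment line in the user's crontab, so the existing "run renovate" job picks it up unchanged.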
diff --git a/roles/services/containers/searxng/handlers/main.yml b/roles/services/containers/searxng/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/searxng/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/searxng/tasks/main.yml b/roles/services/containers/searxng/tasks/main.yml new file mode 100644 index 0000000..fa7609c --- /dev/null +++ b/roles/services/containers/searxng/tasks/main.yml | |||
@@ -0,0 +1,170 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: "searxng/searxng:2023.6.16-71b6ff07" | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create searxng directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/searxng" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: login to docker registry | ||
21 | become: yes | ||
22 | become_user: "{{ docker_username }}" | ||
23 | environment: | ||
24 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
25 | docker_login: | ||
26 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
27 | registry_url: "{{ docker_registry_url }}" | ||
28 | username: "{{ docker_registry_username }}" | ||
29 | password: "{{ docker_registry_password }}" | ||
30 | |||
31 | - name: get searxng image | ||
32 | become: yes | ||
33 | become_user: "{{ docker_username }}" | ||
34 | environment: | ||
35 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
36 | docker_image: | ||
37 | source: pull | ||
38 | force_source: yes | ||
39 | name: "{{ image }}" | ||
40 | repository: "{{ custom_registry }}/{{ repo_tag }}" | ||
41 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
42 | push: yes | ||
43 | |||
44 | - name: create searxng config directory | ||
45 | file: | ||
46 | path: "{{ docker_home }}/searxng/config" | ||
47 | state: directory | ||
48 | owner: "{{ docker_username }}" | ||
49 | group: "{{ docker_username }}" | ||
50 | mode: '0755' | ||
51 | |||
52 | - name: create redis_searxng directory | ||
53 | file: | ||
54 | path: "{{ docker_home }}/redis_searxng" | ||
55 | state: directory | ||
56 | owner: "{{ docker_username }}" | ||
57 | group: "{{ docker_username }}" | ||
58 | mode: '0755' | ||
59 | |||
60 | - name: create redis_searxng data directory | ||
61 | file: | ||
62 | path: "{{ docker_home }}/redis_searxng/data" | ||
63 | state: directory | ||
64 | owner: "{{ docker_username }}" | ||
65 | group: "{{ docker_username }}" | ||
66 | mode: '0755' | ||
67 | |||
68 | - name: place searxng config in proper location | ||
69 | copy: | ||
70 | src: "{{ searxng_config }}" | ||
71 | dest: "{{ docker_home }}/searxng/config/settings.yml" | ||
72 | owner: root | ||
73 | group: docker | ||
74 | mode: '0644' | ||
75 | |||
76 | - name: place uwsgi config | ||
77 | copy: | ||
78 | src: "{{ searxng_uwsgi_config }}" | ||
79 | dest: "{{ docker_home }}/searxng/config/uwsgi.ini" | ||
80 | owner: root | ||
81 | group: docker | ||
82 | mode: '0644' | ||
83 | |||
84 | - name: create searxng docker network | ||
85 | docker_network: | ||
86 | name: "{{ searxng_network_name }}" | ||
87 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
88 | driver: bridge | ||
89 | ipam_config: | ||
90 | - subnet: "{{ searxng_subnet }}" | ||
91 | gateway: "{{ searxng_gateway }}" | ||
92 | |||
93 | - name: create and deploy searxng container | ||
94 | become: yes | ||
95 | become_user: "{{ docker_username }}" | ||
96 | environment: | ||
97 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
98 | docker_container: | ||
99 | name: "searxng" | ||
100 | image: "{{ custom_registry }}/{{ repo_tag }}" | ||
101 | pull: yes | ||
102 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
103 | purge_networks: yes | ||
104 | networks: | ||
105 | - name: "{{ searxng_network_name }}" | ||
106 | ipv4_address: "{{ searxng_ipv4 }}" | ||
107 | ports: | ||
108 | - "127.0.0.1:8080:8080" | ||
109 | volumes: | ||
110 | - "{{ docker_home }}/searxng/config:/etc/searxng" | ||
111 | env: | ||
112 | SEARXNG_BASE_URL: "https://searxng.chudnick.com/" | ||
113 | cap_drop: | ||
114 | - all | ||
115 | capabilities: | ||
116 | - CHOWN | ||
117 | - SETGID | ||
118 | - SETUID | ||
119 | - DAC_OVERRIDE | ||
120 | hostname: "searxng" | ||
121 | restart_policy: unless-stopped | ||
122 | state: 'started' | ||
123 | recreate: yes | ||
124 | |||
125 | - name: create and deploy redis container | ||
126 | become: yes | ||
127 | become_user: "{{ docker_username }}" | ||
128 | environment: | ||
129 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
130 | docker_container: | ||
131 | restart_policy: unless-stopped | ||
132 | name: "redis_searxng" | ||
133 | image: redis:alpine | ||
134 | pull: yes | ||
135 | command: redis-server --save "" --appendonly "no" | ||
136 | purge_networks: yes | ||
137 | networks: | ||
138 | - name: "{{ searxng_network_name }}" | ||
139 | ipv4_address: "{{ redis_searxng_ipv4 }}" | ||
140 | tmpfs: | ||
141 | - /var/lib/redis | ||
142 | cap_drop: | ||
143 | - all | ||
144 | capabilities: | ||
145 | - SETGID | ||
146 | - SETUID | ||
147 | - DAC_OVERRIDE | ||
148 | hostname: "redis" | ||
149 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
150 | state: 'started' | ||
151 | comparisons: | ||
152 | '*': strict | ||
153 | |||
154 | - name: deploy nginx configuration | ||
155 | notify: restart nginx | ||
156 | register: nginx_config | ||
157 | copy: | ||
158 | src: "{{ searxng_nginx_config }}" | ||
159 | dest: /etc/nginx/sites-available/searxng.conf | ||
160 | owner: root | ||
161 | group: root | ||
162 | mode: '0644' | ||
163 | |||
164 | - name: symlink site | ||
165 | file: | ||
166 | src: /etc/nginx/sites-available/searxng.conf | ||
167 | dest: /etc/nginx/sites-enabled/searxng.conf | ||
168 | owner: root | ||
169 | group: root | ||
170 | state: link | ||
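Editor's note: the settings.yml copied into the config directory is not included in this commit, but for the dedicated redis container above to be used it has to be referenced there. A minimal, hypothetical fragment; since the file is deployed with copy rather than template, the address has to be a literal value matching redis_searxng_ipv4:

    # settings.yml fragment -- illustrative values only
    server:
      secret_key: "change-me"                 # placeholder
      limiter: true
    redis:
      url: "redis://172.25.6.3:6379/0"        # placeholder for the redis_searxng_ipv4 address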
diff --git a/roles/services/containers/text_generation/handlers/main.yml b/roles/services/containers/text_generation/handlers/main.yml new file mode 100644 index 0000000..7aab823 --- /dev/null +++ b/roles/services/containers/text_generation/handlers/main.yml | |||
@@ -0,0 +1,29 @@ | |||
1 | - name: login to docker registry | ||
2 | become: yes | ||
3 | become_user: "{{ docker_username }}" | ||
4 | environment: | ||
5 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
6 | docker_login: | ||
7 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
8 | registry_url: "{{ docker_registry_url }}" | ||
9 | username: "{{ docker_registry_username }}" | ||
10 | password: "{{ docker_registry_password }}" | ||
11 | |||
12 | - name: build text-generation image | ||
13 | become: yes | ||
14 | become_user: "{{ docker_username }}" | ||
15 | environment: | ||
16 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
17 | docker_image: | ||
18 | name: "{{ docker_registry_url }}/{{ docker_registry_username }}/text-generation:latest" | ||
19 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
20 | build: | ||
21 | path: /srv/docker/text-generation/src | ||
22 | source: build | ||
23 | push: yes | ||
24 | force_source: yes | ||
25 | |||
26 | - name: restart nginx | ||
27 | service: | ||
28 | name: nginx | ||
29 | state: restarted | ||
diff --git a/roles/services/containers/text_generation/tasks/main.yml b/roles/services/containers/text_generation/tasks/main.yml new file mode 100644 index 0000000..80988a6 --- /dev/null +++ b/roles/services/containers/text_generation/tasks/main.yml | |||
@@ -0,0 +1,89 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: gitea.chudnick.com/sam/text-generation:latest | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create text-generation directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/text-generation" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create models directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/text-generation/models" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: clone text-generation repository | ||
29 | notify: | ||
30 | - login to docker registry | ||
31 | - build text-generation image | ||
32 | git: | ||
33 | repo: https://gitea.chudnick.com/sam/text-generation-docker | ||
34 | dest: "{{ docker_home }}/text-generation/src" | ||
35 | |||
36 | - meta: flush_handlers | ||
37 | |||
38 | - name: create text-generation network | ||
39 | become: yes | ||
40 | become_user: "{{ docker_username }}" | ||
41 | docker_network: | ||
42 | name: "{{ text_generation_network_name }}" | ||
43 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
44 | driver: bridge | ||
45 | ipam_config: | ||
46 | - subnet: "{{ text_generation_subnet }}" | ||
47 | gateway: "{{ text_generation_gateway }}" | ||
48 | |||
49 | - name: create and deploy text-generation container | ||
50 | become: yes | ||
51 | become_user: "{{ docker_username }}" | ||
52 | environment: | ||
53 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
54 | docker_container: | ||
55 | name: "text-generation" | ||
56 | hostname: "text-generation" | ||
57 | image: "{{ image }}" | ||
58 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
59 | purge_networks: yes | ||
60 | networks: | ||
61 | - name: "{{ text_generation_network_name }}" | ||
62 | ipv4_address: "{{ text_generation_ipv4 }}" | ||
63 | volumes: | ||
64 | - "{{ docker_home }}/text-generation/models:/models" | ||
65 | ports: | ||
66 | - "127.0.0.1:{{ text_generation_external_port }}:7860" | ||
67 | - "127.0.0.1:{{ text_generation_api_port }}:5005" | ||
68 | - "127.0.0.1:{{ text_generation_api_stream_port }}:5000" | ||
69 | command: "--cpu --listen --listen-port 7860 --chat --auto-devices --mlock" | ||
70 | state: 'started' | ||
71 | recreate: yes | ||
72 | restart_policy: unless-stopped | ||
73 | |||
74 | - name: deploy nginx configuration | ||
75 | notify: restart nginx | ||
76 | template: | ||
77 | src: "{{ text_generation_nginx_config }}" | ||
78 | dest: /etc/nginx/sites-available/text-generation.conf | ||
79 | owner: root | ||
80 | group: root | ||
81 | mode: '0644' | ||
82 | |||
83 | - name: symlink site | ||
84 | file: | ||
85 | src: /etc/nginx/sites-available/text-generation.conf | ||
86 | dest: /etc/nginx/sites-enabled/text-generation.conf | ||
87 | owner: root | ||
88 | group: root | ||
89 | state: link | ||
diff --git a/roles/services/containers/vaultwarden/handlers/main.yml b/roles/services/containers/vaultwarden/handlers/main.yml new file mode 100644 index 0000000..5463835 --- /dev/null +++ b/roles/services/containers/vaultwarden/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
diff --git a/roles/services/containers/vaultwarden/tasks/main.yml b/roles/services/containers/vaultwarden/tasks/main.yml new file mode 100644 index 0000000..fa63b58 --- /dev/null +++ b/roles/services/containers/vaultwarden/tasks/main.yml | |||
@@ -0,0 +1,79 @@ | |||
1 | - name: set image fact | ||
2 | set_fact: | ||
3 | image: vaultwarden/server:1.28.1 | ||
4 | |||
5 | - name: set other facts | ||
6 | vars: | ||
7 | array: "{{ image.split('/', 1) }}" | ||
8 | set_fact: | ||
9 | repo_tag: "{{ array.1 }}" | ||
10 | custom_registry: "{{ docker_registry_url + '/' + docker_registry_username }}" | ||
11 | |||
12 | - name: create vaultwarden directory | ||
13 | file: | ||
14 | path: "{{ docker_home }}/vaultwarden" | ||
15 | state: directory | ||
16 | owner: "{{ docker_username }}" | ||
17 | group: "{{ docker_username }}" | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: create data directory | ||
21 | file: | ||
22 | path: "{{ docker_home }}/vaultwarden/data" | ||
23 | state: directory | ||
24 | owner: "{{ docker_username }}" | ||
25 | group: "{{ docker_username }}" | ||
26 | mode: '0755' | ||
27 | |||
28 | - name: create vaultwarden docker network | ||
29 | become: yes | ||
30 | become_user: "{{ docker_username }}" | ||
31 | docker_network: | ||
32 | name: "{{ vaultwarden_network_name }}" | ||
33 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
34 | driver: bridge | ||
35 | ipam_config: | ||
36 | - subnet: "{{ vaultwarden_subnet }}" | ||
37 | gateway: "{{ vaultwarden_gateway }}" | ||
38 | |||
39 | - name: create and deploy vaultwarden container | ||
40 | become: yes | ||
41 | become_user: "{{ docker_username }}" | ||
42 | environment: | ||
43 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
44 | docker_container: | ||
45 | name: "vaultwarden" | ||
46 | hostname: "vaultwarden" | ||
47 | image: "{{ image }}" | ||
48 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
49 | purge_networks: yes | ||
50 | networks: | ||
51 | - name: "{{ vaultwarden_network_name }}" | ||
52 | ipv4_address: "{{ vaultwarden_ipv4 }}" | ||
53 | ports: | ||
54 | - "127.0.0.1:{{ vaultwarden_external_port }}:80" | ||
55 | volumes: | ||
56 | - "{{ docker_home }}/vaultwarden/data:/data" | ||
57 | env: | ||
58 | "DOMAIN": "https://{{ vaultwarden_server_name }}" | ||
59 | "DISABLE_ADMIN_TOKEN": "true" | ||
60 | state: 'started' | ||
61 | recreate: yes | ||
62 | restart_policy: unless-stopped | ||
63 | |||
64 | - name: deploy nginx configuration | ||
65 | notify: restart nginx | ||
66 | template: | ||
67 | src: "{{ vaultwarden_nginx_config }}" | ||
68 | dest: /etc/nginx/sites-available/vaultwarden.conf | ||
69 | owner: root | ||
70 | group: root | ||
71 | mode: '0644' | ||
72 | |||
73 | - name: symlink site | ||
74 | file: | ||
75 | src: /etc/nginx/sites-available/vaultwarden.conf | ||
76 | dest: /etc/nginx/sites-enabled/vaultwarden.conf | ||
77 | owner: root | ||
78 | group: root | ||
79 | state: link | ||
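Editor's note: everything Vaultwarden stores lives under the data volume, so a periodic backup of that directory is cheap insurance. A sketch of a nightly tarball job, with the schedule and destination chosen arbitrarily for illustration:

    - name: schedule a nightly vaultwarden data backup
      cron:
        name: "backup vaultwarden data"
        user: "{{ docker_username }}"
        minute: "30"
        hour: "3"
        job: 'tar czf {{ docker_home }}/vaultwarden/backup-$(date +\%F).tar.gz -C {{ docker_home }}/vaultwarden data'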
diff --git a/roles/services/docker_rootless/defaults/main.yml b/roles/services/docker_rootless/defaults/main.yml new file mode 100644 index 0000000..064825f --- /dev/null +++ b/roles/services/docker_rootless/defaults/main.yml | |||
@@ -0,0 +1,18 @@ | |||
1 | docker_packages: | ||
2 | - docker-ce | ||
3 | - acl | ||
4 | - docker-ce-cli | ||
5 | - docker-ce-rootless-extras | ||
6 | - docker-compose-plugin | ||
7 | - uidmap | ||
8 | - dbus-user-session | ||
9 | - slirp4netns | ||
10 | - fuse-overlayfs | ||
11 | |||
12 | docker_username: docker_rootless | ||
13 | docker_uid: 2000 | ||
14 | |||
15 | docker_home: /srv/docker | ||
16 | docker_config: /srv/docker/config | ||
17 | docker_data: /srv/docker/data | ||
18 | |||
diff --git a/roles/services/docker_rootless/handlers/main.yml b/roles/services/docker_rootless/handlers/main.yml new file mode 100644 index 0000000..510db7b --- /dev/null +++ b/roles/services/docker_rootless/handlers/main.yml | |||
@@ -0,0 +1,6 @@ | |||
1 | - name: update repos | ||
2 | apt: | ||
3 | update_cache: yes | ||
4 | register: apt_upgrade | ||
5 | retries: 100 | ||
6 | until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) | ||
diff --git a/roles/services/docker_rootless/tasks/main.yml b/roles/services/docker_rootless/tasks/main.yml new file mode 100644 index 0000000..9b2e527 --- /dev/null +++ b/roles/services/docker_rootless/tasks/main.yml | |||
@@ -0,0 +1,93 @@ | |||
1 | - name: install packages | ||
2 | package: | ||
3 | name: | ||
4 | - extrepo | ||
5 | - nginx | ||
6 | - python3-docker | ||
7 | state: latest | ||
8 | |||
9 | - name: allow http (80/tcp) traffic | ||
10 | ufw: | ||
11 | rule: allow | ||
12 | port: '80' | ||
13 | proto: tcp | ||
14 | |||
15 | - name: allow https (443/tcp) traffic | ||
16 | ufw: | ||
17 | rule: allow | ||
18 | port: '443' | ||
19 | proto: tcp | ||
20 | |||
21 | - name: enable docker-ce repo | ||
22 | register: result | ||
23 | changed_when: result.stdout is search("skipped") | ||
24 | notify: update repos | ||
25 | command: | ||
26 | cmd: extrepo enable docker-ce | ||
27 | creates: /etc/apt/sources.list.d/extrepo_docker-ce.sources | ||
28 | |||
29 | - meta: flush_handlers | ||
30 | |||
31 | - name: update docker-ce repo definition | ||
32 | changed_when: false | ||
33 | command: | ||
34 | cmd: extrepo update docker-ce | ||
35 | |||
36 | - name: create docker user | ||
37 | user: | ||
38 | name: "{{ docker_username }}" | ||
39 | shell: /bin/bash | ||
40 | uid: "{{ docker_uid }}" | ||
41 | home: "{{ docker_home }}" | ||
42 | create_home: yes | ||
43 | |||
44 | - name: add XDG_RUNTIME_DIR to docker user bash profile | ||
45 | lineinfile: | ||
46 | path: "{{ docker_home }}/.bash_profile" | ||
47 | line: "export XDG_RUNTIME_DIR=/run/user/{{ docker_uid }}" | ||
48 | insertafter: EOF | ||
49 | owner: "{{ docker_username }}" | ||
50 | group: "{{ docker_username }}" | ||
51 | mode: "0644" | ||
52 | create: yes | ||
53 | |||
54 | - name: install docker packages | ||
55 | package: | ||
56 | name: "{{ docker_packages }}" | ||
57 | state: latest | ||
58 | |||
59 | - name: add docker user to /etc/subuid | ||
60 | lineinfile: | ||
61 | path: /etc/subuid | ||
62 | line: "{{ docker_username }}:100000:65536" | ||
63 | insertafter: EOF | ||
64 | |||
65 | - name: add docker user to /etc/subgid | ||
66 | lineinfile: | ||
67 | path: /etc/subgid | ||
68 | line: "{{ docker_username }}:100000:65536" | ||
69 | insertafter: EOF | ||
70 | |||
71 | - name: enable lingering for docker user | ||
72 | command: | ||
73 | cmd: loginctl enable-linger "{{ docker_username }}" | ||
74 | creates: "/var/lib/systemd/linger/{{ docker_username }}" | ||
75 | |||
76 | - name: run docker rootless setup script | ||
77 | become_user: "{{ docker_username }}" | ||
78 | register: setup_script | ||
79 | command: | ||
80 | cmd: /usr/bin/dockerd-rootless-setuptool.sh install --force | ||
81 | creates: "{{ docker_home }}/.config/systemd/user/docker.service" | ||
82 | environment: | ||
83 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
84 | |||
85 | - name: enable and start docker service | ||
86 | become_user: "{{ docker_username }}" | ||
87 | systemd: | ||
88 | name: docker | ||
89 | enabled: yes | ||
90 | state: started | ||
91 | scope: user | ||
92 | environment: | ||
93 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
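Editor's note: a cheap way to confirm the rootless daemon actually came up for the service user is to query it over the same socket the container roles use. A verification sketch, assuming the community.docker collection is available; this task is not part of the commit:

    - name: verify the rootless docker daemon is reachable
      become: yes
      become_user: "{{ docker_username }}"
      environment:
        XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}"
      community.docker.docker_host_info:
        docker_host: "unix://run/user/{{ docker_uid }}/docker.sock"
      register: rootless_docker_info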
diff --git a/roles/services/freeipa/client/defaults/main.yml b/roles/services/freeipa/client/defaults/main.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/roles/services/freeipa/client/defaults/main.yml | |||
diff --git a/roles/services/freeipa/client/tasks/main.yml b/roles/services/freeipa/client/tasks/main.yml new file mode 100644 index 0000000..ccb047e --- /dev/null +++ b/roles/services/freeipa/client/tasks/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | --- | ||
2 | - name: configure freeipa client | ||
3 | include_role: | ||
4 | name: freeipa.ansible_freeipa.ipaclient | ||
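Editor's note: the included ipaclient role is driven entirely by variables, none of which are set here. A hypothetical group_vars sketch of the usual minimum for ansible-freeipa's client role — all values are placeholders:

    # group_vars/ipaclients.yml -- placeholder values
    ipaclient_domain: home.local
    ipaclient_realm: HOME.LOCAL
    ipaadmin_principal: admin
    ipaadmin_password: "{{ vault_ipaadmin_password }}"
    ipaclient_mkhomedir: yes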
diff --git a/roles/services/freeipa/server/defaults/main.yml b/roles/services/freeipa/server/defaults/main.yml new file mode 100644 index 0000000..3e91a21 --- /dev/null +++ b/roles/services/freeipa/server/defaults/main.yml | |||
@@ -0,0 +1 @@ | |||
ipabackup_from_controller: yes | |||
diff --git a/roles/services/freeipa/server/tasks/main.yml b/roles/services/freeipa/server/tasks/main.yml new file mode 100644 index 0000000..32badc2 --- /dev/null +++ b/roles/services/freeipa/server/tasks/main.yml | |||
@@ -0,0 +1,43 @@ | |||
1 | --- | ||
2 | - name: set fedora dns | ||
3 | lineinfile: | ||
4 | path: /etc/systemd/resolved.conf | ||
5 | regexp: "^#?DNS=" | ||
6 | line: "DNS={{ ipa_dns_ip }}" | ||
7 | |||
8 | - name: restart systemd-resolved | ||
9 | service: | ||
10 | name: systemd-resolved | ||
11 | state: restarted | ||
12 | |||
13 | - name: set hostname | ||
14 | hostname: | ||
15 | name: ipasrv.home.local | ||
16 | |||
17 | - name: remove lines from /etc/hosts | ||
18 | lineinfile: | ||
19 | path: /etc/hosts | ||
20 | regexp: "^::1.*ipasrv" | ||
21 | state: absent | ||
22 | |||
23 | - name: remove lines from /etc/hosts | ||
24 | lineinfile: | ||
25 | path: /etc/hosts | ||
26 | regexp: "^127.0.0.1.*ipasrv" | ||
27 | state: absent | ||
28 | |||
29 | - name: add line to /etc/hosts | ||
30 | lineinfile: | ||
31 | path: /etc/hosts | ||
32 | line: "{{ ansible_default_ipv4.address }} ipasrv.home.local ipasrv" | ||
33 | state: present | ||
34 | |||
35 | - name: install freeipa-server | ||
36 | package: | ||
37 | name: freeipa-server | ||
38 | state: latest | ||
39 | |||
40 | #- name: restore ipaserver from backup | ||
41 | #include_role: | ||
42 | #name: freeipa.ansible_freeipa.ipabackup | ||
43 | #state: restored | ||
diff --git a/roles/services/game_server/handlers/main.yml b/roles/services/game_server/handlers/main.yml new file mode 100644 index 0000000..8e221e1 --- /dev/null +++ b/roles/services/game_server/handlers/main.yml | |||
@@ -0,0 +1,71 @@ | |||
1 | - name: create sunshine build dir | ||
2 | become: yes | ||
3 | become_user: "{{ games_user }}" | ||
4 | file: | ||
5 | path: "/home/{{ games_user }}/sunshine/build" | ||
6 | state: directory | ||
7 | owner: "{{ games_user }}" | ||
8 | group: "{{ games_user }}" | ||
9 | mode: "0755" | ||
10 | |||
11 | - name: run npm install | ||
12 | become: yes | ||
13 | become_user: "{{ games_user }}" | ||
14 | command: | ||
15 | cmd: "npm install" | ||
16 | chdir: "/home/{{ games_user }}/sunshine/build" | ||
17 | |||
18 | - name: build sunshine - cmake | ||
19 | become: yes | ||
20 | become_user: "{{ games_user }}" | ||
21 | command: | ||
22 | cmd: "cmake -DCMAKE_C_COMPILER=gcc-10 -DCMAKE_CXX_COMPILER=g++-10 .." | ||
23 | chdir: "/home/{{ games_user }}/sunshine/build" | ||
24 | |||
25 | - name: build sunshine - make | ||
26 | become: yes | ||
27 | become_user: "{{ games_user }}" | ||
28 | command: | ||
29 | cmd: "make" | ||
30 | chdir: "/home/{{ games_user }}/sunshine/build" | ||
31 | |||
32 | - name: build sunshine deb package | ||
33 | become: yes | ||
34 | become_user: "{{ games_user }}" | ||
35 | command: | ||
36 | cmd: "cpack -G DEB" | ||
37 | chdir: "/home/{{ games_user }}/sunshine/build" | ||
38 | |||
39 | - name: install sunshine from deb | ||
40 | apt: | ||
41 | deb: "/home/{{ games_user }}/sunshine/build/cpack_artifacts/Sunshine.deb" | ||
42 | |||
43 | - name: restart sunshine | ||
44 | become: yes | ||
45 | become_user: "{{ games_user }}" | ||
46 | systemd: | ||
47 | scope: user | ||
48 | name: sunshine | ||
49 | state: restarted | ||
50 | |||
51 | - name: decompress and extract firmware | ||
52 | unarchive: | ||
53 | src: "/tmp/linux-firmware-20221109.tar.gz" | ||
54 | dest: "/tmp/" | ||
55 | remote_src: yes | ||
56 | |||
57 | - name: copy all files from amdgpu to /lib/firmware/amdgpu/ | ||
58 | copy: | ||
59 | src: /tmp/linux-firmware-20221109/amdgpu | ||
60 | dest: /lib/firmware | ||
61 | remote_src: yes | ||
62 | owner: root | ||
63 | group: root | ||
64 | mode: "0644" | ||
65 | |||
66 | - name: update initramfs | ||
67 | command: | ||
68 | cmd: "update-initramfs -u" | ||
69 | |||
70 | - name: reboot system | ||
71 | reboot: | ||
diff --git a/roles/services/game_server/tasks/main.yml b/roles/services/game_server/tasks/main.yml new file mode 100644 index 0000000..f2b12bd --- /dev/null +++ b/roles/services/game_server/tasks/main.yml | |||
@@ -0,0 +1,223 @@ | |||
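| # Game streaming host: enable the full Debian repo set (contrib/non-free/backports), build Sunshine from source via the handlers, install Steam with i386 support, and deploy the lightdm/xfce session configs. | ||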
1 | - name: enable contrib and non-free repos | ||
2 | apt_repository: | ||
3 | repo: deb https://deb.debian.org/debian bookworm main contrib non-free | ||
4 | |||
5 | - name: enable contrib and non-free repos | ||
6 | apt_repository: | ||
7 | repo: deb https://security.debian.org/debian-security bookworm-security main contrib non-free | ||
8 | |||
9 | - name: enable contrib and non-free repos | ||
10 | apt_repository: | ||
11 | repo: deb https://deb.debian.org/debian bookworm-updates main contrib non-free | ||
12 | |||
13 | - name: enable contrib and non-free repos | ||
14 | apt_repository: | ||
15 | repo: deb https://deb.debian.org/debian bookworm-backports main contrib non-free | ||
16 | |||
17 | - name: enable contrib and non-free repos | ||
18 | apt_repository: | ||
19 | repo: deb-src https://deb.debian.org/debian bookworm main contrib non-free | ||
20 | |||
21 | - name: enable contrib and non-free repos | ||
22 | apt_repository: | ||
23 | repo: deb-src https://security.debian.org/debian-security bookworm-security main contrib non-free | ||
| |||
24 | - name: enable contrib and non-free repos | ||
25 | apt_repository: | ||
26 | repo: deb-src https://deb.debian.org/debian bookworm-updates main contrib non-free | ||
27 | |||
28 | - name: enable contrib and non-free repos | ||
29 | apt_repository: | ||
30 | repo: deb-src https://deb.debian.org/debian bookworm-backports main contrib non-free | ||
31 | |||
32 | - name: update repos | ||
33 | apt: | ||
34 | update_cache: yes | ||
35 | register: apt_upgrade | ||
36 | retries: 100 | ||
37 | until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) | ||
38 | |||
39 | - name: install packages | ||
40 | package: | ||
41 | name: "{{ game_server_packages }}" | ||
42 | state: latest | ||
43 | |||
44 | - name: create games user | ||
45 | user: | ||
46 | name: "{{ games_user }}" | ||
47 | create_home: yes | ||
48 | |||
49 | - name: add user to sudo group | ||
50 | user: | ||
51 | name: "{{ games_user }}" | ||
52 | groups: sudo | ||
53 | append: yes | ||
54 | |||
55 | - name: add user to ssl-cert group | ||
56 | user: | ||
57 | name: "{{ games_user }}" | ||
58 | groups: ssl-cert | ||
59 | append: yes | ||
60 | |||
61 | - name: set authorized ssh key | ||
62 | authorized_key: | ||
63 | user: "{{ games_user }}" | ||
64 | state: present | ||
65 | key: "{{ lookup('file', 'data/common/id_rsa.pub') }}" | ||
66 | |||
67 | - name: clone sunshine repo | ||
68 | become: yes | ||
69 | become_user: "{{ games_user }}" | ||
70 | git: | ||
71 | repo: "{{ sunshine_repo }}" | ||
72 | dest: "/home/{{ games_user }}/sunshine" | ||
73 | version: "{{ sunshine_version }}" | ||
74 | recursive: yes | ||
75 | force: yes | ||
76 | register: sunshine_repo | ||
77 | notify: | ||
78 | - create sunshine build dir | ||
79 | - run npm install | ||
80 | - build sunshine - cmake | ||
81 | - build sunshine - make | ||
82 | - build sunshine deb package | ||
83 | - install sunshine from deb | ||
84 | - restart sunshine | ||
85 | |||
86 | - name: install sunshine packages | ||
87 | package: | ||
88 | name: "{{ sunshine_packages }}" | ||
89 | state: latest | ||
90 | |||
91 | - meta: flush_handlers | ||
92 | |||
93 | - name: add user to input group | ||
94 | user: | ||
95 | name: "{{ games_user }}" | ||
96 | groups: input | ||
97 | append: yes | ||
98 | |||
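| # Sunshine injects gamepad and keyboard input through /dev/uinput; this udev rule grants the input group access to it. | ||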
99 | - name: set sunshine udev rules | ||
100 | lineinfile: | ||
101 | path: /etc/udev/rules.d/85-sunshine-input.rules | ||
102 | insertbefore: EOF | ||
103 | line: KERNEL=="uinput", GROUP="input", MODE="0660", OPTIONS+="static_node=uinput" | ||
104 | owner: root | ||
105 | group: root | ||
106 | mode: "0644" | ||
107 | create: yes | ||
108 | |||
109 | - name: install backports kernel | ||
110 | apt: | ||
111 | name: linux-image-amd64 | ||
| default_release: bookworm-backports  # install the kernel from the backports suite | ||
112 | state: latest | ||
113 | update_cache: yes | ||
114 | |||
115 | - name: update-pciids | ||
116 | changed_when: false | ||
117 | command: | ||
118 | cmd: "update-pciids" | ||
119 | |||
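| # Fetch newer amdgpu firmware (dimgrey_cavefish, assumed to be the host's Radeon GPU) from the kernel.org linux-firmware tree when it is missing; | ||
| # the notified handlers extract it, copy it into /lib/firmware, rebuild the initramfs and reboot. | ||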
120 | - name: check if needed firmware has already been installed | ||
121 | stat: path=/lib/firmware/amdgpu/dimgrey_cavefish_sos.bin | ||
122 | register: bin | ||
123 | |||
124 | - name: manually download latest firmware for amdgpu from kernel source tree | ||
125 | when: not bin.stat.exists | ||
126 | get_url: | ||
127 | url: "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/snapshot/linux-firmware-20221109.tar.gz" | ||
128 | dest: "/tmp/linux-firmware-20221109.tar.gz" | ||
129 | register: firmware | ||
130 | notify: | ||
131 | - decompress and extract firmware | ||
132 | - copy all files from amdgpu to /lib/firmware/amdgpu/ | ||
133 | - update initramfs | ||
134 | - reboot system | ||
135 | |||
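| # Open the ufw ports Sunshine uses for its web UI, pairing and streaming. | ||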
136 | - name: allow sunshine ports | ||
137 | ufw: | ||
138 | rule: allow | ||
139 | proto: tcp | ||
140 | port: 47984 | ||
141 | |||
142 | - name: allow sunshine ports | ||
143 | ufw: | ||
144 | rule: allow | ||
145 | proto: tcp | ||
146 | port: 47989 | ||
147 | |||
148 | - name: allow sunshine ports | ||
149 | ufw: | ||
150 | rule: allow | ||
151 | proto: tcp | ||
152 | port: 47990 | ||
153 | |||
154 | - name: allow sunshine ports | ||
155 | ufw: | ||
156 | rule: allow | ||
157 | proto: udp | ||
158 | port: 47998 | ||
159 | |||
160 | - name: allow sunshine ports | ||
161 | ufw: | ||
162 | rule: allow | ||
163 | proto: udp | ||
164 | port: 47999 | ||
165 | |||
166 | - name: allow sunshine ports | ||
167 | ufw: | ||
168 | rule: allow | ||
169 | proto: tcp | ||
| port: 48010  # assumed: Sunshine's RTSP stream port | ||
170 | |||
171 | - name: allow sunshine ports | ||
172 | ufw: | ||
173 | rule: allow | ||
174 | proto: udp | ||
175 | port: 48000 | ||
176 | |||
177 | - name: allow sunshine ports | ||
178 | ufw: | ||
179 | rule: allow | ||
180 | proto: udp | ||
181 | port: 48002 | ||
182 | |||
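| # Steam's 32-bit libraries need the i386 foreign architecture; add it only when dpkg does not already report it. | ||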
183 | - name: check if i386 architecture is already enabled | ||
184 | args: | ||
185 | executable: /bin/bash | ||
186 | shell: | | ||
187 | set -eo pipefail | ||
188 | dpkg --print-foreign-architectures | grep i386 | ||
189 | register: i386_check | ||
190 | changed_when: false | ||
| failed_when: i386_check.rc not in [0, 1]  # grep exits 1 when i386 is absent; only treat rc > 1 as a failure | ||
191 | |||
192 | - name: add i386 architecture | ||
193 | when: i386_check.rc == 1 | ||
194 | command: | ||
195 | cmd: "dpkg --add-architecture i386" | ||
196 | |||
197 | - name: update repos | ||
198 | when: i386_check.rc == 1 | ||
199 | apt: | ||
200 | update_cache: yes | ||
201 | register: apt_upgrade | ||
202 | retries: 100 | ||
203 | until: apt_upgrade is success or ('Failed to lock apt for exclusive operation' not in apt_upgrade.msg and '/var/lib/dpkg/lock' not in apt_upgrade.msg) | ||
204 | |||
205 | - name: install steam and related packages | ||
206 | package: | ||
207 | name: "{{ steam_packages }}" | ||
208 | |||
209 | - name: copy lightdm config | ||
210 | copy: | ||
211 | src: "{{ lightdm_config }}" | ||
212 | dest: /etc/lightdm/lightdm.conf | ||
213 | owner: root | ||
214 | group: root | ||
215 | mode: "0644" | ||
216 | |||
217 | - name: copy xfce xinit config | ||
218 | copy: | ||
219 | src: "{{ xfce_xinit }}" | ||
220 | dest: /etc/xdg/xfce4/xinitrc | ||
221 | owner: root | ||
222 | group: root | ||
223 | mode: "0755" | ||
diff --git a/roles/services/jenkins/handlers/main.yml b/roles/services/jenkins/handlers/main.yml new file mode 100644 index 0000000..92f0084 --- /dev/null +++ b/roles/services/jenkins/handlers/main.yml | |||
@@ -0,0 +1,13 @@ | |||
1 | - name: update repos | ||
2 | apt: | ||
3 | update_cache: yes | ||
4 | |||
5 | - name: restart nginx | ||
6 | service: | ||
7 | name: nginx | ||
8 | state: restarted | ||
9 | |||
10 | - name: restart jenkins | ||
11 | service: | ||
12 | name: jenkins | ||
13 | state: restarted | ||
diff --git a/roles/services/jenkins/tasks/main.yml b/roles/services/jenkins/tasks/main.yml new file mode 100644 index 0000000..29dbb28 --- /dev/null +++ b/roles/services/jenkins/tasks/main.yml | |||
@@ -0,0 +1,184 @@ | |||
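| # Install Jenkins from the upstream apt repository enabled through extrepo, front it with nginx over TLS, and configure it through the API and a configuration-as-code file. | ||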
1 | - name: install extrepo | ||
2 | package: | ||
3 | name: extrepo | ||
4 | state: latest | ||
5 | |||
6 | - name: add jenkins repo | ||
7 | register: result | ||
8 | changed_when: result.stdout | regex_search("skipped") | bool | ||
9 | notify: update repos | ||
10 | command: | ||
11 | cmd: extrepo enable jenkins | ||
12 | creates: /etc/apt/sources.list.d/extrepo_jenkins.sources | ||
13 | |||
14 | - meta: flush_handlers | ||
15 | |||
16 | - name: update jenkins repo data | ||
17 | changed_when: false | ||
18 | command: | ||
19 | cmd: extrepo update jenkins | ||
20 | |||
21 | - name: install packages | ||
22 | package: | ||
23 | name: "{{ jenkins_packages }}" | ||
24 | |||
25 | - name: generate ssh key for jenkins user | ||
26 | user: | ||
27 | name: jenkins | ||
28 | generate_ssh_key: yes | ||
29 | |||
30 | - name: get jenkins user ssh key | ||
31 | changed_when: false | ||
32 | command: cat /var/lib/jenkins/.ssh/id_rsa.pub | ||
33 | register: pubkey | ||
34 | |||
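| # Create a jenkins service account in FreeIPA carrying the Jenkins user's generated SSH public key, and grant it passwordless sudo everywhere via a dedicated group and sudo rule. | ||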
35 | - name: create jenkins user in freeipa | ||
36 | freeipa.ansible_freeipa.ipauser: | ||
37 | ipaadmin_principal: admin  # assumed default admin principal | ||
38 | ipaadmin_password: "{{ ipafulladmin_password }}" | ||
39 | name: jenkins | ||
40 | passwordexpiration: "2050-01-01" | ||
41 | first: jenkins | ||
42 | last: ci | ||
43 | sshpubkey: "{{ pubkey.stdout }}" | ||
44 | |||
45 | - name: create jenkins_admin group in freeipa | ||
46 | freeipa.ansible_freeipa.ipagroup: | ||
47 | ipaadmin_password: "{{ ipafulladmin_password }}" | ||
48 | name: jenkins_admin | ||
49 | |||
50 | - name: add user jenkins to jenkins_admin group in freeipa | ||
51 | freeipa.ansible_freeipa.ipagroup: | ||
52 | ipaadmin_password: "{{ ipafulladmin_password }}" | ||
53 | name: jenkins_admin | ||
54 | action: member | ||
55 | user: | ||
56 | - jenkins | ||
57 | |||
58 | - name: create sudo rule to allow jenkins to execute on all without password | ||
59 | freeipa.ansible_freeipa.ipasudorule: | ||
60 | ipaadmin_password: "{{ ipafulladmin_password }}" | ||
61 | name: jenkins_rule | ||
62 | sudooption: "!authenticate" | ||
63 | group: jenkins_admin | ||
64 | hostcategory: all | ||
65 | cmdcategory: all | ||
66 | runasusercategory: all | ||
67 | runasgroupcategory: all | ||
68 | |||
69 | - name: deploy nginx configuration | ||
70 | copy: | ||
71 | src: "{{ jenkins_nginx_config }}" | ||
72 | dest: /etc/nginx/sites-available/jenkins.conf | ||
73 | owner: root | ||
74 | group: root | ||
75 | mode: '0644' | ||
76 | register: nginx_config | ||
77 | notify: restart nginx | ||
78 | |||
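| # nginx terminates TLS; the private key and certificate are placed under /etc/letsencrypt/live/{{ services_domain }}/, the path layout the deployed vhost presumably references. | ||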
79 | - name: create cert/key dir | ||
80 | file: | ||
81 | state: directory | ||
82 | path: "/etc/letsencrypt/live/{{ services_domain }}" | ||
83 | owner: root | ||
84 | group: root | ||
85 | mode: "0755" | ||
86 | |||
87 | - name: remove existing private key file | ||
88 | file: | ||
89 | path: "/etc/letsencrypt/live/{{ services_domain }}/privkey.pem" | ||
90 | state: absent | ||
91 | |||
92 | - name: write private key to file | ||
93 | lineinfile: | ||
94 | path: "/etc/letsencrypt/live/{{ services_domain }}/privkey.pem" | ||
95 | line: "{{ nginx_key }}" | ||
96 | insertbefore: EOF | ||
97 | create: yes | ||
98 | |||
99 | - name: deploy cert | ||
100 | copy: | ||
101 | src: "{{ nginx_cert }}" | ||
102 | dest: "/etc/letsencrypt/live/{{ services_domain }}/fullchain.pem" | ||
103 | owner: root | ||
104 | group: root | ||
105 | mode: '0644' | ||
106 | |||
107 | - name: symlink site | ||
108 | file: | ||
109 | src: /etc/nginx/sites-available/jenkins.conf | ||
110 | dest: /etc/nginx/sites-enabled/jenkins.conf | ||
111 | owner: root | ||
112 | group: root | ||
113 | state: link | ||
114 | |||
115 | - name: allow http (80/tcp) traffic | ||
116 | ufw: | ||
117 | rule: allow | ||
118 | port: '80' | ||
119 | proto: tcp | ||
120 | |||
121 | - name: allow https (443/tcp) traffic | ||
122 | ufw: | ||
123 | rule: allow | ||
124 | port: '443' | ||
125 | proto: tcp | ||
126 | |||
127 | - name: install ansible plugin | ||
128 | jenkins_plugin: | ||
129 | url_username: "{{ jenkins_username }}" | ||
130 | url_password: "{{ jenkins_apikey }}" | ||
131 | url: "{{ jenkins_url }}" | ||
132 | name: ansible | ||
133 | |||
134 | - name: install gitea plugin | ||
135 | jenkins_plugin: | ||
136 | url_username: "{{ jenkins_username }}" | ||
137 | url_password: "{{ jenkins_apikey }}" | ||
138 | url: "{{ jenkins_url }}" | ||
139 | name: gitea | ||
140 | |||
141 | - name: install openid login plugin | ||
142 | jenkins_plugin: | ||
143 | url_username: "{{ jenkins_username }}" | ||
144 | url_password: "{{ jenkins_apikey }}" | ||
145 | url: "{{ jenkins_url }}" | ||
146 | name: oic-auth | ||
147 | |||
148 | - name: install prometheus plugin | ||
149 | jenkins_plugin: | ||
150 | url_username: "{{ jenkins_username }}" | ||
151 | url_password: "{{ jenkins_apikey }}" | ||
152 | url: "{{ jenkins_url }}" | ||
153 | name: prometheus | ||
154 | |||
155 | - name: install casc plugin | ||
156 | jenkins_plugin: | ||
157 | url_username: "{{ jenkins_username }}" | ||
158 | url_password: "{{ jenkins_apikey }}" | ||
159 | url: "{{ jenkins_url }}" | ||
160 | name: configuration-as-code | ||
161 | |||
162 | - name: install warnings-ng plugin | ||
163 | jenkins_plugin: | ||
164 | url_username: "{{ jenkins_username }}" | ||
165 | url_password: "{{ jenkins_apikey }}" | ||
166 | url: "{{ jenkins_url }}" | ||
167 | name: warnings-ng | ||
168 | |||
169 | - name: deploy configuration as code file | ||
170 | register: casc_file | ||
171 | notify: restart jenkins | ||
172 | template: | ||
173 | src: "{{ jenkins_config }}" | ||
174 | dest: "/var/lib/jenkins/jenkins.yaml" | ||
175 | owner: jenkins | ||
176 | group: jenkins | ||
177 | mode: "0644" | ||
178 | |||
179 | - name: enable jenkins | ||
180 | systemd: | ||
181 | daemon_reload: yes | ||
182 | enabled: yes | ||
183 | masked: no | ||
184 | name: jenkins | ||
diff --git a/roles/services/monitoring/grafana/defaults/main.yml b/roles/services/monitoring/grafana/defaults/main.yml new file mode 100644 index 0000000..c346e54 --- /dev/null +++ b/roles/services/monitoring/grafana/defaults/main.yml | |||
@@ -0,0 +1,5 @@ | |||
1 | grafana_package: | ||
2 | - grafana | ||
3 | - nginx | ||
4 | grafana_config: files/grafana_config/ | ||
5 | grafana_data: files/grafana.db | ||
diff --git a/roles/services/monitoring/grafana/handlers/main.yml b/roles/services/monitoring/grafana/handlers/main.yml new file mode 100644 index 0000000..8026c6d --- /dev/null +++ b/roles/services/monitoring/grafana/handlers/main.yml | |||
@@ -0,0 +1,13 @@ | |||
1 | - name: update repos | ||
2 | apt: | ||
3 | update_cache: yes | ||
4 | |||
5 | - name: restart grafana | ||
6 | service: | ||
7 | name: grafana-server | ||
8 | state: restarted | ||
9 | |||
10 | - name: restart nginx | ||
11 | service: | ||
12 | name: nginx | ||
13 | state: restarted | ||
diff --git a/roles/services/monitoring/grafana/tasks/main.yml b/roles/services/monitoring/grafana/tasks/main.yml new file mode 100644 index 0000000..e9f824e --- /dev/null +++ b/roles/services/monitoring/grafana/tasks/main.yml | |||
@@ -0,0 +1,125 @@ | |||
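| # Grafana is installed from the upstream repo via extrepo, proxied by nginx, and then configured through its HTTP API: admin user, datasources and the main dashboard. | ||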
1 | - name: install extrepo | ||
2 | package: | ||
3 | name: extrepo | ||
4 | state: latest | ||
5 | |||
6 | - name: add Grafana repo | ||
7 | register: result | ||
8 | changed_when: result.stdout | regex_search("skipped") | bool | ||
9 | notify: update repos | ||
10 | command: | ||
11 | cmd: extrepo enable grafana | ||
12 | creates: /etc/apt/sources.list.d/extrepo_grafana.sources | ||
13 | |||
14 | - meta: flush_handlers | ||
15 | |||
16 | - name: update Grafana repo | ||
17 | changed_when: false | ||
18 | command: | ||
19 | cmd: extrepo update grafana | ||
20 | |||
21 | - name: install grafana | ||
22 | package: | ||
23 | name: "{{ grafana_package }}" | ||
24 | |||
25 | - name: deploy grafana config | ||
26 | notify: restart grafana | ||
27 | template: | ||
28 | src: "{{ grafana_config }}" | ||
29 | dest: /etc/grafana/grafana.ini | ||
30 | owner: root | ||
31 | group: grafana | ||
32 | mode: '0640' | ||
33 | |||
34 | - name: deploy nginx configuration | ||
35 | notify: restart nginx | ||
36 | copy: | ||
37 | src: "{{ grafana_nginx_config }}" | ||
38 | dest: /etc/nginx/sites-available/grafana.conf | ||
39 | owner: root | ||
40 | group: root | ||
41 | mode: '0644' | ||
42 | |||
43 | - name: symlink site | ||
44 | notify: restart nginx | ||
45 | file: | ||
46 | src: /etc/nginx/sites-available/grafana.conf | ||
47 | dest: /etc/nginx/sites-enabled/grafana.conf | ||
48 | owner: root | ||
49 | group: root | ||
50 | state: link | ||
51 | |||
52 | - name: allow http (80/tcp) traffic | ||
53 | ufw: | ||
54 | rule: allow | ||
55 | port: '80' | ||
56 | proto: tcp | ||
57 | |||
58 | - name: allow https (443/tcp) traffic | ||
59 | ufw: | ||
60 | rule: allow | ||
61 | port: '443' | ||
62 | proto: tcp | ||
63 | |||
64 | - name: enable grafana | ||
65 | systemd: | ||
66 | daemon_reload: yes | ||
67 | enabled: yes | ||
68 | masked: no | ||
69 | name: grafana-server | ||
70 | |||
71 | - meta: flush_handlers | ||
72 | |||
73 | - name: add grafana user | ||
74 | ignore_errors: yes | ||
75 | community.grafana.grafana_user: | ||
76 | name: "{{ grafana_admin }}" | ||
77 | email: "{{ grafana_email }}" | ||
78 | url: "{{ grafana_url }}" | ||
79 | login: "{{ grafana_admin }}" | ||
80 | password: "{{ grafana_password }}" | ||
81 | is_admin: true | ||
82 | state: present | ||
83 | |||
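| # Register the monitoring backends as Grafana datasources: Prometheus for metrics, InfluxDB for Proxmox stats, Loki for logs. | ||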
84 | - name: add prometheus datasource | ||
85 | community.grafana.grafana_datasource: | ||
86 | grafana_url: "{{ grafana_url }}" | ||
87 | grafana_user: "{{ grafana_admin }}" | ||
88 | grafana_password: "{{ grafana_password }}" | ||
89 | name: "Prometheus" | ||
90 | ds_type: prometheus | ||
91 | ds_url: "{{ prometheus_url }}" | ||
92 | access: proxy | ||
93 | |||
94 | - name: add influxdb datasource | ||
95 | community.grafana.grafana_datasource: | ||
96 | grafana_url: "{{ grafana_url }}" | ||
97 | grafana_user: "{{ grafana_admin }}" | ||
98 | grafana_password: "{{ grafana_password }}" | ||
99 | name: "Proxmox InfluxDB" | ||
100 | ds_type: influxdb | ||
101 | ds_url: "{{ influxdb_url }}" | ||
102 | database: "{{ influx_database }}" | ||
103 | user: "{{ influx_user }}" | ||
104 | password: "{{ influx_password }}" | ||
105 | access: proxy | ||
106 | |||
107 | - name: add loki datasource | ||
108 | community.grafana.grafana_datasource: | ||
109 | grafana_url: "{{ grafana_url }}" | ||
110 | grafana_user: "{{ grafana_admin }}" | ||
111 | grafana_password: "{{ grafana_password }}" | ||
112 | name: "Loki" | ||
113 | ds_type: loki | ||
114 | ds_url: "{{ loki_url }}" | ||
115 | access: proxy | ||
116 | |||
117 | - name: import main custom dashboard | ||
118 | delegate_to: localhost | ||
119 | become: no | ||
120 | community.grafana.grafana_dashboard: | ||
121 | grafana_url: "{{ grafana_url }}" | ||
122 | grafana_user: "{{ grafana_admin }}" | ||
123 | grafana_password: "{{ grafana_password }}" | ||
124 | path: "{{ grafana_dashboard_main }}" | ||
125 | overwrite: yes | ||
diff --git a/roles/services/monitoring/influxdb/defaults/main.yml b/roles/services/monitoring/influxdb/defaults/main.yml new file mode 100644 index 0000000..180ad8e --- /dev/null +++ b/roles/services/monitoring/influxdb/defaults/main.yml | |||
@@ -0,0 +1,6 @@ | |||
1 | influxdb_packages: | ||
2 | - influxdb | ||
3 | - influxdb-client | ||
4 | |||
5 | influx_config: files/influxdb.conf | ||
6 | influx_data: files/influx_data/ | ||
diff --git a/roles/services/monitoring/influxdb/handlers/main.yml b/roles/services/monitoring/influxdb/handlers/main.yml new file mode 100644 index 0000000..765a040 --- /dev/null +++ b/roles/services/monitoring/influxdb/handlers/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | - name: restart influxdb | ||
2 | service: | ||
3 | name: influxdb | ||
4 | state: restarted | ||
diff --git a/roles/services/monitoring/influxdb/tasks/main.yml b/roles/services/monitoring/influxdb/tasks/main.yml new file mode 100644 index 0000000..06d6e86 --- /dev/null +++ b/roles/services/monitoring/influxdb/tasks/main.yml | |||
@@ -0,0 +1,19 @@ | |||
1 | - name: install packages | ||
2 | package: | ||
3 | name: "{{ influxdb_packages }}" | ||
4 | state: latest | ||
5 | |||
6 | - name: copy config | ||
7 | notify: restart influxdb | ||
8 | copy: | ||
9 | src: "{{ influx_config }}" | ||
10 | dest: /etc/influxdb/influxdb.conf | ||
11 | owner: root | ||
12 | group: root | ||
13 | mode: '0644' | ||
14 | |||
15 | - name: enable influxdb | ||
16 | systemd: | ||
17 | name: influxdb | ||
18 | enabled: yes | ||
19 | masked: no | ||
diff --git a/roles/services/monitoring/loki/handlers/main.yml b/roles/services/monitoring/loki/handlers/main.yml new file mode 100644 index 0000000..e70412f --- /dev/null +++ b/roles/services/monitoring/loki/handlers/main.yml | |||
@@ -0,0 +1,8 @@ | |||
1 | - name: update repos | ||
2 | apt: | ||
3 | update_cache: yes | ||
4 | |||
5 | - name: restart nginx | ||
6 | service: | ||
7 | name: nginx | ||
8 | state: restarted | ||
diff --git a/roles/services/monitoring/loki/tasks/main.yml b/roles/services/monitoring/loki/tasks/main.yml new file mode 100644 index 0000000..31a7375 --- /dev/null +++ b/roles/services/monitoring/loki/tasks/main.yml | |||
@@ -0,0 +1,80 @@ | |||
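| # Loki is installed from the Grafana repo and proxied by nginx; 3100/tcp is also opened, presumably for clients that push logs directly. | ||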
1 | - name: install extrepo | ||
2 | package: | ||
3 | name: extrepo | ||
4 | state: latest | ||
5 | |||
6 | - name: add Grafana repo | ||
7 | register: result | ||
8 | changed_when: result.stdout | regex_search("skipped") | bool | ||
9 | notify: update repos | ||
10 | command: | ||
11 | cmd: extrepo enable grafana | ||
12 | creates: /etc/apt/sources.list.d/extrepo_grafana.sources | ||
13 | |||
14 | - meta: flush_handlers | ||
15 | |||
16 | - name: update Grafana repo | ||
17 | changed_when: false | ||
18 | command: | ||
19 | cmd: extrepo update grafana | ||
20 | |||
21 | - name: install loki | ||
22 | package: | ||
23 | name: loki | ||
24 | state: latest | ||
25 | |||
26 | - name: deploy loki configuration | ||
27 | copy: | ||
28 | src: "{{ loki_config }}" | ||
29 | dest: /etc/loki/config.yml | ||
30 | owner: root | ||
31 | group: root | ||
32 | mode: '0644' | ||
33 | |||
34 | - name: deploy nginx configuration | ||
35 | copy: | ||
36 | src: "{{ loki_nginx_config }}" | ||
37 | dest: /etc/nginx/sites-available/loki.conf | ||
38 | owner: root | ||
39 | group: root | ||
40 | mode: '0644' | ||
41 | register: nginxconfig | ||
42 | notify: restart nginx | ||
43 | |||
44 | - name: symlink site | ||
45 | file: | ||
46 | src: /etc/nginx/sites-available/loki.conf | ||
47 | dest: /etc/nginx/sites-enabled/loki.conf | ||
48 | owner: root | ||
49 | group: root | ||
50 | state: link | ||
51 | |||
52 | - name: allow http (80/tcp) traffic | ||
53 | ufw: | ||
54 | rule: allow | ||
55 | port: '80' | ||
56 | proto: tcp | ||
57 | |||
58 | - name: allow https (443/tcp) traffic | ||
59 | ufw: | ||
60 | rule: allow | ||
61 | port: '443' | ||
62 | proto: tcp | ||
63 | |||
64 | - name: allow loki log (3100/tcp) traffic | ||
65 | ufw: | ||
66 | rule: allow | ||
67 | port: '3100' | ||
68 | proto: tcp | ||
69 | |||
70 | - name: enable loki | ||
71 | systemd: | ||
72 | daemon_reload: yes | ||
73 | enabled: yes | ||
74 | masked: no | ||
75 | name: loki | ||
76 | |||
77 | - name: restart loki | ||
78 | systemd: | ||
79 | name: loki | ||
80 | state: restarted | ||
diff --git a/roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml b/roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/roles/services/monitoring/prometheus/blackbox-exporter/tasks/main.yml | |||
diff --git a/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml new file mode 100644 index 0000000..9d2b8a5 --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/defaults/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | nginx_exporter_debian_package: prometheus-nginx-exporter | ||
2 | nginx_exporter_fedora_package: golang-github-prometheus-node-exporter | ||
3 | prometheus_server_ip: 192.168.88.32 | ||
4 | nginx_exporter_port: '9113' | ||
diff --git a/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml new file mode 100644 index 0000000..fe9a90d --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/handlers/main.yml | |||
@@ -0,0 +1,9 @@ | |||
1 | - name: restart nginx | ||
2 | service: | ||
3 | name: nginx | ||
4 | state: restarted | ||
5 | |||
6 | - name: restart nginx-exporter | ||
7 | service: | ||
8 | name: prometheus-nginx-exporter | ||
9 | state: restarted | ||
diff --git a/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml b/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml new file mode 100644 index 0000000..819f71e --- /dev/null +++ b/roles/services/monitoring/prometheus/nginx_exporter/tasks/main.yml | |||
@@ -0,0 +1,44 @@ | |||
1 | - name: install package (Debian) | ||
2 | when: ansible_facts['distribution'] == "Debian" | ||
3 | package: | ||
4 | name: "{{ nginx_exporter_debian_package }}" | ||
5 | |||
6 | - name: allow port | ||
7 | ufw: | ||
8 | rule: allow | ||
9 | direction: in | ||
10 | proto: tcp | ||
11 | src: "{{ prometheus_server_ip }}" | ||
12 | to_port: "{{ nginx_exporter_port }}" | ||
13 | |||
14 | - name: copy defaults file | ||
15 | notify: restart nginx-exporter | ||
16 | copy: | ||
17 | src: "{{ nginx_exporter_defaults }}" | ||
18 | dest: /etc/default/prometheus-nginx-exporter | ||
19 | owner: root | ||
20 | group: root | ||
21 | mode: '0644' | ||
22 | |||
23 | - name: deploy nginx configuration | ||
24 | notify: restart nginx | ||
25 | copy: | ||
26 | src: "{{ nginx_exporter_config }}" | ||
27 | dest: /etc/nginx/sites-available/metrics.conf | ||
28 | owner: root | ||
29 | group: root | ||
30 | mode: '0644' | ||
31 | |||
32 | - name: symlink site | ||
33 | file: | ||
34 | src: /etc/nginx/sites-available/metrics.conf | ||
35 | dest: /etc/nginx/sites-enabled/metrics.conf | ||
36 | owner: root | ||
37 | group: root | ||
38 | state: link | ||
39 | |||
40 | - name: enable service | ||
41 | systemd: | ||
42 | name: prometheus-nginx-exporter | ||
43 | enabled: yes | ||
44 | masked: no | ||
diff --git a/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml b/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml new file mode 100644 index 0000000..e4ff351 --- /dev/null +++ b/roles/services/monitoring/prometheus/node_exporter/defaults/main.yml | |||
@@ -0,0 +1,4 @@ | |||
1 | node_exporter_debian_package: prometheus-node-exporter | ||
2 | node_exporter_fedora_package: golang-github-prometheus-node-exporter | ||
3 | prometheus_server_ip: 192.168.88.32 | ||
4 | node_exporter_port: '9100' | ||
diff --git a/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml b/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml new file mode 100644 index 0000000..6bbcc08 --- /dev/null +++ b/roles/services/monitoring/prometheus/node_exporter/tasks/main.yml | |||
@@ -0,0 +1,28 @@ | |||
1 | - name: install package (Debian) | ||
2 | when: ansible_facts['distribution'] == "Debian" | ||
3 | package: | ||
4 | name: "{{ node_exporter_debian_package }}" | ||
5 | |||
6 | - name: install package (Fedora) | ||
7 | when: ansible_facts['distribution'] == "Fedora" | ||
8 | package: | ||
9 | name: "{{ node_exporter_fedora_package }}" | ||
10 | |||
11 | - name: allow port | ||
12 | ufw: | ||
13 | rule: allow | ||
14 | direction: in | ||
15 | proto: tcp | ||
16 | src: "{{ prometheus_server_ip }}" | ||
17 | to_port: "{{ node_exporter_port }}" | ||
18 | |||
19 | - name: enable service | ||
20 | systemd: | ||
21 | name: prometheus-node-exporter | ||
22 | enabled: yes | ||
23 | masked: no | ||
24 | |||
25 | - name: restart service | ||
26 | service: | ||
27 | name: prometheus-node-exporter | ||
28 | state: restarted | ||
diff --git a/roles/services/monitoring/prometheus/server/defaults/main.yml b/roles/services/monitoring/prometheus/server/defaults/main.yml new file mode 100644 index 0000000..696e7cc --- /dev/null +++ b/roles/services/monitoring/prometheus/server/defaults/main.yml | |||
@@ -0,0 +1,6 @@ | |||
1 | prometheus_package: prometheus | ||
2 | management_ip: 192.168.88.254 | ||
3 | grafana_server_ip: 192.168.88.21 | ||
4 | prometheus_port: '9090' | ||
5 | prometheus_config: files/prometheus.yml | ||
6 | prometheus_defaults: files/prometheus | ||
diff --git a/roles/services/monitoring/prometheus/server/tasks/main.yml b/roles/services/monitoring/prometheus/server/tasks/main.yml new file mode 100644 index 0000000..06ecc10 --- /dev/null +++ b/roles/services/monitoring/prometheus/server/tasks/main.yml | |||
@@ -0,0 +1,79 @@ | |||
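| # Prometheus server: deploy the scrape config and defaults, restrict the metrics port to the Grafana and management hosts, and proxy external access through nginx. | ||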
1 | - name: install package | ||
2 | package: | ||
3 | name: "{{ prometheus_package }}" | ||
4 | |||
5 | - name: allow access to metrics from grafana | ||
6 | ufw: | ||
7 | rule: allow | ||
8 | direction: in | ||
9 | proto: tcp | ||
10 | src: "{{ grafana_server_ip }}" | ||
11 | to_port: "{{ prometheus_port }}" | ||
12 | |||
13 | - name: allow access to metrics from management | ||
14 | ufw: | ||
15 | rule: allow | ||
16 | direction: in | ||
17 | proto: tcp | ||
18 | src: "{{ management_ip }}" | ||
19 | to_port: "{{ prometheus_port }}" | ||
20 | |||
21 | - name: copy config file | ||
22 | copy: | ||
23 | src: "{{ prometheus_config }}" | ||
24 | dest: /etc/prometheus/prometheus.yml | ||
25 | owner: root | ||
26 | group: root | ||
27 | mode: '0644' | ||
28 | |||
29 | - name: copy defaults file | ||
30 | copy: | ||
31 | src: "{{ prometheus_defaults }}" | ||
32 | dest: /etc/default/prometheus | ||
33 | owner: root | ||
34 | group: root | ||
35 | mode: '0644' | ||
36 | |||
37 | - name: enable service | ||
38 | systemd: | ||
39 | name: prometheus | ||
40 | enabled: yes | ||
41 | masked: no | ||
42 | |||
43 | - name: restart service | ||
44 | service: | ||
45 | name: prometheus | ||
46 | state: restarted | ||
47 | |||
48 | - name: deploy nginx configuration | ||
49 | copy: | ||
50 | src: "{{ prometheus_nginx_config }}" | ||
51 | dest: /etc/nginx/sites-available/grafana.conf | ||
52 | owner: root | ||
53 | group: root | ||
54 | mode: '0644' | ||
55 | |||
56 | - name: symlink site | ||
57 | file: | ||
58 | src: /etc/nginx/sites-available/grafana.conf | ||
59 | dest: /etc/nginx/sites-enabled/grafana.conf | ||
60 | owner: root | ||
61 | group: root | ||
62 | state: link | ||
63 | |||
64 | - name: allow http (80/tcp) traffic | ||
65 | ufw: | ||
66 | rule: allow | ||
67 | port: '80' | ||
68 | proto: tcp | ||
69 | |||
70 | - name: allow https (443/tcp) traffic | ||
71 | ufw: | ||
72 | rule: allow | ||
73 | port: '443' | ||
74 | proto: tcp | ||
75 | |||
76 | - name: restart nginx | ||
77 | service: | ||
78 | name: nginx | ||
79 | state: restarted | ||
diff --git a/roles/services/monitoring/promtail/handlers/main.yml b/roles/services/monitoring/promtail/handlers/main.yml new file mode 100644 index 0000000..97ea7d3 --- /dev/null +++ b/roles/services/monitoring/promtail/handlers/main.yml | |||
@@ -0,0 +1,39 @@ | |||
1 | - name: update repos - debian | ||
2 | apt: | ||
3 | update_cache: yes | ||
4 | |||
5 | - name: update repos - fedora | ||
6 | dnf: | ||
7 | name: "*" | ||
8 | state: latest | ||
9 | |||
10 | - name: build loki-docker-driver plugin for private repo | ||
11 | become: yes | ||
12 | become_user: "{{ docker_username }}" | ||
13 | environment: | ||
14 | LOKI_DOCKER_DRIVER: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver" | ||
15 | community.general.make: | ||
16 | chdir: "{{ docker_home }}/plugins/loki" | ||
17 | target: docker-driver-push | ||
18 | |||
19 | - name: restart rootless docker | ||
20 | become: yes | ||
21 | become_user: "{{ docker_username }}" | ||
22 | systemd: | ||
23 | name: docker | ||
24 | enabled: yes | ||
25 | state: restarted | ||
26 | scope: user | ||
27 | environment: | ||
28 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
29 | |||
30 | - name: restart docker | ||
31 | service: | ||
32 | name: docker | ||
33 | state: restarted | ||
34 | |||
35 | - name: restart promtail | ||
36 | when: promtail_config.changed | ||
37 | service: | ||
38 | name: promtail | ||
39 | state: restarted | ||
diff --git a/roles/services/monitoring/promtail/tasks/main.yml b/roles/services/monitoring/promtail/tasks/main.yml new file mode 100644 index 0000000..f8b28cc --- /dev/null +++ b/roles/services/monitoring/promtail/tasks/main.yml | |||
@@ -0,0 +1,151 @@ | |||
1 | - name: install extrepo | ||
2 | when: ansible_facts['distribution'] == 'Debian' | ||
3 | package: | ||
4 | name: extrepo | ||
5 | state: latest | ||
6 | |||
7 | - name: add grafana repo | debian | ||
8 | when: ansible_facts['distribution'] == 'Debian' | ||
9 | register: result | ||
10 | changed_when: result.stdout | regex_search("skipped") | bool | ||
11 | notify: update repos - debian | ||
12 | command: | ||
13 | cmd: extrepo enable grafana | ||
14 | creates: /etc/apt/sources.list.d/extrepo_grafana.sources | ||
15 | |||
16 | - meta: flush_handlers | ||
17 | |||
18 | - name: update grafana extrepo data | debian | ||
19 | when: ansible_facts['distribution'] == 'Debian' | ||
20 | changed_when: false | ||
21 | command: | ||
22 | cmd: extrepo update grafana | ||
23 | |||
24 | - name: add Grafana repo | fedora | ||
25 | when: ansible_facts['distribution'] == 'Fedora' | ||
26 | notify: update repos - fedora | ||
27 | yum_repository: | ||
28 | name: grafana | ||
29 | file: grafana | ||
30 | description: "Grafana OSS Repo" | ||
31 | baseurl: "https://rpm.grafana.com" | ||
32 | repo_gpgcheck: yes | ||
33 | enabled: yes | ||
34 | gpgcheck: yes | ||
35 | gpgkey: https://rpm.grafana.com/gpg.key | ||
36 | sslverify: yes | ||
37 | sslcacert: /etc/pki/tls/certs/ca-bundle.crt | ||
38 | exclude: "*beta*" | ||
39 | |||
40 | - name: install promtail | ||
41 | package: | ||
42 | name: promtail | ||
43 | state: latest | ||
44 | |||
45 | - name: add promtail to adm group for log access (debian) | ||
46 | when: ansible_facts['distribution'] == 'Debian' | ||
47 | user: | ||
48 | name: promtail | ||
49 | groups: adm | ||
50 | append: yes | ||
51 | |||
52 | - name: add promtail to systemd-journal group for journal access | ||
53 | user: | ||
54 | name: promtail | ||
55 | groups: systemd-journal | ||
56 | append: yes | ||
57 | |||
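| # On docker hosts, build the loki-docker-driver plugin from the cloned Loki repo, push it to the private registry, and enable it for both the rootless and the system-wide docker daemons so container logs flow to Loki. | ||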
58 | - name: create docker plugin directory | ||
59 | when: "'docker_hosts' in group_names" | ||
60 | become: yes | ||
61 | become_user: "{{ docker_username }}" | ||
62 | file: | ||
63 | path: "{{ docker_home }}/plugins" | ||
64 | state: directory | ||
65 | owner: "{{ docker_username }}" | ||
66 | group: "{{ docker_username }}" | ||
67 | mode: "0755" | ||
68 | |||
69 | - name: clone loki repo | ||
70 | when: "'docker_hosts' in group_names" | ||
71 | become: yes | ||
72 | become_user: "{{ docker_username }}" | ||
73 | git: | ||
74 | repo: "{{ loki_repo }}" | ||
75 | dest: "{{ docker_home }}/plugins/loki" | ||
76 | version: "{{ loki_version }}" | ||
77 | register: repo | ||
78 | notify: build loki-docker-driver plugin for private repo | ||
79 | |||
80 | - meta: flush_handlers | ||
81 | |||
82 | - name: login to docker registry | ||
83 | when: "'docker_hosts' in group_names" | ||
84 | become: yes | ||
85 | become_user: "{{ docker_username }}" | ||
86 | environment: | ||
87 | XDG_RUNTIME_DIR: "/run/user/{{ docker_uid }}" | ||
88 | docker_login: | ||
89 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
90 | registry_url: "{{ docker_registry_url }}" | ||
91 | username: "{{ docker_registry_username }}" | ||
92 | password: "{{ docker_registry_password }}" | ||
93 | |||
94 | # docker driver rootless | ||
95 | |||
96 | - name: enable loki-docker-driver plugin | ||
97 | when: "'docker_hosts' in group_names" | ||
98 | become: yes | ||
99 | become_user: "{{ docker_username }}" | ||
100 | notify: restart rootless docker | ||
101 | community.docker.docker_plugin: | ||
102 | plugin_name: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver:main" | ||
103 | state: enable | ||
104 | docker_host: "unix://run/user/{{ docker_uid }}/docker.sock" | ||
105 | alias: loki | ||
106 | |||
107 | - name: deploy docker config | ||
108 | when: "'docker_hosts' in group_names" | ||
109 | notify: restart rootless docker | ||
110 | copy: | ||
111 | src: "{{ docker_config }}" | ||
112 | dest: "{{ docker_home }}/.config/docker/daemon.json" | ||
113 | owner: "{{ docker_username }}" | ||
114 | group: "{{ docker_username }}" | ||
115 | mode: '0644' | ||
116 | |||
117 | # docker driver root | ||
118 | |||
119 | - name: enable loki-docker-driver plugin | ||
120 | when: "'docker_hosts' in group_names" | ||
121 | notify: restart docker | ||
122 | community.docker.docker_plugin: | ||
123 | plugin_name: "{{ docker_registry_url }}/{{ docker_registry_username }}/loki-docker-driver:main" | ||
124 | state: enable | ||
125 | alias: loki | ||
126 | |||
127 | - name: deploy docker config | ||
128 | when: "'docker_hosts' in group_names" | ||
129 | notify: restart docker | ||
130 | copy: | ||
131 | src: "{{ docker_config }}" | ||
132 | dest: /etc/docker/daemon.json | ||
133 | owner: root | ||
134 | group: root | ||
135 | mode: '0644' | ||
136 | |||
137 | - name: deploy promtail configuration | ||
138 | notify: restart promtail | ||
| register: promtail_config  # the restart promtail handler checks promtail_config.changed | ||
139 | copy: | ||
140 | src: "{{ promtail_config }}" | ||
141 | dest: /etc/promtail/config.yml | ||
142 | owner: root | ||
143 | group: root | ||
144 | mode: '0644' | ||
145 | |||
146 | - name: enable promtail | ||
147 | systemd: | ||
148 | daemon_reload: yes | ||
149 | enabled: yes | ||
150 | masked: no | ||
151 | name: promtail | ||
diff --git a/roles/services/msmtp_mta/tasks/main.yml b/roles/services/msmtp_mta/tasks/main.yml new file mode 100644 index 0000000..4958acc --- /dev/null +++ b/roles/services/msmtp_mta/tasks/main.yml | |||
@@ -0,0 +1,11 @@ | |||
1 | - name: install msmtp packages | ||
2 | package: | ||
3 | name: "{{ msmtp_mta_packages }}" | ||
4 | |||
5 | - name: copy msmtp config file | ||
6 | copy: | ||
7 | src: "{{ msmtp_mta_config }}" | ||
8 | dest: /etc/msmtprc | ||
9 | owner: root | ||
10 | group: msmtp | ||
11 | mode: '0640' | ||
diff --git a/roles/services/pihole/handlers/main.yml b/roles/services/pihole/handlers/main.yml new file mode 100644 index 0000000..9c1d311 --- /dev/null +++ b/roles/services/pihole/handlers/main.yml | |||
@@ -0,0 +1,14 @@ | |||
1 | - name: restart unbound | ||
2 | service: | ||
3 | name: unbound | ||
4 | state: restarted | ||
5 | |||
6 | - name: restart lighttpd | ||
7 | service: | ||
8 | name: lighttpd | ||
9 | state: restarted | ||
10 | |||
11 | - name: restart ftl | ||
12 | service: | ||
13 | name: pihole-FTL | ||
14 | state: restarted | ||
diff --git a/roles/services/pihole/tasks/main.yml b/roles/services/pihole/tasks/main.yml new file mode 100644 index 0000000..3f3abde --- /dev/null +++ b/roles/services/pihole/tasks/main.yml | |||
@@ -0,0 +1,80 @@ | |||
1 | - name: install packages | ||
2 | package: | ||
3 | name: "{{ pihole_packages }}" | ||
4 | |||
5 | - name: clone pihole repository | ||
6 | git: | ||
7 | repo: https://github.com/pi-hole/pi-hole.git | ||
8 | dest: /tmp/pi-hole | ||
9 | version: v5.17.1 | ||
10 | depth: 1 | ||
11 | |||
12 | - name: create configuration directory | ||
13 | file: | ||
14 | path: /etc/pihole | ||
15 | state: directory | ||
16 | owner: root | ||
17 | group: root | ||
18 | mode: '0755' | ||
19 | |||
20 | - name: copy setupVars.conf | ||
21 | copy: | ||
22 | src: "{{ pihole_setupvars }}" | ||
23 | dest: /etc/pihole/setupVars.conf | ||
24 | owner: root | ||
25 | group: root | ||
26 | mode: '0644' | ||
27 | |||
28 | - name: copy pihole unbound configuration | ||
29 | notify: restart unbound | ||
30 | copy: | ||
31 | src: "{{ pihole_unboundconf }}" | ||
32 | dest: /etc/unbound/unbound.conf.d/pihole.conf | ||
33 | owner: root | ||
34 | group: root | ||
35 | mode: '0644' | ||
36 | |||
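| # Run the upstream pi-hole installer unattended against the pre-seeded setupVars.conf; /etc/pihole/install.log marks a completed install so reruns skip it. | ||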
37 | - name: run installation script | ||
38 | command: | ||
39 | cmd: "/bin/bash '/tmp/pi-hole/automated install/basic-install.sh' --unattended" | ||
40 | creates: /etc/pihole/install.log | ||
41 | ignore_errors: yes | ||
42 | notify: | ||
43 | - restart lighttpd | ||
44 | - restart ftl | ||
45 | |||
46 | - name: change pihole admin password | ||
47 | register: result | ||
48 | changed_when: result.rc == 0 | ||
49 | command: | ||
50 | cmd: "pihole -a -p {{ pihole_password }}" | ||
51 | |||
52 | - name: initialize gravity | ||
53 | register: result | ||
54 | changed_when: result.rc == 0 | ||
55 | command: | ||
56 | cmd: "pihole -g" | ||
57 | |||
58 | - name: allow http (80/tcp) traffic | ||
59 | ufw: | ||
60 | rule: allow | ||
61 | port: '80' | ||
62 | proto: tcp | ||
63 | |||
64 | - name: allow https (443/tcp) traffic | ||
65 | ufw: | ||
66 | rule: allow | ||
67 | port: '443' | ||
68 | proto: tcp | ||
69 | |||
70 | - name: allow dns (53/udp) traffic | ||
71 | ufw: | ||
72 | rule: allow | ||
73 | port: '53' | ||
74 | proto: udp | ||
75 | |||
76 | - name: allow dns tcp (53/tcp) traffic | ||
77 | ufw: | ||
78 | rule: allow | ||
79 | port: '53' | ||
80 | proto: tcp | ||
diff --git a/roles/services/ssh/tasks/main.yml b/roles/services/ssh/tasks/main.yml new file mode 100644 index 0000000..d2cabab --- /dev/null +++ b/roles/services/ssh/tasks/main.yml | |||
@@ -0,0 +1,46 @@ | |||
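| # Harden sshd: public-key-only authentication, no root login, no password or challenge-response logins, PAM disabled; sshd is restarted at the end to apply the changes. | ||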
1 | - name: explicitly only allow pubkey auth | ||
2 | lineinfile: | ||
3 | path: /etc/ssh/sshd_config | ||
4 | regexp: "^#?AuthenticationMethods.*" | ||
5 | line: "AuthenticationMethods publickey" | ||
6 | |||
7 | - name: disable root ssh login | ||
8 | lineinfile: | ||
9 | path: /etc/ssh/sshd_config | ||
10 | regexp: "^#?PermitRootLogin" | ||
11 | line: "PermitRootLogin no" | ||
12 | |||
13 | - name: enable publickey authentication | ||
14 | lineinfile: | ||
15 | path: /etc/ssh/sshd_config | ||
16 | regexp: "^#?PubkeyAuthentication.*" | ||
17 | line: "PubkeyAuthentication yes" | ||
18 | |||
19 | - name: disable password authentication | ||
20 | lineinfile: | ||
21 | path: /etc/ssh/sshd_config | ||
22 | regexp: "^#?PasswordAuthentication.*" | ||
23 | line: "PasswordAuthentication no" | ||
24 | |||
25 | - name: disable challenge response | ||
26 | lineinfile: | ||
27 | path: /etc/ssh/sshd_config | ||
28 | regexp: "^#?ChallengeResponseAuthentication.*" | ||
29 | line: "ChallengeResponseAuthentication no" | ||
30 | |||
31 | - name: disable pam | ||
32 | lineinfile: | ||
33 | path: /etc/ssh/sshd_config | ||
34 | regexp: "^#?UsePAM.*" | ||
35 | line: "UsePAM no" | ||
36 | |||
37 | - name: ensure sshd is enabled | ||
38 | systemd: | ||
39 | name: sshd | ||
40 | enabled: yes | ||
41 | masked: no | ||
42 | |||
43 | - name: restart sshd | ||
44 | service: | ||
45 | name: sshd | ||
46 | state: restarted | ||
diff --git a/roles/services/unattended_upgrades/tasks/main.yml b/roles/services/unattended_upgrades/tasks/main.yml new file mode 100644 index 0000000..bad3c02 --- /dev/null +++ b/roles/services/unattended_upgrades/tasks/main.yml | |||
@@ -0,0 +1,63 @@ | |||
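| # Run the apt update/upgrade timers every four hours and have unattended-upgrades mail a report for every run. | ||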
1 | - name: install packages | ||
2 | package: | ||
3 | name: "{{ unattended_upgrades_packages }}" | ||
4 | state: latest | ||
5 | |||
6 | - name: edit apt update timer | ||
7 | lineinfile: | ||
8 | path: /etc/systemd/system/timers.target.wants/apt-daily.timer | ||
9 | regexp: "OnCalendar.*" | ||
10 | line: "OnCalendar=*-*-* 0,4,8,12,16,20:00" | ||
11 | |||
12 | - name: edit apt update timer | ||
13 | lineinfile: | ||
14 | path: /etc/systemd/system/timers.target.wants/apt-daily.timer | ||
15 | regexp: "RandomizedDelaySec.*" | ||
16 | line: "RandomizedDelaySec=10m" | ||
17 | |||
18 | - name: edit apt upgrade timer | ||
19 | lineinfile: | ||
20 | path: /etc/systemd/system/timers.target.wants/apt-daily-upgrade.timer | ||
21 | regexp: "OnCalendar.*" | ||
22 | line: "OnCalendar=*-*-* 0,4,8,12,16,20:30" | ||
23 | |||
24 | - name: edit apt upgrade timer | ||
25 | lineinfile: | ||
26 | path: /etc/systemd/system/timers.target.wants/apt-daily-upgrade.timer | ||
27 | regexp: "RandomizedDelaySec.*" | ||
28 | line: "RandomizedDelaySec=5m" | ||
29 | |||
30 | - name: edit APT::Periodic settings | ||
31 | lineinfile: | ||
32 | path: /etc/apt/apt.conf.d/20auto-upgrades | ||
33 | regexp: "APT::Periodic::Update.*" | ||
34 | line: 'APT::Periodic::Update-Package-Lists "always";' | ||
35 | |||
36 | - name: edit APT::Periodic settings | ||
37 | lineinfile: | ||
38 | path: /etc/apt/apt.conf.d/20auto-upgrades | ||
39 | regexp: "APT::Periodic::Unattended.*" | ||
40 | line: 'APT::Periodic::Unattended-Upgrade "always";' | ||
41 | |||
42 | - name: configure unattended upgrades | ||
43 | lineinfile: | ||
44 | path: /etc/apt/apt.conf.d/50unattended-upgrades | ||
45 | regexp: ".*Unattended-Upgrade::Mail.*" | ||
46 | line: 'Unattended-Upgrade::Mail "{{ uu_mail_to }}";' | ||
47 | |||
48 | - name: configure unattended upgrades | ||
49 | lineinfile: | ||
50 | path: /etc/apt/apt.conf.d/50unattended-upgrades | ||
51 | insertafter: 'Unattended-Upgrade::Mail "{{ uu_mail_to }}";' | ||
52 | line: 'Unattended-Upgrade::Sender "{{ uu_mail_from }}";' | ||
53 | |||
54 | - name: configure unattended upgrades | ||
55 | lineinfile: | ||
56 | path: /etc/apt/apt.conf.d/50unattended-upgrades | ||
57 | regexp: ".*Unattended-Upgrade::MailReport.*" | ||
58 | line: 'Unattended-Upgrade::MailReport "always";' | ||
59 | |||
60 | - name: restart service | ||
61 | service: | ||
62 | name: unattended-upgrades | ||
63 | state: restarted | ||