forked from jakub/ansible

Compare commits

31 Commits

Author        SHA1         Message       Date
martin.fencl  4c410d8ebe   edit          2026-02-03 19:32:30 +01:00
martin.fencl  cc743bda16   edit          2026-02-03 19:29:50 +01:00
martin.fencl  e2390fe2e9   edit          2026-02-03 19:28:39 +01:00
martin.fencl  904984df8b   .             2026-02-03 19:27:44 +01:00
martin.fencl  512a4e6a63   .             2026-02-03 19:23:47 +01:00
martin.fencl  da20e9c625   .             2026-02-03 19:19:28 +01:00
martin.fencl  b24135352c   .             2026-02-03 19:18:25 +01:00
martin.fencl  6d580e80b0   .             2026-02-03 19:17:16 +01:00
martin.fencl  1b26c03c28   .             2026-02-03 19:14:35 +01:00
martin.fencl  0a164648da   .             2026-02-03 19:07:42 +01:00
martin.fencl  781a8e7dda   add           2026-02-03 19:05:58 +01:00
martin.fencl  24df01f60a   edit          2026-02-03 18:52:55 +01:00
martin.fencl  325cc99c09   .             2026-02-03 18:51:53 +01:00
martin.fencl  855c7992dd   edit          2026-02-03 18:46:48 +01:00
martin.fencl  9002c95017   edit          2026-02-03 18:43:06 +01:00
martin.fencl  165b8f1a4e   .             2026-02-03 18:41:51 +01:00
martin.fencl  cbedeca846   .             2026-02-03 18:39:09 +01:00
martin.fencl  a7cda14994   .             2026-02-03 18:36:09 +01:00
martin.fencl  9aed30136d   .             2026-02-03 18:32:15 +01:00
martin.fencl  eb5d587334   ,             2026-02-03 18:26:46 +01:00
martin.fencl  87345121ea   edit immich   2026-02-03 18:22:32 +01:00
martin.fencl  8946248d6f   edit immich   2026-02-03 18:21:12 +01:00
martin.fencl  12d9ea51b8   edit immich   2026-02-03 18:13:24 +01:00
martin.fencl  fccd326374   edit homarr   2026-02-03 18:06:24 +01:00
martin.fencl  90c591e389   edit homarrr  2026-02-03 18:04:58 +01:00
martin.fencl  2b067fa6cb   edit homarr   2026-02-03 18:02:54 +01:00
martin.fencl  309394b503   edit homarr   2026-02-03 17:57:37 +01:00
martin.fencl  fa2f4fa6d5   edit homarr   2026-02-03 17:54:06 +01:00
martin.fencl  1938fd6ec8   edit homarr   2026-02-03 17:52:48 +01:00
martin.fencl  48b3f6059a   edit homarr   2026-02-03 17:50:51 +01:00
martin.fencl  d1aab54fed   edit homarr   2026-02-03 17:47:28 +01:00
25 changed files with 1775 additions and 909 deletions


@@ -0,0 +1,23 @@
version: '3'
services:
collabora:
image: collabora/code:latest
container_name: collabora
restart: unless-stopped
networks:
- cloud
environment:
- TZ=Europe/Prague
- password=password
- username=nextcloud
- domain=cloud.martinfencl.eu
- extra_params=--o:ssl.enable=false --o:ssl.termination=true
- aliasgroup1=https://cloud.martinfencl.eu:443,https://collabora.martinfencl.eu:443
- dictionaries=de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru cs_CZ
ports:
- 9980:9980
networks:
cloud:
driver: bridge
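
Once the stack is up, the WOPI endpoints that the update playbook below polls can also be probed by hand; a quick sketch (local port from this compose file, public URL from the playbook):

  curl -s http://127.0.0.1:9980/hosting/discovery | head -n 3    # should start with <wopi-discovery ...> XML
  curl -s https://collabora.martinfencl.eu/hosting/capabilities  # JSON with productVersion, serverId, convert-to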


@@ -0,0 +1,13 @@
services:
homarr:
container_name: homarr
image: ghcr.io/homarr-labs/homarr:latest
restart: unless-stopped
volumes:
- /var/run/docker.sock:/var/run/docker.sock # Optional, only if you want docker integration
- /data/compose/homarr/appdata:/appdata
environment:
- SECRET_ENCRYPTION_KEY=4fb16028fa1788d9a24fa93a323aa4a278524bed177c8c38454f4c4068c1b9b6
ports:
- '7575:7575'
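
Rather than committing a literal SECRET_ENCRYPTION_KEY, the 64-character hex value can be generated fresh; a minimal sketch, assuming openssl is available on the host:

  openssl rand -hex 32   # prints 64 hex characters suitable for SECRET_ENCRYPTION_KEY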


@@ -0,0 +1,5 @@
services:
immich-server:
volumes:
- /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos
- /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos
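
This short file is a compose override: docker compose merges it on top of the full upstream compose file, so the extra photo mounts survive each upstream refresh. A sketch of how the pair is consumed together (project name and -f ordering as in the Immich update playbook below):

  docker compose -p immich \
    -f docker-compose.yml \
    -f docker-compose.override.yml \
    up -d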


@@ -0,0 +1,77 @@
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.
name: immich
services:
immich-server:
container_name: immich_server
image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
# extends:
# file: hwaccel.transcoding.yml
# service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
volumes:
- ${UPLOAD_LOCATION}:/usr/src/app/upload
#- /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos:ro # read-only external library
#- /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos:ro # read-only external library
- /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos
- /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos
- /etc/localtime:/etc/localtime:ro
env_file:
- .env
ports:
- '2283:2283'
depends_on:
- redis
- database
restart: always
healthcheck:
disable: false
immich-machine-learning:
container_name: immich_machine_learning
# For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
# Example tag: ${IMMICH_VERSION:-release}-cuda
image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
# extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
# file: hwaccel.ml.yml
# service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
volumes:
- model-cache:/cache
env_file:
- .env
restart: always
healthcheck:
disable: false
redis:
container_name: immich_redis
image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
healthcheck:
test: redis-cli ping || exit 1
restart: always
database:
container_name: immich_postgres
image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
environment:
POSTGRES_PASSWORD: ${DB_PASSWORD}
POSTGRES_USER: ${DB_USERNAME}
POSTGRES_DB: ${DB_DATABASE_NAME}
POSTGRES_INITDB_ARGS: '--data-checksums'
# Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
# DB_STORAGE_TYPE: 'HDD'
volumes:
# Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
- ${DB_DATA_LOCATION}:/var/lib/postgresql/data
shm_size: 128mb
restart: always
volumes:
model-cache:
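
The ${...} references above resolve from a .env file kept next to the compose file. A minimal sketch with placeholder values (every value here is hypothetical; the update playbook below can even reconstruct this file from running containers):

  UPLOAD_LOCATION=/opt/immich/library
  DB_DATA_LOCATION=/opt/immich/postgres
  DB_USERNAME=postgres
  DB_PASSWORD=changeme
  DB_DATABASE_NAME=immich
  IMMICH_VERSION=release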


@@ -0,0 +1,33 @@
services:
jellyfin:
image: lscr.io/linuxserver/jellyfin:latest
container_name: jellyfin
restart: unless-stopped
ports:
- "8096:8096"
- "7359:7359/udp"
- "1900:1900/udp"
environment:
- TZ=Europe/Prague
- PUID=0
- PGID=0
volumes:
- /opt/jellyfin/config:/config
- /opt/jellyfin/cache:/cache
- /mnt/films:/media/films:ro
- /mnt/books:/media/books:ro
- /mnt/ondrulin:/media/ondrulin:ro
devices:
- /dev/dri:/dev/dri
group_add:
- "104"
- "44"
tmpfs:
- /transcode:rw,size=8g,mode=1777
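
The numeric GIDs under group_add grant the container access to /dev/dri for hardware transcoding; 104 and 44 are commonly the host's render and video groups, but the values vary per distribution. A hedged way to confirm them on the host:

  getent group render video   # e.g. "render:x:104:" and "video:x:44:"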


@@ -0,0 +1,26 @@
version: "3.8"
services:
semaphore:
image: semaphoreui/semaphore:latest
user: "0:0"
ports:
- "3000:3000"
environment:
SEMAPHORE_DB_DIALECT: bolt
SEMAPHORE_DB_PATH: /etc/semaphore/semaphore.db.bolt # full path to file!
SEMAPHORE_TMP_PATH: /var/lib/semaphore/projects
SEMAPHORE_ADMIN: admin
SEMAPHORE_ADMIN_NAME: admin
SEMAPHORE_ADMIN_EMAIL: admin@localhost
SEMAPHORE_ADMIN_PASSWORD: changeme
SEMAPHORE_ACCESS_KEY_ENCRYPTION: "rZffGjw4BGlwoM+66fStJ4Pg+ivLc5ghtty3yoscltY="
volumes:
- /data/compose/semaphore/db:/etc/semaphore
- /data/compose/semaphore/projects:/var/lib/semaphore/projects
- /data/compose/semaphore/backups:/opt/mikrotik_backups/
- /data/compose/semaphore/ansible.cfg:/etc/ansible.cfg:ro # mount as a file, not into /etc/ansible/ansible.cfg
restart: unless-stopped
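
SEMAPHORE_ACCESS_KEY_ENCRYPTION expects a base64-encoded 32-byte key; instead of reusing the one above, a fresh key can be generated with a one-liner like this (a sketch, any cryptographically random source works):

  head -c32 /dev/urandom | base64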


@@ -0,0 +1,12 @@
version: '3.8'
services:
uptime-kuma:
container_name: uptime-kuma-dev
image: louislam/uptime-kuma:latest
volumes:
#- ./data:/app/data
- /data/compose/kuma/data:/app/data
ports:
- "3001:3001" # <Host Port>:<Container Port>

inv_vm

@@ -1,3 +1,5 @@
[vm]
pve1_vm ansible_host=192.168.69.253
pve2_vm ansible_host=192.168.69.254
[lxc]
pve2_lxc_jellyfin ansible_host=192.168.69.252
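
With the new [lxc] group in place, the container can be targeted directly from the controller; a quick connectivity sketch (inventory name from the header above, playbook name from the diff further below):

  ansible -i inv_vm pve2_lxc_jellyfin -m ping
  ansible-playbook -i inv_vm update_portainer_agent_lxc.yml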


@@ -1,174 +1,92 @@
# nextcloud/update_collabora.yml
- name: Update Collabora CODE on VM via Proxmox
hosts: linux_servers
- name: Update Collabora
hosts: pve2_vm
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# Compose sync (controller -> target)
compose_local_dir: "{{ lookup('env','PWD') }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Collabora specifics ---
collabora_debug_caps: true
collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"
# Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
collabora_project: "nextcloud-collabora"
collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
collabora_service: "collabora"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
collabora_commands:
- "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"
# Collabora settings
collabora_project: collabora
collabora_compose_filename: "docker-compose-collabora.yml"
collabora_service: collabora
collabora_port: 9980
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
mode: "0755"
- name: Run Collabora update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ collabora_commands }}"
loop_control:
index_var: idx # <-- capture loop index here
label: "cmd-{{ idx }}" # <-- use idx instead of loop.index
register: collab_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show outputs for each Collabora command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ collab_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Collabora command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Collabora update commands succeeded."
loop: "{{ collab_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Collabora | Wait for capabilities (controller first)
ansible.builtin.uri:
url: "{{ collabora_caps_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: caps_controller
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 2
until: caps_controller.status == 200
failed_when: false
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
mode: "0644"
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
mode: "0755"
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
- name: Pull latest Collabora image
community.docker.docker_compose_v2:
project_name: "{{ collabora_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ collabora_compose_filename }}"
pull: always
- name: Recreate Collabora service
community.docker.docker_compose_v2:
project_name: "{{ collabora_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ collabora_compose_filename }}"
services:
- "{{ collabora_service }}"
state: present
recreate: always
- name: Wait for Collabora port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ collabora_port }}"
timeout: 120
- name: Check Collabora discovery endpoint (retry until ready)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ collabora_port }}/hosting/discovery"
status_code: 200
return_content: true
register: collabora_http
retries: 40
delay: 3
until: collabora_http.status == 200 and ('<wopi-discovery' in (collabora_http.content | default('')))
changed_when: false
- name: Collabora | VM-side fetch (pure JSON via Python) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
sys.stdout.write(r.read().decode())
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: caps_vm
changed_when: false
failed_when: false
when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Collabora | Choose JSON (controller wins, else VM)
ansible.builtin.set_fact:
collab_caps_json: >-
{{
(caps_controller.json
if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
else (
(caps_vm.stdout | default('') | trim | length > 0)
| ternary((caps_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Collabora | Print concise summary
ansible.builtin.debug:
msg: >-
Collabora {{ collab_caps_json.productVersion | default('?') }}
({{ collab_caps_json.productName | default('?') }}),
convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
serverId={{ collab_caps_json.serverId | default('n/a') }}
when: collab_caps_json is defined and DEBUG == 1
- name: Collabora | Capabilities unavailable (after retries)
ansible.builtin.debug:
msg: "Capabilities endpoint není dostupný ani po pokusech."
when: collab_caps_json is not defined and DEBUG == 1
# Optional full JSON (debug)
- name: Collabora | Full JSON (debug)
ansible.builtin.debug:
var: collab_caps_json
when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1

old/update_collabora.yml (new file)

@@ -0,0 +1,174 @@
# nextcloud/update_collabora.yml
- name: Update Collabora CODE on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Collabora specifics ---
collabora_debug_caps: true
collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"
# Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
collabora_project: "nextcloud-collabora"
collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
collabora_service: "collabora"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
collabora_commands:
- "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Collabora update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ collabora_commands }}"
loop_control:
index_var: idx # <-- capture loop index here
label: "cmd-{{ idx }}" # <-- use idx instead of loop.index
register: collab_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show outputs for each Collabora command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ collab_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Collabora command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Collabora update commands succeeded."
loop: "{{ collab_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Collabora | Wait for capabilities (controller first)
ansible.builtin.uri:
url: "{{ collabora_caps_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: caps_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 2
until: caps_controller.status == 200
failed_when: false
changed_when: false
- name: Collabora | VM-side fetch (pure JSON via Python) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
sys.stdout.write(r.read().decode())
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: caps_vm
changed_when: false
failed_when: false
when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Collabora | Choose JSON (controller wins, else VM)
ansible.builtin.set_fact:
collab_caps_json: >-
{{
(caps_controller.json
if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
else (
(caps_vm.stdout | default('') | trim | length > 0)
| ternary((caps_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Collabora | Print concise summary
ansible.builtin.debug:
msg: >-
Collabora {{ collab_caps_json.productVersion | default('?') }}
({{ collab_caps_json.productName | default('?') }}),
convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
serverId={{ collab_caps_json.serverId | default('n/a') }}
when: collab_caps_json is defined and DEBUG == 1
- name: Collabora | Capabilities unavailable (after retries)
ansible.builtin.debug:
msg: "Capabilities endpoint není dostupný ani po pokusech."
when: collab_caps_json is not defined and DEBUG == 1
# Optional full JSON (debug)
- name: Collabora | Full JSON (debug)
ansible.builtin.debug:
var: collab_caps_json
when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1

old/update_homarr.yml (new file)

@@ -0,0 +1,194 @@
# update_homarr.yml
- name: Update Homarr on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Homarr specifics
homarr_project: "homarr"
homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
homarr_service: "homarr"
homarr_image: "ghcr.io/homarr-labs/homarr:latest"
homarr_port: 7575
# Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"
# Fixed container name used in your compose (avoid conflicts with any leftover container)
homarr_container_name: "homarr"
# Retry policy (same pattern as Kuma): 25x with 2s delay
homarr_retries: "{{ RETRIES }}"
homarr_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
homarr_commands:
- "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Homarr update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ homarr_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: homarr_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Homarr command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ homarr_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Homarr command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Homarr update commands succeeded."
loop: "{{ homarr_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Homarr | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
status_code: 200
register: homarr_controller
delegate_to: localhost
run_once: true
when: homarr_url is defined and (homarr_url | length) > 0
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: homarr_controller.status == 200
failed_when: false
changed_when: false
- name: Homarr | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Homarr homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: homarr_vm
changed_when: false
failed_when: false
when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
- name: Homarr | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
homarr_home_html: >-
{{
(
homarr_controller.content
if (homarr_controller is defined)
and ((homarr_controller.status|default(0))==200)
and (homarr_controller.content is defined)
else
(homarr_vm.stdout | default('') | trim)
)
}}
when:
- (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
or ((homarr_vm.stdout | default('') | trim | length) > 0)
- name: Homarr | Print concise summary
ansible.builtin.debug:
msg: >-
Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (homarr_home_html | default('')) | length }};
contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
when: DEBUG == 1
- name: Homarr | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Homarr web není dostupný ani po pokusech."
when: homarr_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Homarr | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (homarr_home_html | default(''))[:500] }}"
when: homarr_home_html is defined and DEBUG == 1

old/update_immich.yml (new file)

@@ -0,0 +1,313 @@
# update_immich.yml
- name: Update Immich on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Immich specifics
immich_dir: "/opt/immich"
immich_project: "immich"
immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
immich_compose_file: "/opt/immich/docker-compose.yml"
immich_override_file: "/opt/immich/docker-compose.override.yml"
immich_port: 2283
# Optional external URL for controller-side readiness check (e.g., https://photos.example.com)
immich_url: "{{ lookup('env', 'IMMICH_URL') | default('', true) }}"
# Retry policy
immich_retries: "{{ RETRIES }}"
immich_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Compose command (always include override to keep local mounts separate from upstream compose)
immich_compose_cmd: >-
{{ docker_prefix }} compose
-p {{ immich_project }}
-f {{ immich_compose_file }}
-f {{ immich_override_file }}
# Commands to run on the target VM
immich_commands:
- "cd {{ immich_dir }}"
- |
cd {{ immich_dir }}
mkdir -p backups
if [ -f docker-compose.yml ]; then
cp -a docker-compose.yml "backups/docker-compose.yml.$(date +%F_%H%M%S).bak"
fi
if [ -f .env ]; then
cp -a .env "backups/.env.$(date +%F_%H%M%S).bak"
fi
if [ -f docker-compose.override.yml ]; then
cp -a docker-compose.override.yml "backups/docker-compose.override.yml.$(date +%F_%H%M%S).bak"
fi
- |
cd {{ immich_dir }}
# Download latest compose from Immich releases (requires curl or wget)
if command -v curl >/dev/null 2>&1; then
curl -fsSL -o docker-compose.yml "{{ immich_compose_url }}"
elif command -v wget >/dev/null 2>&1; then
wget -qO docker-compose.yml "{{ immich_compose_url }}"
else
echo "Neither curl nor wget is available on the VM."
exit 1
fi
- |
cd {{ immich_dir }}
# Ensure override compose exists (create if missing)
if [ ! -f "{{ immich_override_file }}" ]; then
printf '%s\n' \
'services:' \
' immich-server:' \
' volumes:' \
' - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos' \
' - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos' \
> "{{ immich_override_file }}"
fi
# Fail early if override is still missing/empty
test -s "{{ immich_override_file }}"
- |
cd {{ immich_dir }}
# Ensure .env exists. If missing, try to reconstruct it from running containers to avoid breaking DB creds.
python3 - <<'PY'
import json
import subprocess
from pathlib import Path
env_path = Path(".env")
if env_path.exists():
raise SystemExit(0)
def run(cmd):
p = subprocess.run(cmd, capture_output=True, text=True)
return p.returncode, p.stdout, p.stderr
rc, out, err = run(["bash", "-lc", "command docker inspect immich_postgres immich_server"])
if rc != 0 or not out.strip():
print("ERROR: .env is missing and cannot inspect running containers (immich_postgres/immich_server).", flush=True)
print("Create /opt/immich/.env manually or ensure the containers exist.", flush=True)
raise SystemExit(1)
data = json.loads(out)
by_name = {}
for c in data:
name = (c.get("Name") or "").lstrip("/")
by_name[name] = c
pg = by_name.get("immich_postgres")
srv = by_name.get("immich_server")
if not pg or not srv:
print("ERROR: Could not find immich_postgres and immich_server in docker inspect output.", flush=True)
raise SystemExit(1)
def env_map(container):
m = {}
for kv in (container.get("Config", {}).get("Env") or []):
if "=" in kv:
k, v = kv.split("=", 1)
m[k] = v
return m
def find_mount_source(container, dest):
for m in (container.get("Mounts") or []):
if m.get("Destination") == dest:
return m.get("Source")
return ""
pg_env = env_map(pg)
db_user = pg_env.get("POSTGRES_USER", "")
db_pass = pg_env.get("POSTGRES_PASSWORD", "")
db_name = pg_env.get("POSTGRES_DB", "")
db_data = find_mount_source(pg, "/var/lib/postgresql/data")
upload_loc = find_mount_source(srv, "/usr/src/app/upload")
# Try to preserve the currently used image tag as IMMICH_VERSION (optional but safer)
immich_version = ""
image = (srv.get("Config", {}).get("Image") or "")
if ":" in image and "@" not in image:
immich_version = image.rsplit(":", 1)[-1]
elif ":" in image and "@" in image:
# image like repo:tag@sha256:...
immich_version = image.split("@", 1)[0].rsplit(":", 1)[-1]
missing = []
for k, v in [
("DB_USERNAME", db_user),
("DB_PASSWORD", db_pass),
("DB_DATABASE_NAME", db_name),
("DB_DATA_LOCATION", db_data),
("UPLOAD_LOCATION", upload_loc),
]:
if not v:
missing.append(k)
if missing:
print("ERROR: Could not reconstruct these .env values from containers: " + ", ".join(missing), flush=True)
raise SystemExit(1)
lines = [
f"UPLOAD_LOCATION={upload_loc}",
f"DB_DATA_LOCATION={db_data}",
f"DB_USERNAME={db_user}",
f"DB_PASSWORD={db_pass}",
f"DB_DATABASE_NAME={db_name}",
]
if immich_version:
lines.append(f"IMMICH_VERSION={immich_version}")
env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
print("Created .env from running containers.", flush=True)
PY
- |
cd {{ immich_dir }}
# Comment out healthcheck.start_interval if present (safe no-op if missing)
sed -i -E 's/^([[:space:]]*)start_interval:/\1# start_interval:/' docker-compose.yml || true
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} config >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} pull >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} up -d --remove-orphans --force-recreate >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ immich_commands }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
register: immich_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Show outputs for each Immich command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ immich_cmds.results }}"
when: DEBUG == 1
run_once: true
- name: Fail play if any Immich command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Immich update commands succeeded."
loop: "{{ immich_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
run_once: true
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Immich | Wait for API ping (controller first, with retries)
ansible.builtin.uri:
url: "{{ (immich_url | regex_replace('/$','')) + '/api/server/ping' }}"
method: GET
return_content: true
validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
status_code: 200
register: immich_controller
delegate_to: localhost
run_once: true
when: immich_url is defined and (immich_url | length) > 0
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: immich_controller.status == 200 and ('pong' in (immich_controller.content | default('')))
failed_when: false
changed_when: false
- name: Immich | VM-side ping (JSON via Python, with retries)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Ping Immich API from localhost and print response to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/api/server/ping", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: immich_vm
changed_when: false
failed_when: false
when: immich_controller.status | default(0) != 200
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: (immich_vm.stdout | default('') | trim | length) > 0 and ('pong' in (immich_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Immich | Print concise summary
ansible.builtin.debug:
msg: >-
Immich API ping {{ 'OK' if (('pong' in (immich_controller.content|default(''))) or ('pong' in (immich_vm.stdout|default('')))) else 'NOT OK' }}.
Source={{ 'controller' if (immich_controller.status|default(0))==200 else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }}.
when: DEBUG == 1
run_once: true

old/update_semaphore.yml (new file)

@@ -0,0 +1,65 @@
# update_semaphore.yml
- name: Update Semaphore on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}" # IP vm-portainer
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
# --- Semaphore specifics ---
semaphore_project: "semaphore"
semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
semaphore_service: "semaphore"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Semaphore self-update on VM in background (nohup)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
nohup bash -c '
unalias docker 2>/dev/null || true
DOCKER_CLI_HINTS=0 docker compose \
-p {{ semaphore_project }} \
-f {{ semaphore_compose_file }} \
up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
' >/dev/null 2>&1 &
environment:
SSHPASS: "{{ vm_pass }}"
register: semaphore_update
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show result of Semaphore self-update (debug)
ansible.builtin.debug:
msg: |
RC: {{ semaphore_update.rc }}
STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
when: DEBUG == 1
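
The nohup-and-background pattern matters here because Semaphore is updating itself: recreating the container kills the very task runner executing this play, so the compose command is detached to outlive both the SSH session and the runner. The result can then be checked by hand on the VM (a sketch):

  docker ps --filter name=semaphore --format '{{.Names}} {{.Status}}'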

old/update_uptime_kuma.yml (new file)

@@ -0,0 +1,194 @@
# nextcloud/update_uptime_kuma.yml
- name: Update Uptime Kuma on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Uptime Kuma specifics
kuma_project: "uptime-kuma"
kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
kuma_service: "uptime-kuma"
kuma_image: "louislam/uptime-kuma:latest"
kuma_port: 3001
# Optional external URL for controller-side readiness check (e.g., https://kuma.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"
# Fixed container name used in your compose (conflicts with previous/Portainer-run container)
kuma_container_name: "uptime-kuma-dev"
# Retry policy
kuma_retries: "{{ RETRIES }}"
kuma_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs like in Collabora play)
kuma_commands:
- "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Uptime Kuma update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kuma_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kuma_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Uptime Kuma command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kuma_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Uptime Kuma command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Uptime Kuma update commands succeeded."
loop: "{{ kuma_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Kuma | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (kuma_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
status_code: 200
register: kuma_controller
delegate_to: localhost
run_once: true
when: kuma_url is defined and (kuma_url | length) > 0
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: kuma_controller.status == 200
failed_when: false
changed_when: false
- name: Kuma | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Kuma homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: kuma_vm
changed_when: false
failed_when: false
when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}" # hide command and output when not debugging
- name: Kuma | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
kuma_home_html: >-
{{
(
kuma_controller.content
if (kuma_controller is defined)
and ((kuma_controller.status|default(0))==200)
and (kuma_controller.content is defined)
else
(kuma_vm.stdout | default('') | trim)
)
}}
when:
- (kuma_controller is defined and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined))
or ((kuma_vm.stdout | default('') | trim | length) > 0)
- name: Kuma | Print concise summary
ansible.builtin.debug:
msg: >-
Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((kuma_controller is defined) and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined)) else 'vm' if (kuma_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (kuma_home_html | default('')) | length }};
contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
when: DEBUG == 1
- name: Kuma | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Kuma web není dostupná ani po pokusech."
when: kuma_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Kuma | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (kuma_home_html | default(''))[:500] }}"
when: kuma_home_html is defined and DEBUG == 1


@@ -0,0 +1,69 @@
# update_portainer_agent_lxc.yml
- name: Update Portainer Agent (LXC, no compose)
hosts: pve2_lxc_jellyfin
gather_facts: false
vars:
agent_container_name: portainer_agent
agent_port: 9001
tasks:
- name: Check if agent container exists
ansible.builtin.command:
argv:
- bash
- -lc
- "docker ps -a --format '{{ \"{{\" }}.Names{{ \"}}\" }}' | grep -x '{{ agent_container_name }}'"
register: agent_exists
changed_when: false
failed_when: false
- name: Abort if agent container is missing
ansible.builtin.fail:
msg: "Container '{{ agent_container_name }}' not found."
when: agent_exists.rc != 0
- name: Read current agent image
ansible.builtin.command:
argv:
- bash
- -lc
- "docker inspect -f '{{ \"{{\" }}.Config.Image{{ \"}}\" }}' {{ agent_container_name }}"
register: agent_image
changed_when: false
- name: Pull latest image tag for current agent image
ansible.builtin.command:
argv:
- bash
- -lc
- "docker pull {{ agent_image.stdout | trim }}"
changed_when: true
- name: Recreate agent container with standard Portainer Agent args
ansible.builtin.command:
argv:
- bash
- -lc
- |
set -euo pipefail
# Stop/remove old container
docker rm -f "{{ agent_container_name }}" >/dev/null 2>&1 || true
# Run Portainer Agent with common, safe defaults
docker run -d \
--name "{{ agent_container_name }}" \
--restart=always \
-p {{ agent_port }}:9001 \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/docker/volumes:/var/lib/docker/volumes \
"{{ agent_image.stdout | trim }}"
changed_when: true
- name: Wait for agent port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ agent_port }}"
timeout: 60
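
The {{ "{{" }}.Names{{ "}}" }} constructions in the tasks above escape Docker's Go-template braces from Ansible's Jinja2 templating: each {{ "{{" }} renders to a literal {{, so the shell on the target receives plain Go-template syntax. A sketch of the commands after rendering:

  docker ps -a --format '{{.Names}}' | grep -x 'portainer_agent'
  docker inspect -f '{{.Config.Image}}' portainer_agent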


@@ -0,0 +1,69 @@
# update_portainer_agent_vm.yml
- name: Update Portainer Agent (VM, no compose)
hosts: pve2_vm
gather_facts: false
vars:
agent_container_name: portainer_agent
agent_port: 9001
tasks:
- name: Check if agent container exists
ansible.builtin.command:
argv:
- bash
- -lc
- "docker ps -a --format '{{ \"{{\" }}.Names{{ \"}}\" }}' | grep -x '{{ agent_container_name }}'"
register: agent_exists
changed_when: false
failed_when: false
- name: Abort if agent container is missing
ansible.builtin.fail:
msg: "Container '{{ agent_container_name }}' not found."
when: agent_exists.rc != 0
- name: Read current agent image
ansible.builtin.command:
argv:
- bash
- -lc
- "docker inspect -f '{{ \"{{\" }}.Config.Image{{ \"}}\" }}' {{ agent_container_name }}"
register: agent_image
changed_when: false
- name: Pull latest image tag for current agent image
ansible.builtin.command:
argv:
- bash
- -lc
- "docker pull {{ agent_image.stdout | trim }}"
changed_when: true
- name: Recreate agent container with standard Portainer Agent args
ansible.builtin.command:
argv:
- bash
- -lc
- |
set -euo pipefail
# Stop/remove old container
docker rm -f "{{ agent_container_name }}" >/dev/null 2>&1 || true
# Run Portainer Agent with common, safe defaults
docker run -d \
--name "{{ agent_container_name }}" \
--restart=always \
-p {{ agent_port }}:9001 \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/docker/volumes:/var/lib/docker/volumes \
"{{ agent_image.stdout | trim }}"
changed_when: true
- name: Wait for agent port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ agent_port }}"
timeout: 60


@@ -1,194 +1,91 @@
# update_homarr.yml
- name: Update Homarr on VM via Proxmox
hosts: linux_servers
- name: Update Homarr
hosts: pve2_vm
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# Compose sync (controller -> target)
compose_local_dir: "{{ playbook_dir }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Homarr specifics
homarr_project: "homarr"
homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
homarr_service: "homarr"
homarr_image: "ghcr.io/homarr-labs/homarr:latest"
# Homarr settings
homarr_project: homarr
homarr_compose_filename: "docker-compose-homarr.yml"
homarr_service: homarr
homarr_port: 7575
# Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"
# Fixed container name used in your compose (avoid conflicts with any leftover container)
homarr_container_name: "homarr"
# Retry policy (same pattern as Kuma): 25x with 2s delay
homarr_retries: "{{ RETRIES }}"
homarr_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
homarr_commands:
- "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
mode: "0755"
- name: Run Homarr update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ homarr_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: homarr_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Homarr command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ homarr_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Homarr command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Homarr update commands succeeded."
loop: "{{ homarr_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Homarr | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
status_code: 200
register: homarr_controller
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
when: homarr_url is defined and (homarr_url | length) > 0
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: homarr_controller.status == 200
failed_when: false
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
mode: "0644"
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
mode: "0755"
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
- name: Pull latest Homarr image
community.docker.docker_compose_v2:
project_name: "{{ homarr_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ homarr_compose_filename }}"
pull: always
- name: Recreate Homarr service
community.docker.docker_compose_v2:
project_name: "{{ homarr_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ homarr_compose_filename }}"
services:
- "{{ homarr_service }}"
state: present
recreate: always
- name: Wait for Homarr port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ homarr_port }}"
timeout: 60
- name: Check Homarr HTTP endpoint (retry until ready)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ homarr_port }}/"
status_code: 200
register: homarr_http
retries: 30
delay: 3
until: homarr_http.status == 200
changed_when: false
- name: Homarr | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Homarr homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: homarr_vm
changed_when: false
failed_when: false
when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
- name: Homarr | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
homarr_home_html: >-
{{
(
homarr_controller.content
if (homarr_controller is defined)
and ((homarr_controller.status|default(0))==200)
and (homarr_controller.content is defined)
else
(homarr_vm.stdout | default('') | trim)
)
}}
when:
- (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
or ((homarr_vm.stdout | default('') | trim | length) > 0)
- name: Homarr | Print concise summary
ansible.builtin.debug:
msg: >-
Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (homarr_home_html | default('')) | length }};
contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
when: DEBUG == 1
- name: Homarr | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Homarr web není dostupný ani po pokusech."
when: homarr_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Homarr | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (homarr_home_html | default(''))[:500] }}"
when: homarr_home_html is defined and DEBUG == 1


@@ -1,40 +0,0 @@
# update_homarr2.yml
- name: Update Homarr
hosts: pve2_vm
gather_facts: false
vars:
homarr_project: homarr
homarr_compose_file: /data/compose/homarr/docker-compose-homarr.yml
homarr_service: homarr
homarr_port: 7575
tasks:
- name: Pull latest Homarr image
community.docker.docker_compose_v2:
project_src: "{{ homarr_compose_file | dirname }}"
files:
- "{{ homarr_compose_file | basename }}"
pull: always
- name: Recreate Homarr service
community.docker.docker_compose_v2:
project_src: "{{ homarr_compose_file | dirname }}"
files:
- "{{ homarr_compose_file | basename }}"
services:
- "{{ homarr_service }}"
state: present
recreate: always
- name: Wait for Homarr port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ homarr_port }}"
timeout: 60
- name: Check Homarr HTTP endpoint
ansible.builtin.uri:
url: "http://127.0.0.1:{{ homarr_port }}/"
status_code: 200


@@ -1,313 +1,116 @@
# update_immich.yml
- name: Update Immich on VM via Proxmox
hosts: linux_servers
- name: Update Immich
hosts: pve2_vm
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# Compose sync (controller -> target)
compose_local_dir: "{{ playbook_dir }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Immich specifics
immich_dir: "/opt/immich"
immich_project: "immich"
immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
immich_compose_file: "/opt/immich/docker-compose.yml"
immich_override_file: "/opt/immich/docker-compose.override.yml"
# Immich settings
immich_project: immich
immich_port: 2283
immich_compose_files:
- docker-compose-immich.yml
- docker-compose-immich.override.yml
# Optional external URL for controller-side readiness check (e.g., https://photos.example.com)
immich_url: "{{ lookup('env', 'IMMICH_URL') | default('', true) }}"
# Retry policy
immich_retries: "{{ RETRIES }}"
immich_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Compose command (always include override to keep local mounts separate from upstream compose)
immich_compose_cmd: >-
{{ docker_prefix }} compose
-p {{ immich_project }}
-f {{ immich_compose_file }}
-f {{ immich_override_file }}
# Commands to run on the target VM
immich_commands:
- "cd {{ immich_dir }}"
- |
cd {{ immich_dir }}
mkdir -p backups
if [ -f docker-compose.yml ]; then
cp -a docker-compose.yml "backups/docker-compose.yml.$(date +%F_%H%M%S).bak"
fi
if [ -f .env ]; then
cp -a .env "backups/.env.$(date +%F_%H%M%S).bak"
fi
if [ -f docker-compose.override.yml ]; then
cp -a docker-compose.override.yml "backups/docker-compose.override.yml.$(date +%F_%H%M%S).bak"
fi
- |
cd {{ immich_dir }}
# Download latest compose from Immich releases (requires curl or wget)
if command -v curl >/dev/null 2>&1; then
curl -fsSL -o docker-compose.yml "{{ immich_compose_url }}"
elif command -v wget >/dev/null 2>&1; then
wget -qO docker-compose.yml "{{ immich_compose_url }}"
else
echo "Neither curl nor wget is available on the VM."
exit 1
fi
- |
cd {{ immich_dir }}
# Ensure override compose exists (create if missing)
if [ ! -f "{{ immich_override_file }}" ]; then
printf '%s\n' \
'services:' \
' immich-server:' \
' volumes:' \
' - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos' \
' - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos' \
> "{{ immich_override_file }}"
fi
# Fail early if override is still missing/empty
test -s "{{ immich_override_file }}"
- |
cd {{ immich_dir }}
# Ensure .env exists. If missing, try to reconstruct it from running containers to avoid breaking DB creds.
python3 - <<'PY'
import json
import subprocess
from pathlib import Path
env_path = Path(".env")
if env_path.exists():
raise SystemExit(0)
def run(cmd):
p = subprocess.run(cmd, capture_output=True, text=True)
return p.returncode, p.stdout, p.stderr
rc, out, err = run(["bash", "-lc", "command docker inspect immich_postgres immich_server"])
if rc != 0 or not out.strip():
print("ERROR: .env is missing and cannot inspect running containers (immich_postgres/immich_server).", flush=True)
print("Create /opt/immich/.env manually or ensure the containers exist.", flush=True)
raise SystemExit(1)
data = json.loads(out)
by_name = {}
for c in data:
name = (c.get("Name") or "").lstrip("/")
by_name[name] = c
pg = by_name.get("immich_postgres")
srv = by_name.get("immich_server")
if not pg or not srv:
print("ERROR: Could not find immich_postgres and immich_server in docker inspect output.", flush=True)
raise SystemExit(1)
def env_map(container):
m = {}
for kv in (container.get("Config", {}).get("Env") or []):
if "=" in kv:
k, v = kv.split("=", 1)
m[k] = v
return m
def find_mount_source(container, dest):
for m in (container.get("Mounts") or []):
if m.get("Destination") == dest:
return m.get("Source")
return ""
pg_env = env_map(pg)
db_user = pg_env.get("POSTGRES_USER", "")
db_pass = pg_env.get("POSTGRES_PASSWORD", "")
db_name = pg_env.get("POSTGRES_DB", "")
db_data = find_mount_source(pg, "/var/lib/postgresql/data")
upload_loc = find_mount_source(srv, "/usr/src/app/upload")
# Try to preserve the currently used image tag as IMMICH_VERSION (optional but safer)
immich_version = ""
image = (srv.get("Config", {}).get("Image") or "")
if ":" in image and "@" not in image:
immich_version = image.rsplit(":", 1)[-1]
elif ":" in image and "@" in image:
# image like repo:tag@sha256:...
immich_version = image.split("@", 1)[0].rsplit(":", 1)[-1]
missing = []
for k, v in [
("DB_USERNAME", db_user),
("DB_PASSWORD", db_pass),
("DB_DATABASE_NAME", db_name),
("DB_DATA_LOCATION", db_data),
("UPLOAD_LOCATION", upload_loc),
]:
if not v:
missing.append(k)
if missing:
print("ERROR: Could not reconstruct these .env values from containers: " + ", ".join(missing), flush=True)
raise SystemExit(1)
lines = [
f"UPLOAD_LOCATION={upload_loc}",
f"DB_DATA_LOCATION={db_data}",
f"DB_USERNAME={db_user}",
f"DB_PASSWORD={db_pass}",
f"DB_DATABASE_NAME={db_name}",
]
if immich_version:
lines.append(f"IMMICH_VERSION={immich_version}")
env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
print("Created .env from running containers.", flush=True)
PY
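        # For reference, a reconstructed .env ends up roughly like this (values illustrative only):
        #   UPLOAD_LOCATION=/mnt/photos/library
        #   DB_DATA_LOCATION=/opt/immich/postgres
        #   DB_USERNAME=postgres
        #   DB_PASSWORD=********
        #   DB_DATABASE_NAME=immich
        #   IMMICH_VERSION=v1.122.0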
- |
cd {{ immich_dir }}
# Comment out healthcheck.start_interval if present (safe no-op if missing)
sed -i -E 's/^([[:space:]]*)start_interval:/\1# start_interval:/' docker-compose.yml || true
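        # healthcheck start_interval is only honored by newer Docker Engine releases (25+, as far
        # as I know); commenting it out keeps older engines from rejecting the upstream compose file.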
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} config >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} pull >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} up -d --remove-orphans --force-recreate >/dev/null"
# Persistent env file on the VM (NOT in git)
immich_env_persistent: "{{ compose_remote_base }}/env/immich.env"
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
mode: "0755"
- name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ immich_commands }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
register: immich_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Ensure remote env directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}/env"
state: directory
- name: Show outputs for each Immich command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ immich_cmds.results }}"
when: DEBUG == 1
run_once: true
- name: Fail if persistent Immich env file is missing
ansible.builtin.stat:
path: "{{ immich_env_persistent }}"
register: immich_env_stat
- name: Fail play if any Immich command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Immich update commands succeeded."
loop: "{{ immich_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
run_once: true
- name: Abort when Immich env is missing
ansible.builtin.fail:
msg: >-
Missing persistent env file: {{ immich_env_persistent }}.
Create it on the VM with DB_* and UPLOAD_LOCATION variables.
when: not immich_env_stat.stat.exists
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
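      # The controller probe hits the public URL (only when IMMICH_URL is set); the VM fallback
      # SSHes in and fetches http://127.0.0.1 from inside, so it still works when the service
      # is not exposed externally.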
- name: Immich | Wait for API ping (controller first, with retries)
ansible.builtin.uri:
url: "{{ (immich_url | regex_replace('/$','')) + '/api/server/ping' }}"
method: GET
return_content: true
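      # Validate TLS only when using https:// (same approach as the Kuma play later in this diff)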
validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
status_code: 200
register: immich_controller
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
when: immich_url is defined and (immich_url | length) > 0
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: immich_controller.status == 200 and ('pong' in (immich_controller.content | default('')))
failed_when: false
changed_when: false
- name: Immich | VM-side ping (JSON via Python, with retries)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Ping Immich API from localhost and print response to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/api/server/ping", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: immich_vm
changed_when: false
failed_when: false
when: immich_controller.status | default(0) != 200
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: (immich_vm.stdout | default('') | trim | length) > 0 and ('pong' in (immich_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
mode: "0644"
- name: Immich | Print concise summary
ansible.builtin.debug:
msg: >-
Immich API ping {{ 'OK' if (('pong' in (immich_controller.content|default(''))) or ('pong' in (immich_vm.stdout|default('')))) else 'NOT OK' }}.
Source={{ 'controller' if (immich_controller.status|default(0))==200 else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }}.
when: DEBUG == 1
run_once: true
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
mode: "0755"
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
- name: Deploy Immich .env into compose directory
ansible.builtin.copy:
src: "{{ immich_env_persistent }}"
dest: "{{ compose_remote_dir }}/.env"
remote_src: true
mode: "0600"
- name: Pull latest Immich images
community.docker.docker_compose_v2:
project_name: "{{ immich_project }}"
project_src: "{{ compose_remote_dir }}"
files: "{{ immich_compose_files }}"
pull: always
- name: Recreate Immich stack
community.docker.docker_compose_v2:
project_name: "{{ immich_project }}"
project_src: "{{ compose_remote_dir }}"
files: "{{ immich_compose_files }}"
state: present
recreate: always
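      # "pull: always" above refreshes images first; "recreate: always" then forces fresh
      # containers even when the compose files are unchanged.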
- name: Wait for Immich port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ immich_port }}"
timeout: 120
- name: Check Immich API ping (retry until ready)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ immich_port }}/api/server/ping"
status_code: 200
return_content: true
register: immich_ping
retries: 40
delay: 3
until: immich_ping.status == 200 and ('pong' in (immich_ping.content | default('')))
changed_when: false

update_jellyfin.yml (new file, 94 lines)

@@ -0,0 +1,94 @@
# update_jellyfin.yml
- name: Update Jellyfin
hosts: pve2_lxc_jellyfin
gather_facts: false
vars:
# Compose sync (controller -> target)
compose_local_dir: "{{ playbook_dir }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# Jellyfin settings
jellyfin_compose_filename: "docker-compose-jellyfin.yml"
jellyfin_service: jellyfin
jellyfin_port: 8096
tasks:
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
mode: "0755"
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
mode: "0644"
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
mode: "0755"
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
- name: Pull latest Jellyfin image (docker-compose v1)
ansible.builtin.command:
argv:
- bash
- -lc
- >
cd "{{ compose_remote_dir }}"
&& docker-compose -f "{{ jellyfin_compose_filename }}" pull
changed_when: true
- name: Recreate Jellyfin service (docker-compose v1)
ansible.builtin.command:
argv:
- bash
- -lc
- >
cd "{{ compose_remote_dir }}"
&& docker-compose -f "{{ jellyfin_compose_filename }}"
up -d --force-recreate --remove-orphans "{{ jellyfin_service }}"
changed_when: true
- name: Wait for Jellyfin port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ jellyfin_port }}"
timeout: 180
- name: Check Jellyfin HTTP endpoint (retry until ready)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ jellyfin_port }}/"
status_code:
- 200
- 302
register: jellyfin_http
retries: 40
delay: 3
until: jellyfin_http.status in [200, 302]
changed_when: false


@@ -1,65 +1,93 @@
# update_semaphore.yml
- name: Update Semaphore on VM via Proxmox
hosts: linux_servers
- name: Update Semaphore (self-update safe)
hosts: pve2_vm
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}" # IP vm-portainer
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
compose_local_dir: "{{ playbook_dir }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
# --- Semaphore specifics ---
semaphore_project: "semaphore"
semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
semaphore_service: "semaphore"
semaphore_project: semaphore
semaphore_compose_filename: "docker-compose-semaphore.yml"
semaphore_port: 3000
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
- name: Run Semaphore self-update on VM in background (nohup)
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
- name: Pull latest Semaphore image(s)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
nohup bash -c '
unalias docker 2>/dev/null || true
DOCKER_CLI_HINTS=0 docker compose \
-p {{ semaphore_project }} \
-f {{ semaphore_compose_file }} \
up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
' >/dev/null 2>&1 &
environment:
SSHPASS: "{{ vm_pass }}"
register: semaphore_update
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- >
cd "{{ compose_remote_dir }}"
&& docker compose -p "{{ semaphore_project }}"
-f "{{ semaphore_compose_filename }}"
pull
changed_when: true
- name: Show result of Semaphore self-update (debug)
ansible.builtin.debug:
msg: |
RC: {{ semaphore_update.rc }}
STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
when: DEBUG == 1
- name: Start Semaphore update in background (avoid killing this job)
ansible.builtin.command:
argv:
- bash
- -lc
- >
cd "{{ compose_remote_dir }}"
&& nohup docker compose -p "{{ semaphore_project }}"
-f "{{ semaphore_compose_filename }}"
up -d --remove-orphans --force-recreate
> "{{ compose_remote_base }}/semaphore-update.log" 2>&1 &
async: 1
poll: 0
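      # async: 1 / poll: 0 makes this fire-and-forget: Ansible detaches before compose recreates
      # the Semaphore container that is executing this very job; check semaphore-update.log on the VM.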
changed_when: true
- name: Wait for Semaphore port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ semaphore_port }}"
timeout: 300
- name: Check Semaphore HTTP endpoint (retry)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ semaphore_port }}/"
status_code: 200
register: sem_http
retries: 30
delay: 5
until: sem_http.status == 200
changed_when: false


@@ -1,194 +1,91 @@
# nextcloud/update_uptime_kuma.yml
# update_uptimekuma.yml
- name: Update Uptime Kuma on VM via Proxmox
hosts: linux_servers
- name: Update Uptime Kuma
hosts: pve1_vm
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# Compose sync (controller -> target)
compose_local_dir: "{{ playbook_dir }}/docker-compose"
compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Uptime Kuma specifics
kuma_project: "uptime-kuma"
kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
kuma_service: "uptime-kuma"
kuma_image: "louislam/uptime-kuma:latest"
kuma_port: 3001
# Optional external URL for controller-side readiness check (e.g., https://kuma.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"
# Fixed container name used in your compose (conflicts with previous/Portainer-run container)
kuma_container_name: "uptime-kuma-dev"
# Retry policy
kuma_retries: "{{ RETRIES }}"
kuma_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs like in Collabora play)
kuma_commands:
- "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"
# Uptime Kuma settings
uptimekuma_project: uptimekuma
uptimekuma_compose_filename: "docker-compose-uptimekuma.yml"
uptimekuma_service: uptime-kuma
uptimekuma_port: 3001
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ensure remote base directory exists
ansible.builtin.file:
path: "{{ compose_remote_base }}"
state: directory
mode: "0755"
- name: Run Uptime Kuma update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kuma_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kuma_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Uptime Kuma command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kuma_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Uptime Kuma command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Uptime Kuma update commands succeeded."
loop: "{{ kuma_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Kuma | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (kuma_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
status_code: 200
register: kuma_controller
- name: Create local archive of docker-compose directory (controller)
ansible.builtin.archive:
path: "{{ compose_local_dir }}/"
dest: "/tmp/docker-compose.tar.gz"
format: gz
delegate_to: localhost
run_once: true
when: kuma_url is defined and (kuma_url | length) > 0
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: kuma_controller.status == 200
failed_when: false
changed_when: false
- name: Kuma | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Kuma homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: kuma_vm
changed_when: false
failed_when: false
when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}" # hide command and output when not debugging
- name: Upload archive to remote host
ansible.builtin.copy:
src: "/tmp/docker-compose.tar.gz"
dest: "{{ compose_remote_archive }}"
mode: "0644"
    - name: Kuma | Choose homepage HTML (controller wins, else VM)  # safeguard against an empty result
ansible.builtin.set_fact:
kuma_home_html: >-
{{
(
kuma_controller.content
if (kuma_controller is defined)
and ((kuma_controller.status|default(0))==200)
and (kuma_controller.content is defined)
else
(kuma_vm.stdout | default('') | trim)
)
}}
when:
- (kuma_controller is defined and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined))
or ((kuma_vm.stdout | default('') | trim | length) > 0)
- name: Recreate remote compose directory
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: absent
- name: Kuma | Print concise summary
ansible.builtin.debug:
msg: >-
Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((kuma_controller is defined) and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined)) else 'vm' if (kuma_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (kuma_home_html | default('')) | length }};
contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
when: DEBUG == 1
- name: Ensure remote compose directory exists
ansible.builtin.file:
path: "{{ compose_remote_dir }}"
state: directory
mode: "0755"
- name: Kuma | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Kuma web není dostupná ani po pokusech."
when: kuma_home_html is not defined and DEBUG == 1
- name: Extract archive on remote host
ansible.builtin.unarchive:
src: "{{ compose_remote_archive }}"
dest: "{{ compose_remote_dir }}"
remote_src: true
# Optional detailed dump (short excerpt only)
- name: Kuma | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (kuma_home_html | default(''))[:500] }}"
when: kuma_home_html is defined and DEBUG == 1
- name: Pull latest Uptime Kuma image
community.docker.docker_compose_v2:
project_name: "{{ uptimekuma_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ uptimekuma_compose_filename }}"
pull: always
- name: Recreate Uptime Kuma service
community.docker.docker_compose_v2:
project_name: "{{ uptimekuma_project }}"
project_src: "{{ compose_remote_dir }}"
files:
- "{{ uptimekuma_compose_filename }}"
services:
- "{{ uptimekuma_service }}"
state: present
recreate: always
- name: Wait for Uptime Kuma port
ansible.builtin.wait_for:
host: 127.0.0.1
port: "{{ uptimekuma_port }}"
timeout: 120
- name: Check Uptime Kuma HTTP endpoint (retry until ready)
ansible.builtin.uri:
url: "http://127.0.0.1:{{ uptimekuma_port }}/"
status_code: 200
register: kuma_http
retries: 30
delay: 3
until: kuma_http.status == 200
changed_when: false