Compare commits

174 Commits (SHA1):

4c410d8ebe, cc743bda16, e2390fe2e9, 904984df8b, 512a4e6a63, da20e9c625, b24135352c, 6d580e80b0, 1b26c03c28, 0a164648da, 781a8e7dda, 24df01f60a, 325cc99c09, 855c7992dd, 9002c95017, 165b8f1a4e, cbedeca846, a7cda14994, 9aed30136d, eb5d587334, 87345121ea, 8946248d6f, 12d9ea51b8, fccd326374, 90c591e389, 2b067fa6cb, 309394b503, fa2f4fa6d5, 1938fd6ec8, 48b3f6059a, d1aab54fed, ebfe720397, 0da25c9550, 7a6153676c, d36336f53e, 7f3f89e32d, 8308c1380e, 61ee49b6f4, 602101bbfb, 9366ff0912, 17c1b43116, 7ea0bb86f2, 3e283783db, 0803cf3e52, d53ad9a6d6, e8bedc3939, 4fb56ed09a, e47ccb64b7, 4038f5b6a1, 74c3ef8945, d413dcb29f, e710669c84, eb9c56bb5e, 1c6fb9c9c3, a7d50a8f36, c3ff0514ee, 1b1806907a, fd57e6b566, 3c7701a760, b1f8eea86f, dbd864b45e, 8d1b2cd065, 54b0dc86c8, 75f4e8611f, 5e9d755390, 73cf848f82, e4dac7808b, f61addc2be, 94f3de1e7d, af3c676183, c2d67f5498, a1d730a18c, cf2507bdf6, 21ce9478f4, 0fd5ef9e41, bf35a6c253, 4f39d04e3f, d94f999f7b, 9140c6e2c9, 4500478fce, 3735217c58, cb2099a802, bcbc0269d6, 726ccb0242, b20f103992, 7d5fa667dd, f74977f1fb, d28c0662c5, f93373b1c8, 6ccbb97fbb, 885edc980c, 6b1c5efe22, d9b870d36f, 21bc7b4fd8, 690d03e470, 3228778db9, 0dfeed3e23, a8b8ea8a05, 8537742961, 4a462417a9, 97dc09e45c, cf07ef7608, 04450776fe, aa4c6fb6b6, 46d44ae924, e1474fd587, 8b6eec595c, 3c406662cb, f08dc68e20, cadc296a1f, 65642d8114, fd7ec9a3e7, 40586253a5, b834c2e4c4, 928a131ac8, d3a424508e, 615ebcfe65, 8d9d39590e, 3e5099b31d, a6f4c8cd75, c60c881f5a, e94a76dde4, ef67219c98, 3c0f29e3cb, f077a811da, bd25ea0eb1, 3853c25f7b, d38d6c76ed, fa74512fa4, 31771567b0, b7d968b8cc, 49904d991e, bf60fdd9f1, 168b729503, 9f08ef2d76, 463990f772, 7bc6c917f1, 866abc3d83, 4dd4b3b6f7, abc7fba684, b7f6d38a32, 1a8690529c, cbac27b3f2, 9869bbc383, 10f542989b, 3fdea9f960, 2678483149, fdd8834ea3, 27577a2ff9, f36b78baa4, 21c6781bb3, 71fd262c40, 060065e040, 9b111803c6, 004b560004, 52fcb80ec4, 243b88521d, 5df4686c00, 2643526326, 1591c2e787, 5c74f10f37, 76fde11ad9, d40bb2984b, bbc27cb2f6, 6fbb3fb088, b4758ee0e1, 20d2aacd47, 7b5d3a097e, 65ea83638d, 595c0624d6, 2b5a2b4a1a, b247ea0832, c476f04a8e, 5c185324d5
check_raid.yml (new file, 183 lines)

```yaml
# check_raid.yml

- name: Check Linux MD RAID health on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # Debug mode
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # RAID specifics
    # RAID_MD can be: md0 / md1 / ... OR "auto" to check all arrays found in /proc/mdstat
    raid_md_device: "{{ lookup('env', 'RAID_MD') | default('md0', true) }}"
    raid_allow_sync: "{{ lookup('env', 'RAID_ALLOW_SYNC') | default(1, true) | int }}"
    raid_allow_no_array: "{{ lookup('env', 'RAID_ALLOW_NO_ARRAY') | default(0, true) | int }}"

    raid_retries: "{{ RETRIES }}"
    raid_delay: 2
    ssh_hard_timeout: 30

    # SSH options
    ssh_opts:
      - "-o"
      - "StrictHostKeyChecking=no"
      - "-o"
      - "UserKnownHostsFile=/dev/null"
      - "-o"
      - "GlobalKnownHostsFile=/dev/null"
      - "-o"
      - "LogLevel=ERROR"
      - "-o"
      - "ConnectTimeout=15"
      - "-o"
      - "PreferredAuthentications=password"
      - "-o"
      - "PubkeyAuthentication=no"
      - "-o"
      - "KbdInteractiveAuthentication=no"
      - "-o"
      - "NumberOfPasswordPrompts=1"

    raid_check_cmd: |
      python3 - <<'PY'
      # Print exactly one status line and exit with code:
      # 0=OK, 1=FAIL (degraded/disallowed sync), 2=ERROR (unexpected/misconfig)
      import re, sys

      target = "{{ raid_md_device }}"
      allow_sync = int("{{ raid_allow_sync }}")
      allow_no_array = int("{{ raid_allow_no_array }}")

      try:
          txt = open("/proc/mdstat", "r", encoding="utf-8", errors="ignore").read()
      except Exception as e:
          print(f"ERROR RAID read_mdstat err={e}")
          sys.exit(2)

      arrays = {}
      header_re = re.compile(r"^(md\d+)\s*:\s*active.*$", re.MULTILINE)
      token_re = re.compile(r"^\s*\d+\s+blocks.*\[\d+/\d+\]\s*\[([U_]+)\]\s*$", re.MULTILINE)

      for m in header_re.finditer(txt):
          name = m.group(1)
          chunk = txt[m.end():m.end() + 3000]
          tm = token_re.search(chunk)
          if tm:
              arrays[name] = tm.group(1)

      if not arrays:
          if allow_no_array:
              print("OK RAID none=no-md-arrays")
              sys.exit(0)
          print("ERROR RAID none=no-md-arrays")
          sys.exit(2)

      syncing = bool(re.search(r"\b(resync|recovery|reshape|check|repair)\b", txt))

      if target == "auto":
          to_check = sorted(arrays.keys())
      else:
          if target not in arrays:
              found = ",".join(sorted(arrays.keys()))
              print(f"ERROR RAID target_not_found target={target} found={found}")
              sys.exit(2)
          to_check = [target]

      tokens_str = " ".join([f"{name}=[{arrays[name]}]" for name in to_check])
      degraded = any("_" in arrays[name] for name in to_check)

      if degraded:
          print(f"FAIL RAID {tokens_str} syncing={int(syncing)}")
          sys.exit(1)

      if syncing and not allow_sync:
          print(f"FAIL RAID {tokens_str} syncing={int(syncing)} allow_sync={allow_sync}")
          sys.exit(1)

      print(f"OK RAID {tokens_str} syncing={int(syncing)}")
      sys.exit(0)
      PY

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes
      run_once: true

    - name: Run RAID check on VM (via SSH)  # single command, no loop
      ansible.builtin.command:
        argv: >-
          {{
            ['timeout', '-k', '5', (ssh_hard_timeout | string)]
            + ['sshpass', '-e', 'ssh']
            + ssh_opts
            + [ vm_user ~ '@' ~ vm_ip,
                'bash', '-lc',
                ('sudo ' if use_sudo else '') + raid_check_cmd
              ]
          }}
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: raid_cmd
      changed_when: false
      failed_when: false  # we decide via assert below
      retries: "{{ raid_retries }}"
      delay: "{{ raid_delay }}"
      until: raid_cmd.rc not in [124, 255]
      run_once: true

    - name: Build one-line summary (always)
      ansible.builtin.set_fact:
        raid_line: >-
          {{
            (raid_cmd.stdout | default('') | trim)
            if ((raid_cmd.stdout | default('') | trim) | length) > 0
            else ('ERROR RAID no-output rc=' ~ (raid_cmd.rc | string))
          }}
      changed_when: false
      run_once: true

    - name: RAID result (always one line)
      ansible.builtin.assert:
        that:
          - raid_cmd.rc == 0
        success_msg: "{{ raid_line }}"
        fail_msg: "{{ raid_line }}"
      run_once: true

    # Optional verbose debug
    - name: Debug | /proc/mdstat (VM)
      ansible.builtin.command:
        argv: >-
          {{
            ['timeout', '-k', '5', (ssh_hard_timeout | string)]
            + ['sshpass', '-e', 'ssh']
            + ssh_opts
            + [ vm_user ~ '@' ~ vm_ip, 'bash', '-lc', "cat /proc/mdstat" ]
          }}
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: mdstat_dbg
      changed_when: false
      failed_when: false
      when: DEBUG == 1
      run_once: true

    - name: Debug | mdstat output
      ansible.builtin.debug:
        msg: "{{ mdstat_dbg.stdout | default('') }}"
      when: DEBUG == 1
      run_once: true
```
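The playbook takes every tunable from environment variables, which Semaphore injects at run time. A minimal sketch of a manual run outside Semaphore, assuming an inventory file (here called `inventory`, a placeholder) that defines the `linux_servers` group:

```bash
# Hypothetical manual invocation; in production Semaphore supplies these variables.
export VM_IP=192.168.69.250      # placeholder: target VM reachable from the Proxmox host
export VM_USER=admin             # placeholder user
export VM_PASS='secret'          # placeholder password
export RAID_MD=auto              # check every md array found in /proc/mdstat
export RAID_ALLOW_SYNC=1         # a resync/recovery in progress still counts as OK
export DEBUG=1                   # also dump raw /proc/mdstat

ansible-playbook -i inventory check_raid.yml
```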
docker-compose/docker-compose-collabora.yml (new file, 23 lines)

```yaml
version: '3'

services:
  collabora:
    image: collabora/code:latest
    container_name: collabora
    restart: unless-stopped
    networks:
      - cloud
    environment:
      - TZ=Europe/Prague
      - password=password
      - username=nextcloud
      - domain=cloud.martinfencl.eu
      - extra_params=--o:ssl.enable=false --o:ssl.termination=true
      - aliasgroup1=https://cloud.martinfencl.eu:443,https://collabora.martinfencl.eu:443
      - dictionaries=de_DE en_GB en_US es_ES fr_FR it nl pt_BR pt_PT ru cs_CZ
    ports:
      - 9980:9980

networks:
  cloud:
    driver: bridge
```
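Collabora is published over plain HTTP on 9980 (`ssl.enable=false`), with TLS expected to terminate at a reverse proxy (`ssl.termination=true`). The WOPI discovery document is the usual readiness probe, and it is the same endpoint update_collabora.yml polls later in this diff. A quick sketch:

```bash
docker compose -f docker-compose-collabora.yml up -d
# Readiness: the discovery XML should contain a <wopi-discovery> root element.
curl -fsS http://127.0.0.1:9980/hosting/discovery | grep -o '<wopi-discovery'
```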
docker-compose/docker-compose-homarr.yml (new file, 13 lines)

```yaml
services:
  homarr:
    container_name: homarr
    image: ghcr.io/homarr-labs/homarr:latest
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock  # Optional, only if you want docker integration
      - /data/compose/homarr/appdata:/appdata
    environment:
      - SECRET_ENCRYPTION_KEY=4fb16028fa1788d9a24fa93a323aa4a278524bed177c8c38454f4c4068c1b9b6
    ports:
      - '7575:7575'
```
docker-compose/docker-compose-immich.override.yml (new file, 5 lines)

```yaml
services:
  immich-server:
    volumes:
      - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos
      - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos
```
docker-compose/docker-compose-immich.yml (new file, 77 lines)

```yaml
#
# WARNING: To install Immich, follow our guide: https://docs.immich.app/install/docker-compose
#
# Make sure to use the docker-compose.yml of the current release:
#
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
#
# The compose file on main may not be compatible with the latest release.

name: immich

services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    # extends:
    #   file: hwaccel.transcoding.yml
    #   service: cpu # set to one of [nvenc, quicksync, rkmpp, vaapi, vaapi-wsl] for accelerated transcoding
    volumes:
      - ${UPLOAD_LOCATION}:/usr/src/app/upload
      #- /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos:ro # read-only external library
      #- /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos:ro # read-only external library
      - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos
      - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos
      - /etc/localtime:/etc/localtime:ro
    env_file:
      - .env
    ports:
      - '2283:2283'
    depends_on:
      - redis
      - database
    restart: always
    healthcheck:
      disable: false

  immich-machine-learning:
    container_name: immich_machine_learning
    # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag.
    # Example tag: ${IMMICH_VERSION:-release}-cuda
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    # extends: # uncomment this section for hardware acceleration - see https://docs.immich.app/features/ml-hardware-acceleration
    #   file: hwaccel.ml.yml
    #   service: cpu # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable
    volumes:
      - model-cache:/cache
    env_file:
      - .env
    restart: always
    healthcheck:
      disable: false

  redis:
    container_name: immich_redis
    image: docker.io/valkey/valkey:8@sha256:81db6d39e1bba3b3ff32bd3a1b19a6d69690f94a3954ec131277b9a26b95b3aa
    healthcheck:
      test: redis-cli ping || exit 1
    restart: always

  database:
    container_name: immich_postgres
    image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
    environment:
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_USER: ${DB_USERNAME}
      POSTGRES_DB: ${DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: '--data-checksums'
      # Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs
      # DB_STORAGE_TYPE: 'HDD'
    volumes:
      # Do not edit the next line. If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file
      - ${DB_DATA_LOCATION}:/var/lib/postgresql/data
    shm_size: 128mb
    restart: always

volumes:
  model-cache:
```
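Docker Compose merges multiple `-f` files left to right, so the override file above adds the two Nextcloud photo mounts to `immich-server` without editing the stock release file. A sketch of bringing the stack up with the override applied:

```bash
# The override appends the Nextcloud photo mounts on top of the release compose file.
docker compose \
  -f docker-compose-immich.yml \
  -f docker-compose-immich.override.yml \
  up -d
```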
docker-compose/docker-compose-jellyfin.yml (new file, 33 lines)

```yaml
services:
  jellyfin:
    image: lscr.io/linuxserver/jellyfin:latest
    container_name: jellyfin
    restart: unless-stopped

    ports:
      - "8096:8096"
      - "7359:7359/udp"
      - "1900:1900/udp"

    environment:
      - TZ=Europe/Prague
      - PUID=0
      - PGID=0

    volumes:
      - /opt/jellyfin/config:/config
      - /opt/jellyfin/cache:/cache
      - /mnt/films:/media/films:ro
      - /mnt/books:/media/books:ro
      - /mnt/ondrulin:/media/ondrulin:ro

    devices:
      - /dev/dri:/dev/dri

    group_add:
      - "104"
      - "44"

    tmpfs:
      - /transcode:rw,size=8g,mode=1777
```
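`group_add: ["104", "44"]` grants the container the GIDs that own the `/dev/dri` nodes for VAAPI transcoding; 44 is conventionally `video` and 104 is often `render`, but both numbers are host-specific. A quick sketch to verify them on the host before trusting the hard-coded values:

```bash
# GIDs are host-specific; confirm which groups own the DRI nodes.
getent group video render
ls -ln /dev/dri   # numeric group IDs of card0/renderD128 should match group_add
```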
docker-compose/docker-compose-semaphore.yml (new file, 26 lines)

```yaml
version: "3.8"

services:
  semaphore:
    image: semaphoreui/semaphore:latest
    user: "0:0"
    ports:
      - "3000:3000"

    environment:
      SEMAPHORE_DB_DIALECT: bolt
      SEMAPHORE_DB_PATH: /etc/semaphore/semaphore.db.bolt  # full path to file!
      SEMAPHORE_TMP_PATH: /var/lib/semaphore/projects
      SEMAPHORE_ADMIN: admin
      SEMAPHORE_ADMIN_NAME: admin
      SEMAPHORE_ADMIN_EMAIL: admin@localhost
      SEMAPHORE_ADMIN_PASSWORD: changeme
      SEMAPHORE_ACCESS_KEY_ENCRYPTION: "rZffGjw4BGlwoM+66fStJ4Pg+ivLc5ghtty3yoscltY="

    volumes:
      - /data/compose/semaphore/db:/etc/semaphore
      - /data/compose/semaphore/projects:/var/lib/semaphore/projects
      - /data/compose/semaphore/backups:/opt/mikrotik_backups/
      - /data/compose/semaphore/ansible.cfg:/etc/ansible.cfg:ro  # mount as a file, not into /etc/ansible/ansible.cfg

    restart: unless-stopped
```
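`SEMAPHORE_ACCESS_KEY_ENCRYPTION` must be a base64-encoded 32-byte key, and since the value above is committed to the repository it is worth rotating. A fresh key can be generated like this:

```bash
# Generate a new 32-byte base64 key for SEMAPHORE_ACCESS_KEY_ENCRYPTION.
head -c32 /dev/urandom | base64
```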
docker-compose/docker-compose-uptimekuma.yml (new file, 12 lines)

```yaml
version: '3.8'

services:
  uptime-kuma:
    container_name: uptime-kuma-dev
    image: louislam/uptime-kuma:latest
    volumes:
      #- ./data:/app/data
      - /data/compose/kuma/data:/app/data
    ports:
      - "3001:3001"  # <Host Port>:<Container Port>
```
homarr.yml (deleted file, 39 lines)

```yaml
- name: Update Homarr
  hosts: linux_servers
  become: true
  gather_facts: false

  vars:
    homarr_project: homarr
    homarr_compose_file: /data/compose/homarr/docker-compose-homarr.yml
    homarr_service: homarr
    homarr_port: 7575

  tasks:
    - name: Pull latest Homarr image
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        pull: always

    - name: Recreate Homarr service
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        services:
          - "{{ homarr_service }}"
        state: present
        recreate: always

    - name: Wait for Homarr port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ homarr_port }}"
        timeout: 60

    - name: Check Homarr HTTP endpoint
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ homarr_port }}/"
        status_code: 200
```
```diff
@@ -1,8 +1,3 @@
 [linux_servers]
-jimbuntu ansible_host=192.168.19.4
-jim_storage ansible_host=192.168.19.7
-portainer2 ansible_host=192.168.52.9
-portainernode ansible_host=192.168.52.21
-
-[local]
-localhost ansible_connection=local
+proxmox_nextcloud ansible_host=192.168.69.2
+proxmox_services ansible_host=192.168.69.3
```
```diff
@@ -1,3 +1,3 @@
 [mikrotiks]
-jim ansible_host=192.168.19.2
-hellsos ansible_host=192.168.40.1
+mikrotik_fencl_server ansible_host=192.168.69.1
+mikrotik_fencl_5G ansible_host=192.168.68.1
```
inv_vm (new file, 5 lines)

```ini
[vm]
pve1_vm ansible_host=192.168.69.253
pve2_vm ansible_host=192.168.69.254
[lxc]
pve2_lxc_jellyfin ansible_host=192.168.69.252
```
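A quick connectivity sketch against the new inventory, assuming SSH access to the listed hosts is already set up:

```bash
# Ping every VM and LXC host defined in inv_vm.
ansible -i inv_vm all -m ping
```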
nextcloud/check_stack_nextcloud.yml (new file, 68 lines)

```yaml
# nextcloud/check_stack_nextcloud.yml

- name: Run Nextcloud maintenance on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"

    # Flip to true if Docker needs sudo on the VM
    use_sudo: false

    vm_commands:
      - "docker exec -u www-data nextcloud php -f /var/www/html/cron.php"
      - "docker exec -u www-data nextcloud php occ app:update --all"
      - "docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive"
      - "docker exec -u www-data nextcloud php occ status"
      - "set -o pipefail; timeout 180s bash -x /data/compose/nextcloud/stack-health.sh </dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Nextcloud commands on VM (via SSH, argv, no line breaks)
      ansible.builtin.command:
        argv:
          - sshpass
          - -p
          - "{{ vm_pass }}"
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      loop: "{{ vm_commands }}"
      register: vm_cmds
      changed_when: false

    - name: Show outputs for each command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ vm_cmds.results }}"

    - name: Fail play if any command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Command failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All commands succeeded."
      loop: "{{ vm_cmds.results }}"
```
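As with check_raid.yml, the connection details come from Semaphore environment variables. A hypothetical manual run, again with a placeholder inventory and credentials:

```bash
# Hypothetical manual run; Semaphore normally supplies these variables.
VM_IP=192.168.69.250 VM_USER=admin VM_PASS='secret' \
  ansible-playbook -i inventory nextcloud/check_stack_nextcloud.yml
```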
nextcloud/update_collabora.yml (new file, 92 lines)

```yaml
# nextcloud/update_collabora.yml

- name: Update Collabora
  hosts: pve2_vm
  gather_facts: false

  vars:
    # Compose sync (controller -> target)
    compose_local_dir: "{{ lookup('env','PWD') }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    # Collabora settings
    collabora_project: collabora
    collabora_compose_filename: "docker-compose-collabora.yml"
    collabora_service: collabora
    collabora_port: 9980

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory
        mode: "0755"

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"
        mode: "0644"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory
        mode: "0755"

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true

    - name: Pull latest Collabora image
      community.docker.docker_compose_v2:
        project_name: "{{ collabora_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ collabora_compose_filename }}"
        pull: always

    - name: Recreate Collabora service
      community.docker.docker_compose_v2:
        project_name: "{{ collabora_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ collabora_compose_filename }}"
        services:
          - "{{ collabora_service }}"
        state: present
        recreate: always

    - name: Wait for Collabora port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ collabora_port }}"
        timeout: 120

    - name: Check Collabora discovery endpoint (retry until ready)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ collabora_port }}/hosting/discovery"
        status_code: 200
        return_content: true
      register: collabora_http
      retries: 40
      delay: 3
      until: collabora_http.status == 200 and ('<wopi-discovery' in (collabora_http.content | default('')))
      changed_when: false
```
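Unlike the sshpass-wrapper playbooks, this one runs docker compose directly on the target, so it only needs an inventory entry for `pve2_vm`, which the new inv_vm file provides. Note that `compose_remote_base` is derived from `ansible_user`, so the connecting user must be set. A sketch:

```bash
# pve2_vm (192.168.69.254) is defined in inv_vm.
ansible-playbook -i inv_vm nextcloud/update_collabora.yml
```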
nextcloud/update_nextcloud_v1.yml (new file, 287 lines)

```yaml
# nextcloud/update_nextcloud.yml

- name: Update Nextcloud on VM via Proxmox
  hosts: proxmox_nextcloud  # linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug / retries ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Nextcloud specifics ---
    nextcloud_project: "nextcloud-collabora"
    nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
    nextcloud_service: "nextcloud"

    # Backup directory on the VM (timestamped on controller)
    backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"

    nextcloud_base_url: "https://cloud.martinfencl.eu"
    nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # --- Backup phase commands (run on VM) ---
    nextcloud_backup_commands:
      - >
        mkdir -p "{{ backup_dir }}"
      - >
        docker exec -u www-data nextcloud php occ maintenance:mode --on
      # Create tarball of config + custom_apps inside the container
      - >
        docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
      # Copy that tarball to the host backup directory
      - >
        docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
      # Remove temporary file inside the container
      - >
        docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
      # Database dump from DB container (unchanged)
      - >
        docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"

    # --- Upgrade phase commands (run on VM) ---
    nextcloud_upgrade_commands:
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}
      - >
        docker exec -u www-data nextcloud php occ upgrade
      - >
        docker exec -u www-data nextcloud php occ app:update --all || true
      - >
        docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Nextcloud | Show current version before upgrade (DEBUG)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - 'docker exec -u www-data nextcloud php occ -V || true'
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_version_before
      changed_when: false
      failed_when: false
      when: DEBUG == 1

    # -------------------------
    # Backup phase
    # -------------------------
    - name: Nextcloud | Run backup commands on VM (via SSH)  # run plain commands via SSH
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_backup_commands }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"
      register: nc_backup_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of backup commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_backup_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any backup command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud backup commands succeeded."
      loop: "{{ nc_backup_cmds.results }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"

    # -------------------------
    # Upgrade phase
    # -------------------------

    - name: Nextcloud | Run upgrade commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_upgrade_commands }}"
      loop_control:
        index_var: idx
        label: "upgrade-cmd-{{ idx }}"
      register: nc_upgrade_cmds
      changed_when: false
      failed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of upgrade commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_upgrade_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any upgrade command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud upgrade commands succeeded."
      loop: "{{ nc_upgrade_cmds.results }}"
      loop_control:
        index_var: idx
        label: "upgrade-cmd-{{ idx }}"

    - name: Nextcloud | Disable maintenance mode (only after successful upgrade)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_maint_off
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    # -------------------------
    # Readiness check (status.php)
    # -------------------------

    - name: Nextcloud | Wait for status.php (controller first)
      ansible.builtin.uri:
        url: "{{ nextcloud_status_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: nc_status_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 4
      until: nc_status_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Nextcloud | VM-side fetch status.php (JSON via Python)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            import json, urllib.request, sys
            try:
                with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
                    sys.stdout.write(r.read().decode())
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_status_vm
      changed_when: false
      failed_when: false
      when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Choose status JSON (controller wins, else VM)
      ansible.builtin.set_fact:
        nextcloud_status_json: >-
          {{
            (nc_status_controller.json
             if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
             else (
               (nc_status_vm.stdout | default('') | trim | length > 0)
               | ternary((nc_status_vm.stdout | trim | from_json), omit)
             )
            )
          }}
      failed_when: false

    - name: Nextcloud | Print concise status summary (DEBUG)
      ansible.builtin.debug:
        msg: >-
          Nextcloud {{ nextcloud_status_json.version | default('?') }}
          (installed={{ nextcloud_status_json.installed | default('?') }},
          maintenance={{ nextcloud_status_json.maintenance | default('?') }},
          needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
      when: nextcloud_status_json is defined and DEBUG == 1

    - name: Nextcloud | Status JSON not available (DEBUG)
      ansible.builtin.debug:
        msg: "status.php is not reachable or did not return JSON."
      when: nextcloud_status_json is not defined and DEBUG == 1
```
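The backup phase leaves two artifacts in the timestamped `backup_dir`: `nextcloud_conf.tgz` (config plus custom_apps) and `db.sql`. A rough restore sketch under the same container names; this is an untested outline, not part of the playbooks, so adapt it before relying on it:

```bash
# Hypothetical restore from a backup directory created by the playbook.
BACKUP=/data/compose/nextcloud/backup-2025-01-01-000000   # pick the real timestamp

# Restore config and custom apps into the container.
docker cp "$BACKUP/nextcloud_conf.tgz" nextcloud:/tmp/
docker exec nextcloud sh -c 'tar xzf /tmp/nextcloud_conf.tgz -C /var/www/html && rm /tmp/nextcloud_conf.tgz'

# Restore the database dump; credentials come from the container environment
# (use the mysql client instead of mariadb on older images).
docker exec -i nextcloud-db sh -c 'mariadb -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' < "$BACKUP/db.sql"
```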
nextcloud/update_nextcloud_v2.yml (new file, 287 lines)

```yaml
# nextcloud/update_nextcloud.yml

- name: Update Nextcloud on VM via Proxmox
  hosts: proxmox_nextcloud  # linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug / retries ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Nextcloud specifics ---
    nextcloud_project: "nextcloud-collabora"
    nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
    nextcloud_service: "nextcloud"

    # Backup directory on the VM (timestamped on controller)
    backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"

    nextcloud_base_url: "https://cloud.martinfencl.eu"
    nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # --- Backup phase commands (run on VM) ---
    nextcloud_backup_commands:
      - >-
        mkdir -p "{{ backup_dir }}"
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --on
      - >-
        {{ docker_prefix }} exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
      - >-
        {{ docker_prefix }} cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
      - >-
        {{ docker_prefix }} exec nextcloud rm /tmp/nextcloud_conf.tgz || true
      - >-
        {{ docker_prefix }} exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"

    # --- Upgrade phase commands (run on VM) ---
    nextcloud_upgrade_commands:
      - >-
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}
      - >-
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ upgrade
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ app:update --all || true
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --on

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Nextcloud | Show current version before upgrade (DEBUG)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - 'docker exec -u www-data nextcloud php occ -V || true'
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_version_before
      changed_when: false
      failed_when: false
      when: DEBUG == 1

    # -------------------------
    # Backup phase
    # -------------------------
    - name: Nextcloud | Run backup commands on VM (via SSH)  # run plain commands via SSH
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_backup_commands }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"
      register: nc_backup_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of backup commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_backup_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any backup command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud backup commands succeeded."
      loop: "{{ nc_backup_cmds.results }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"

    # -------------------------
    # Upgrade phase
    # -------------------------

    - name: Nextcloud | Run upgrade commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_upgrade_commands }}"
      loop_control:
        index_var: idx
        label: "upgrade-cmd-{{ idx }}"
      register: nc_upgrade_cmds
      changed_when: false
      failed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of upgrade commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_upgrade_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any upgrade command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud upgrade commands succeeded."
      loop: "{{ nc_upgrade_cmds.results }}"
      loop_control:
        index_var: idx
        label: "upgrade-cmd-{{ idx }}"

    - name: Nextcloud | Disable maintenance mode (only after successful upgrade)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_maint_off
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    # -------------------------
    # Readiness check (status.php)
    # -------------------------

    - name: Nextcloud | Wait for status.php (controller first)
      ansible.builtin.uri:
        url: "{{ nextcloud_status_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: nc_status_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 4
      until: nc_status_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Nextcloud | VM-side fetch status.php (JSON via Python)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            import json, urllib.request, sys
            try:
                with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
                    sys.stdout.write(r.read().decode())
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_status_vm
      changed_when: false
      failed_when: false
      when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Choose status JSON (controller wins, else VM)
      ansible.builtin.set_fact:
        nextcloud_status_json: >-
          {{
            (nc_status_controller.json
             if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
             else (
               (nc_status_vm.stdout | default('') | trim | length > 0)
               | ternary((nc_status_vm.stdout | trim | from_json), omit)
             )
            )
          }}
      failed_when: false

    - name: Nextcloud | Print concise status summary (DEBUG)
      ansible.builtin.debug:
        msg: >-
          Nextcloud {{ nextcloud_status_json.version | default('?') }}
          (installed={{ nextcloud_status_json.installed | default('?') }},
          maintenance={{ nextcloud_status_json.maintenance | default('?') }},
          needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
      when: nextcloud_status_json is defined and DEBUG == 1

    - name: Nextcloud | Status JSON not available (DEBUG)
      ansible.builtin.debug:
        msg: "status.php is not reachable or did not return JSON."
      when: nextcloud_status_json is not defined and DEBUG == 1
```
343
nextcloud/update_nextcloud_v3.yml
Normal file
343
nextcloud/update_nextcloud_v3.yml
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
# nextcloud/update_nextcloud.yml
|
||||||
|
|
||||||
|
- name: Update Nextcloud on VM via Proxmox
|
||||||
|
hosts: proxmox_nextcloud
|
||||||
|
gather_facts: false
|
||||||
|
become: true
|
||||||
|
become_user: root
|
||||||
|
become_method: sudo
|
||||||
|
|
||||||
|
vars:
|
||||||
|
# --- Connection to VM (provided by Semaphore env vars) ---
|
||||||
|
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
||||||
|
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
||||||
|
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
||||||
|
use_sudo: false
|
||||||
|
|
||||||
|
# --- Debug / retries ---
|
||||||
|
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
||||||
|
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
||||||
|
|
||||||
|
# --- Nextcloud specifics ---
|
||||||
|
nextcloud_project: "nextcloud-collabora"
|
||||||
|
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
|
||||||
|
nextcloud_service: "nextcloud"
|
||||||
|
|
||||||
|
# Backup directory on the VM (timestamped on controller)
|
||||||
|
backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
|
||||||
|
|
||||||
|
nextcloud_base_url: "https://cloud.martinfencl.eu"
|
||||||
|
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
|
||||||
|
|
||||||
|
# Docker command prefix (consistent behavior and quiet hints)
|
||||||
|
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
||||||
|
|
||||||
|
# --- Backup phase commands (run on VM) ---
|
||||||
|
nextcloud_backup_commands:
|
||||||
|
- >-
|
||||||
|
mkdir -p "{{ backup_dir }}"
|
||||||
|
- >-
|
||||||
|
{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --on
|
||||||
|
- >-
|
||||||
|
{{ docker_prefix }} exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
|
||||||
|
- >-
|
||||||
|
{{ docker_prefix }} cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
|
||||||
|
- >-
|
||||||
|
{{ docker_prefix }} exec nextcloud rm /tmp/nextcloud_conf.tgz || true
|
||||||
|
- >-
|
||||||
|
{{ docker_prefix }} exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Ensure sshpass is installed (for password-based SSH)
|
||||||
|
ansible.builtin.apt:
|
||||||
|
name: sshpass
|
||||||
|
state: present
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Nextcloud | Show current version before upgrade (DEBUG)
|
||||||
|
ansible.builtin.command:
|
||||||
|
argv:
|
||||||
|
- sshpass
|
||||||
|
- -e
|
||||||
|
- ssh
|
||||||
|
- -o
|
||||||
|
- StrictHostKeyChecking=no
|
||||||
|
- -o
|
||||||
|
- ConnectTimeout=15
|
||||||
|
- "{{ vm_user }}@{{ vm_ip }}"
|
||||||
|
- bash
|
||||||
|
- -lc
|
||||||
|
- "{{ docker_prefix }} exec -u www-data nextcloud php occ -V || true"
|
||||||
|
environment:
|
||||||
|
SSHPASS: "{{ vm_pass }}"
|
||||||
|
register: nc_version_before
|
||||||
|
changed_when: false
|
||||||
|
failed_when: false
|
||||||
|
when: DEBUG == 1
|
||||||
|
no_log: "{{ DEBUG == 0 }}"
|
||||||
|
|
||||||
|
# -------------------------
|
||||||
|
# Backup phase
|
||||||
|
# -------------------------
|
||||||
|
- name: Nextcloud | Run backup commands on VM (via SSH)
|
||||||
|
ansible.builtin.command:
|
||||||
|
argv:
|
||||||
|
- sshpass
|
||||||
|
- -e
|
||||||
|
- ssh
|
||||||
|
- -o
|
||||||
|
- StrictHostKeyChecking=no
|
||||||
|
- -o
|
||||||
|
- ConnectTimeout=15
|
||||||
|
- "{{ vm_user }}@{{ vm_ip }}"
|
||||||
|
- bash
|
||||||
|
- -lc
|
||||||
|
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
||||||
|
environment:
|
||||||
|
SSHPASS: "{{ vm_pass }}"
|
||||||
|
loop: "{{ nextcloud_backup_commands }}"
|
||||||
|
loop_control:
|
||||||
|
index_var: idx
|
||||||
|
label: "backup-cmd-{{ idx }}"
|
||||||
|
register: nc_backup_cmds
|
||||||
|
changed_when: false
|
||||||
|
failed_when: false
|
||||||
|
no_log: "{{ DEBUG == 0 }}"
|
||||||
|
|
||||||
|
- name: Nextcloud | Show outputs of backup commands (DEBUG)
|
||||||
|
ansible.builtin.debug:
|
||||||
|
msg: |
|
||||||
|
CMD: {{ item.item }}
|
||||||
|
RC: {{ item.rc }}
|
||||||
|
STDOUT:
|
||||||
|
{{ (item.stdout | default('')).strip() }}
|
||||||
|
STDERR:
|
||||||
|
{{ (item.stderr | default('')).strip() }}
|
||||||
|
loop: "{{ nc_backup_cmds.results }}"
|
||||||
|
when: DEBUG == 1
|
||||||
|
|
||||||
|
- name: Nextcloud | Fail play if any backup command failed
|
||||||
|
ansible.builtin.assert:
|
||||||
|
that: "item.rc == 0"
|
||||||
|
fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
||||||
|
success_msg: "All Nextcloud backup commands succeeded."
|
||||||
|
loop: "{{ nc_backup_cmds.results }}"
|
||||||
|
loop_control:
|
||||||
|
index_var: idx
|
||||||
|
label: "backup-cmd-{{ idx }}"

    # -------------------------
    # Upgrade phase (with always cleanup)
    # -------------------------
    - name: Nextcloud | Upgrade block
      block:
        - name: Nextcloud | Pull image
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_pull
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Recreate service
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_up
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Ensure maintenance is OFF before occ upgrade
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_maint_off_before
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | occ upgrade (must succeed)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ upgrade"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_occ_upgrade
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Update apps (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ app:update --all || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_app_update
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Repair (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_repair
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

      rescue:
        - name: Nextcloud | Show occ upgrade output (DEBUG)
          ansible.builtin.debug:
            msg: |
              occ upgrade FAILED
              RC: {{ nc_occ_upgrade.rc | default('n/a') }}
              STDOUT:
              {{ (nc_occ_upgrade.stdout | default('')).strip() }}
              STDERR:
              {{ (nc_occ_upgrade.stderr | default('')).strip() }}
          when: DEBUG == 1

        - name: Nextcloud | Try to force-disable maintenance flag (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ config:system:set maintenance --type=boolean --value=false || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Fail explicitly
          ansible.builtin.fail:
            msg: >-
              Nextcloud occ upgrade failed. Check nextcloud.log inside the container (data/nextcloud.log).
              stdout={{ (nc_occ_upgrade.stdout | default('') | trim) }}
              stderr={{ (nc_occ_upgrade.stderr | default('') | trim) }}

      always:
        - name: Nextcloud | Ensure maintenance mode is OFF (always)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"
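
    # The always section runs whether the block succeeded or was rescued, so
    # maintenance mode is switched back off no matter how the upgrade ended.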

    # -------------------------
    # Readiness check (status.php)
    # -------------------------
    - name: Nextcloud | Wait for status.php (controller first)
      ansible.builtin.uri:
        url: "{{ nextcloud_status_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: nc_status_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 4
      until: nc_status_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Nextcloud | Print concise status summary (DEBUG)
      ansible.builtin.debug:
        msg: >-
          Nextcloud {{ nc_status_controller.json.version | default('?') }}
          (installed={{ nc_status_controller.json.installed | default('?') }},
          maintenance={{ nc_status_controller.json.maintenance | default('?') }},
          needsDbUpgrade={{ nc_status_controller.json.needsDbUpgrade | default('?') }})
      when: DEBUG == 1 and nc_status_controller.json is defined
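
    # For reference, status.php returns a small JSON document; an illustrative
    # (not verbatim) response:
    #   {"installed":true,"maintenance":false,"needsDbUpgrade":false,
    #    "version":"29.0.0.19","versionstring":"29.0.0","edition":"","productname":"Nextcloud"}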
155 old/update_broker_kafka-ui.yml Normal file
@@ -0,0 +1,155 @@
# update_broker_kafka-ui.yml

- name: Update Kafka broker3 and Redpanda Console on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Kafka / Redpanda Console specifics ---
    kafka_project: "kafka"
    # Adjusted to match your actual compose file location
    kafka_compose_file: "/data/compose/docker-compose.yml"

    kafka_services:
      - broker3
      - kafka-ui

    redpanda_console_port: 8084

    # Controller-side URL (default to direct VM IP/port or external URL)
    redpanda_console_url: "{{ lookup('env', 'REDPANDA_CONSOLE_URL') | default('http://192.168.69.254:8084/overview', true) }}"

    redpanda_retries: "{{ RETRIES }}"
    redpanda_delay: 2

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
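    # The prefix drops any interactive `docker` alias, silences CLI hints, and
    # forces the real binary via `command docker` before every invocation.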

    # Commands to run on the target VM (quiet outputs)
    # 1) Pull latest images for broker3 + kafka-ui
    # 2) Stop any running containers with these names (legacy or compose-managed)
    # 3) Remove any containers with these names to avoid name conflicts
    # 4) Recreate services via docker compose
    kafka_commands:
      - "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} pull {{ kafka_services | join(' ') }} >/dev/null"
      - "{{ docker_prefix }} stop {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} rm -f {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} up -d --no-deps --force-recreate {{ kafka_services | join(' ') }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)  # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Kafka update commands on VM (via SSH)  # use SSHPASS env, hide item value
      ansible.builtin.command:
        argv:
          - sshpass
          - -e  # read password from SSHPASS environment
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"  # supply password via environment
      loop: "{{ kafka_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: kafka_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Kafka command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ kafka_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Kafka command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Kafka/Redpanda Console update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Kafka/Redpanda Console update commands succeeded."
      loop: "{{ kafka_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness check – Redpanda Console UI
    # -------------------------

    - name: Redpanda Console | Wait for overview page (controller, with retries)
      ansible.builtin.uri:
        url: "{{ redpanda_console_url }}"
        method: GET
        return_content: true
        validate_certs: false  # plain HTTP on 192.168.69.254 (or as configured)
        status_code: 200
      register: redpanda_controller
      delegate_to: localhost
      run_once: true
      when: redpanda_console_url is defined and (redpanda_console_url | length) > 0
      retries: "{{ redpanda_retries }}"
      delay: "{{ redpanda_delay }}"
      until: redpanda_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Redpanda Console | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Redpanda Console overview {{ 'reachable' if (redpanda_controller is defined and (redpanda_controller.status|default(0))==200) else 'NOT reachable' }}.
          status={{ redpanda_controller.status | default('n/a') }};
          length={{ (redpanda_controller.content | default('')) | length }}
      when: DEBUG == 1 and (redpanda_controller is defined)

    # Optional detailed dump (short excerpt only)
    - name: Redpanda Console | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (redpanda_controller.content | default(''))[:500] }}"
      when: DEBUG == 1 and (redpanda_controller is defined) and (redpanda_controller.content is defined)

    # -------------------------
    # Final assertion: Console URL must be reachable
    # -------------------------

    - name: Redpanda Console | Assert overview reachable (if URL configured)
      ansible.builtin.assert:
        that:
          - >
            not (redpanda_console_url is defined and (redpanda_console_url | length) > 0)
            or
            (
              redpanda_controller is defined
              and (redpanda_controller.status | default(0)) == 200
            )
        fail_msg: "Redpanda Console URL {{ redpanda_console_url }} is NOT reachable with HTTP 200 after retries."
        success_msg: "Redpanda Console URL {{ redpanda_console_url }} is reachable with HTTP 200."
174 old/update_collabora.yml Normal file
@@ -0,0 +1,174 @@
# nextcloud/update_collabora.yml

- name: Update Collabora CODE on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Collabora specifics ---
    collabora_debug_caps: true
    collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"

    # Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
    collabora_project: "nextcloud-collabora"
    collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
    collabora_service: "collabora"

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs)
    collabora_commands:
      - "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
      - "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
      - "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)  # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Collabora update commands on VM (via SSH)  # use SSHPASS env, hide item value
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ collabora_commands }}"
      loop_control:
        index_var: idx  # <-- capture loop index here
        label: "cmd-{{ idx }}"  # <-- use idx instead of loop.index
      register: collab_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Show outputs for each Collabora command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ collab_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Collabora command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Collabora update commands succeeded."
      loop: "{{ collab_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Collabora | Wait for capabilities (controller first)
      ansible.builtin.uri:
        url: "{{ collabora_caps_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: caps_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 2
      until: caps_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Collabora | VM-side fetch (pure JSON via Python)  # use SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            import json, urllib.request, sys
            try:
                with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
                    sys.stdout.write(r.read().decode())
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: caps_vm
      changed_when: false
      failed_when: false
      when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
      no_log: "{{ DEBUG == 0 }}"
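
    # Probe order: the controller checks the public capabilities URL first; the
    # VM-side fetch above only runs as a fallback when that probe failed or did
    # not return parseable JSON.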

    - name: Collabora | Choose JSON (controller wins, else VM)
      ansible.builtin.set_fact:
        collab_caps_json: >-
          {{
            (caps_controller.json
             if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
             else (
               (caps_vm.stdout | default('') | trim | length > 0)
               | ternary((caps_vm.stdout | trim | from_json), omit)
             )
            )
          }}
      failed_when: false
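
    # The ternary(..., omit) branch is intended to leave collab_caps_json unset
    # when neither probe yielded JSON, which is why the tasks below guard on
    # `collab_caps_json is defined`.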

    - name: Collabora | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Collabora {{ collab_caps_json.productVersion | default('?') }}
          ({{ collab_caps_json.productName | default('?') }}),
          convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
          serverId={{ collab_caps_json.serverId | default('n/a') }}
      when: collab_caps_json is defined and DEBUG == 1

    - name: Collabora | Capabilities unavailable (after retries)
      ansible.builtin.debug:
        msg: "Capabilities endpoint is not reachable even after retries."
      when: collab_caps_json is not defined and DEBUG == 1

    # Optional full JSON (debug)
    - name: Collabora | Full JSON (debug)
      ansible.builtin.debug:
        var: collab_caps_json
      when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1
194 old/update_homarr.yml Normal file
@@ -0,0 +1,194 @@
# update_homarr.yml

- name: Update Homarr on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # Homarr specifics
    homarr_project: "homarr"
    homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
    homarr_service: "homarr"
    homarr_image: "ghcr.io/homarr-labs/homarr:latest"
    homarr_port: 7575

    # Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
    # If empty/undefined, controller check is skipped and we only probe from the VM.
    homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"

    # Fixed container name used in your compose (avoid conflicts with any leftover container)
    homarr_container_name: "homarr"

    # Retry policy (same pattern as Kuma): 25x with 2s delay
    homarr_retries: "{{ RETRIES }}"
    homarr_delay: 2

    # Docker command prefix (consistent behavior)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs)
    homarr_commands:
      - "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
      - "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
      # remove conflicting container name before compose up (silently)
      - "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)  # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Homarr update commands on VM (via SSH)  # use SSHPASS env, hide item label
      ansible.builtin.command:
        argv:
          - sshpass
          - -e  # read password from SSHPASS environment
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"  # supply password via environment
      loop: "{{ homarr_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: homarr_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Homarr command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ homarr_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Homarr command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Homarr update commands succeeded."
      loop: "{{ homarr_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Homarr | Wait for homepage (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
        method: GET
        return_content: true
        # Validate TLS only when using https://
        validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
        status_code: 200
      register: homarr_controller
      delegate_to: localhost
      run_once: true
      when: homarr_url is defined and (homarr_url | length) > 0
      retries: "{{ homarr_retries }}"
      delay: "{{ homarr_delay }}"
      until: homarr_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Homarr | VM-side fetch (HTML via Python, with retries)  # use SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Fetch Homarr homepage from localhost and print HTML to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: homarr_vm
      changed_when: false
      failed_when: false
      when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
      retries: "{{ homarr_retries }}"
      delay: "{{ homarr_delay }}"
      until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"

    - name: Homarr | Choose homepage HTML (controller wins, else VM)  # safe guard against empty result
      ansible.builtin.set_fact:
        homarr_home_html: >-
          {{
            (
              homarr_controller.content
              if (homarr_controller is defined)
                 and ((homarr_controller.status|default(0))==200)
                 and (homarr_controller.content is defined)
              else
                (homarr_vm.stdout | default('') | trim)
            )
          }}
      when:
        - (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
          or ((homarr_vm.stdout | default('') | trim | length) > 0)
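
    # homarr_home_html is only set when at least one probe returned content, so
    # the summary and excerpt tasks below key off `homarr_home_html is defined`.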

    - name: Homarr | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
          Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
          length={{ (homarr_home_html | default('')) | length }};
          contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
      when: DEBUG == 1

    - name: Homarr | Homepage unavailable (after retries)
      ansible.builtin.debug:
        msg: "Homarr web UI is not reachable even after retries."
      when: homarr_home_html is not defined and DEBUG == 1

    # Optional detailed dump (short excerpt only)
    - name: Homarr | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (homarr_home_html | default(''))[:500] }}"
      when: homarr_home_html is defined and DEBUG == 1
313 old/update_immich.yml Normal file
@@ -0,0 +1,313 @@
# update_immich.yml

- name: Update Immich on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # Immich specifics
    immich_dir: "/opt/immich"
    immich_project: "immich"
    immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
    immich_compose_file: "/opt/immich/docker-compose.yml"
    immich_override_file: "/opt/immich/docker-compose.override.yml"
    immich_port: 2283

    # Optional external URL for controller-side readiness check (e.g., https://photos.example.com)
    immich_url: "{{ lookup('env', 'IMMICH_URL') | default('', true) }}"

    # Retry policy
    immich_retries: "{{ RETRIES }}"
    immich_delay: 2

    # Docker command prefix (consistent behavior)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Compose command (always include override to keep local mounts separate from upstream compose)
    immich_compose_cmd: >-
      {{ docker_prefix }} compose
      -p {{ immich_project }}
      -f {{ immich_compose_file }}
      -f {{ immich_override_file }}
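    # After templating, the folded scalar above collapses to one line, roughly
    # (values taken from the vars above):
    #   unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker \
    #     compose -p immich -f /opt/immich/docker-compose.yml -f /opt/immich/docker-compose.override.yml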

    # Commands to run on the target VM
    immich_commands:
      - "cd {{ immich_dir }}"

      - |
        cd {{ immich_dir }}
        mkdir -p backups
        if [ -f docker-compose.yml ]; then
          cp -a docker-compose.yml "backups/docker-compose.yml.$(date +%F_%H%M%S).bak"
        fi
        if [ -f .env ]; then
          cp -a .env "backups/.env.$(date +%F_%H%M%S).bak"
        fi
        if [ -f docker-compose.override.yml ]; then
          cp -a docker-compose.override.yml "backups/docker-compose.override.yml.$(date +%F_%H%M%S).bak"
        fi

      - |
        cd {{ immich_dir }}
        # Download latest compose from Immich releases (requires curl or wget)
        if command -v curl >/dev/null 2>&1; then
          curl -fsSL -o docker-compose.yml "{{ immich_compose_url }}"
        elif command -v wget >/dev/null 2>&1; then
          wget -qO docker-compose.yml "{{ immich_compose_url }}"
        else
          echo "Neither curl nor wget is available on the VM."
          exit 1
        fi

      - |
        cd {{ immich_dir }}
        # Ensure override compose exists (create if missing)
        if [ ! -f "{{ immich_override_file }}" ]; then
          printf '%s\n' \
            'services:' \
            '  immich-server:' \
            '    volumes:' \
            '      - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos' \
            '      - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos' \
            > "{{ immich_override_file }}"
        fi
        # Fail early if override is still missing/empty
        test -s "{{ immich_override_file }}"

      - |
        cd {{ immich_dir }}
        # Ensure .env exists. If missing, try to reconstruct it from running containers to avoid breaking DB creds.
        python3 - <<'PY'
        import json
        import subprocess
        from pathlib import Path

        env_path = Path(".env")
        if env_path.exists():
            raise SystemExit(0)

        def run(cmd):
            p = subprocess.run(cmd, capture_output=True, text=True)
            return p.returncode, p.stdout, p.stderr

        rc, out, err = run(["bash", "-lc", "command docker inspect immich_postgres immich_server"])
        if rc != 0 or not out.strip():
            print("ERROR: .env is missing and cannot inspect running containers (immich_postgres/immich_server).", flush=True)
            print("Create /opt/immich/.env manually or ensure the containers exist.", flush=True)
            raise SystemExit(1)

        data = json.loads(out)

        by_name = {}
        for c in data:
            name = (c.get("Name") or "").lstrip("/")
            by_name[name] = c

        pg = by_name.get("immich_postgres")
        srv = by_name.get("immich_server")
        if not pg or not srv:
            print("ERROR: Could not find immich_postgres and immich_server in docker inspect output.", flush=True)
            raise SystemExit(1)

        def env_map(container):
            m = {}
            for kv in (container.get("Config", {}).get("Env") or []):
                if "=" in kv:
                    k, v = kv.split("=", 1)
                    m[k] = v
            return m

        def find_mount_source(container, dest):
            for m in (container.get("Mounts") or []):
                if m.get("Destination") == dest:
                    return m.get("Source")
            return ""

        pg_env = env_map(pg)
        db_user = pg_env.get("POSTGRES_USER", "")
        db_pass = pg_env.get("POSTGRES_PASSWORD", "")
        db_name = pg_env.get("POSTGRES_DB", "")

        db_data = find_mount_source(pg, "/var/lib/postgresql/data")
        upload_loc = find_mount_source(srv, "/usr/src/app/upload")

        # Try to preserve the currently used image tag as IMMICH_VERSION (optional but safer)
        immich_version = ""
        image = (srv.get("Config", {}).get("Image") or "")
        if ":" in image and "@" not in image:
            immich_version = image.rsplit(":", 1)[-1]
        elif ":" in image and "@" in image:
            # image like repo:tag@sha256:...
            immich_version = image.split("@", 1)[0].rsplit(":", 1)[-1]

        missing = []
        for k, v in [
            ("DB_USERNAME", db_user),
            ("DB_PASSWORD", db_pass),
            ("DB_DATABASE_NAME", db_name),
            ("DB_DATA_LOCATION", db_data),
            ("UPLOAD_LOCATION", upload_loc),
        ]:
            if not v:
                missing.append(k)

        if missing:
            print("ERROR: Could not reconstruct these .env values from containers: " + ", ".join(missing), flush=True)
            raise SystemExit(1)

        lines = [
            f"UPLOAD_LOCATION={upload_loc}",
            f"DB_DATA_LOCATION={db_data}",
            f"DB_USERNAME={db_user}",
            f"DB_PASSWORD={db_pass}",
            f"DB_DATABASE_NAME={db_name}",
        ]
        if immich_version:
            lines.append(f"IMMICH_VERSION={immich_version}")

        env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
        print("Created .env from running containers.", flush=True)
        PY

      - |
        cd {{ immich_dir }}
        # Comment out healthcheck.start_interval if present (safe no-op if missing)
        sed -i -E 's/^([[:space:]]*)start_interval:/\1# start_interval:/' docker-compose.yml || true

      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} config >/dev/null"
      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} pull >/dev/null"
      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} up -d --remove-orphans --force-recreate >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)  # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Immich update commands on VM (via SSH)  # use SSHPASS env, hide item label
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ immich_commands }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"
      register: immich_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"
      run_once: true

    - name: Show outputs for each Immich command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ immich_cmds.results }}"
      when: DEBUG == 1
      run_once: true

    - name: Fail play if any Immich command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Immich update commands succeeded."
      loop: "{{ immich_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"
      run_once: true

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Immich | Wait for API ping (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (immich_url | regex_replace('/$','')) + '/api/server/ping' }}"
        method: GET
        return_content: true
        validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
        status_code: 200
      register: immich_controller
      delegate_to: localhost
      run_once: true
      when: immich_url is defined and (immich_url | length) > 0
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
      until: immich_controller.status == 200 and ('pong' in (immich_controller.content | default('')))
      failed_when: false
      changed_when: false

    - name: Immich | VM-side ping (JSON via Python, with retries)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Ping Immich API from localhost and print response to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/api/server/ping", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: immich_vm
      changed_when: false
      failed_when: false
      when: immich_controller.status | default(0) != 200
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
      until: (immich_vm.stdout | default('') | trim | length) > 0 and ('pong' in (immich_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"
      run_once: true

    - name: Immich | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Immich API ping {{ 'OK' if (('pong' in (immich_controller.content|default(''))) or ('pong' in (immich_vm.stdout|default('')))) else 'NOT OK' }}.
          Source={{ 'controller' if (immich_controller.status|default(0))==200 else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }}.
      when: DEBUG == 1
      run_once: true
293 old/update_nextcloud_mariadb_redis.yml Normal file
@@ -0,0 +1,293 @@
# nextcloud/update_nextcloud_db_redis.yml

- name: Update Nextcloud DB and Redis on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug / retries ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Nextcloud specifics ---
    nextcloud_project: "nextcloud-collabora"
    nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"

    # Service names from docker-compose file
    nextcloud_web_service: "nextcloud"
    nextcloud_db_service: "nextclouddb"
    redis_service: "redis"

    # Backup directory on the VM (timestamped on controller)
    backup_dir: "/data/compose/nextcloud/backup-db-redis-{{ lookup('pipe', 'date +%F-%H%M%S') }}"

    nextcloud_base_url: "https://cloud.martinfencl.eu"
    nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # --- Backup phase commands (run on VM) ---
    # Same idea as in update_nextcloud.yml: maintenance on + config/custom_apps + DB dump
    nextcloud_backup_commands:
      - >
        mkdir -p "{{ backup_dir }}"
      - >
        docker exec -u www-data nextcloud php occ maintenance:mode --on
      - >
        docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
      - >
        docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
      - >
        docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
      - >
        docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
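      # The dump prefers mariadb-dump and falls back to mysqldump on older
      # images; the redirection to db.sql runs in the remote shell, so the file
      # is written on the VM, not on the controller.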

    # --- DB + Redis upgrade commands (run on VM) ---
    db_redis_upgrade_commands:
      # Update MariaDB service
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_db_service }}
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_db_service }}
      # Simple DB health check (non-fatal)
      - >
        docker exec nextcloud-db sh -c 'mysqladmin ping -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' || true
      # Update Redis service
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ redis_service }}
      - >
        {{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ redis_service }}
      # Simple Redis health check (non-fatal)
      - >
        docker exec redis redis-cli PING || true

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Nextcloud | Show current version before DB/Redis upgrade (DEBUG)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - 'docker exec -u www-data nextcloud php occ -V || true'
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_version_before
      changed_when: false
      failed_when: false
      when: DEBUG == 1

    # -------------------------
    # Backup phase
    # -------------------------
    - name: Nextcloud | Run backup commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_backup_commands }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"
      register: nc_backup_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"
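
    # Note: unlike the other playbooks, the backup commands are handed straight
    # to ssh (no explicit `bash -lc`), so the remote user's default shell handles
    # quoting and the db.sql redirection.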

    - name: Nextcloud | Show outputs of backup commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_backup_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any backup command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud DB/Redis backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud DB/Redis backup commands succeeded."
      loop: "{{ nc_backup_cmds.results }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"

    # -------------------------
    # DB + Redis upgrade phase
    # -------------------------

    - name: Nextcloud | Run DB/Redis upgrade commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ db_redis_upgrade_commands }}"
      loop_control:
        index_var: idx
        label: "db-redis-cmd-{{ idx }}"
      register: nc_db_redis_cmds
      changed_when: false
      failed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of DB/Redis upgrade commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_db_redis_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any DB/Redis upgrade command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud DB/Redis upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud DB/Redis upgrade commands succeeded."
      loop: "{{ nc_db_redis_cmds.results }}"
      loop_control:
        index_var: idx
        label: "db-redis-cmd-{{ idx }}"

    # -------------------------
    # Disable maintenance + readiness check
    # -------------------------

    - name: Nextcloud | Disable maintenance mode after DB/Redis upgrade
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_maint_off
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Wait for status.php (controller first)
      ansible.builtin.uri:
        url: "{{ nextcloud_status_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: nc_status_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 4
      # retries has no effect without an until condition; mirror the sibling playbook
      until: nc_status_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Nextcloud | VM-side fetch status.php (JSON via Python)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            import json, urllib.request, sys
            try:
                with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
                    sys.stdout.write(r.read().decode())
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_status_vm
      changed_when: false
      failed_when: false
      when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Choose status JSON (controller wins, else VM)
      ansible.builtin.set_fact:
        nextcloud_status_json: >-
          {{
            (nc_status_controller.json
             if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
             else (
               (nc_status_vm.stdout | default('') | trim | length > 0)
               | ternary((nc_status_vm.stdout | trim | from_json), omit)
             )
            )
          }}
      failed_when: false

    - name: Nextcloud | Print concise status summary (DEBUG)
      ansible.builtin.debug:
        msg: >-
          Nextcloud {{ nextcloud_status_json.version | default('?') }}
          (installed={{ nextcloud_status_json.installed | default('?') }},
          maintenance={{ nextcloud_status_json.maintenance | default('?') }},
          needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
      when: nextcloud_status_json is defined and DEBUG == 1

    - name: Nextcloud | Status JSON not available (DEBUG)
      ansible.builtin.debug:
        msg: "status.php is not reachable or did not return JSON."
      when: nextcloud_status_json is not defined and DEBUG == 1
118 old/update_portainer_agent.yml Normal file
@@ -0,0 +1,118 @@
# update_portainer_agent.yml

- name: Update Portainer Agent on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Portainer Agent specifics ---
    portainer_agent_image: "portainer/agent:latest"
    portainer_agent_container: "portainer_agent"
    portainer_agent_port: 9001

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs)
    portainer_commands:
      - "{{ docker_prefix }} pull -q {{ portainer_agent_image }} >/dev/null"
      - "{{ docker_prefix }} stop {{ portainer_agent_container }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} rm {{ portainer_agent_container }} >/dev/null 2>&1 || true"
      - >
        {{ docker_prefix }} run -d
        --name {{ portainer_agent_container }}
        --restart=always
        -p {{ portainer_agent_port }}:9001
        -v /var/run/docker.sock:/var/run/docker.sock
        -v /var/lib/docker/volumes:/var/lib/docker/volumes
        {{ portainer_agent_image }} >/dev/null
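    # The socket mount gives the agent access to the local Docker API and the
    # volumes mount lets Portainer browse named volumes; restart=always brings
    # the container back after reboots.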

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)  # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Run Portainer Agent update commands on VM (via SSH)  # run all commands via sshpass
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ portainer_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: portainer_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Portainer command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ portainer_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Portainer command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Portainer Agent update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Portainer Agent update commands succeeded."
      loop: "{{ portainer_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness check (TCP port)
    # -------------------------

    - name: Portainer Agent | Wait for TCP port to be open from controller
      ansible.builtin.wait_for:
        host: "{{ vm_ip }}"
        port: "{{ portainer_agent_port }}"
        delay: 2  # initial delay before first check
        timeout: "{{ RETRIES * 2 }}"  # total timeout in seconds
        state: started
      register: portainer_wait
      delegate_to: localhost
      run_once: true
      changed_when: false
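
    # The timeout budget of RETRIES * 2 seconds mirrors the "25 retries x 2 s
    # delay" pattern the HTTP readiness checks in the sibling playbooks use.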

    - name: Portainer Agent | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Portainer Agent TCP {{ vm_ip }}:{{ portainer_agent_port }}
          reachable={{ (portainer_wait is defined) and (not portainer_wait.failed | default(false)) }}
          elapsed={{ portainer_wait.elapsed | default('n/a') }}s
      when: DEBUG == 1
65
old/update_semaphore.yml
Normal file
@@ -0,0 +1,65 @@
# update_semaphore.yml

- name: Update Semaphore on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to the VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"      # IP of vm-portainer
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via a Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"

    # --- Semaphore specifics ---
    semaphore_project: "semaphore"
    semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
    semaphore_service: "semaphore"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes
    - name: Run Semaphore self-update on VM in background (nohup)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            nohup bash -c '
              unalias docker 2>/dev/null || true
              DOCKER_CLI_HINTS=0 docker compose \
                -p {{ semaphore_project }} \
                -f {{ semaphore_compose_file }} \
                up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
            ' >/dev/null 2>&1 &
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: semaphore_update
      changed_when: false
      no_log: "{{ DEBUG | int == 0 }}"
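
    # Rationale: `docker compose up --force-recreate` on the Semaphore stack
    # tears down the very container that may be running this job. Wrapping it
    # in `nohup ... &` detaches it from the SSH session, so the recreate
    # survives even if the session (and the job) is cut off mid-update.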

    - name: Show result of Semaphore self-update (debug)
      ansible.builtin.debug:
        msg: |
          RC: {{ semaphore_update.rc }}
          STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
          STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
      when: DEBUG | int == 1
194
old/update_uptime_kuma.yml
Normal file
@@ -0,0 +1,194 @@
# nextcloud/update_uptime_kuma.yml

- name: Update Uptime Kuma on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via a Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # Uptime Kuma specifics
    kuma_project: "uptime-kuma"
    kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
    kuma_service: "uptime-kuma"
    kuma_image: "louislam/uptime-kuma:latest"
    kuma_port: 3001

    # Optional external URL for the controller-side readiness check (e.g., https://kuma.example.com).
    # If empty/undefined, the controller check is skipped and we only probe from the VM.
    kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"

    # Fixed container name used in the compose file (conflicts with a previous/Portainer-run container)
    kuma_container_name: "uptime-kuma-dev"

    # Retry policy
    kuma_retries: "{{ RETRIES }}"
    kuma_delay: 2

    # Docker command prefix (consistent behavior)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs, like in the Collabora play)
    kuma_commands:
      - "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
      # remove the conflicting container name before compose up (silently)
      - "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes
    - name: Run Uptime Kuma update commands on VM (via SSH)  # use SSHPASS env, hide item label
      ansible.builtin.command:
        argv:
          - sshpass
          - -e                        # read the password from the SSHPASS environment variable
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"      # supply the password via the environment
      loop: "{{ kuma_commands }}"
      loop_control:
        index_var: idx                # capture the loop index
        label: "cmd-{{ idx }}"        # avoid printing the full command in the (item=...) line
      register: kuma_cmds
      changed_when: false
      no_log: "{{ DEBUG | int == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Uptime Kuma command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ kuma_cmds.results }}"
      when: DEBUG | int == 1
    - name: Fail play if any Uptime Kuma command failed  # also hide the item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Uptime Kuma update commands succeeded."
      loop: "{{ kuma_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Kuma | Wait for homepage (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (kuma_url | regex_replace('/$', '')) + '/' }}"
        method: GET
        return_content: true
        # Validate TLS only when using https://
        validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
        status_code: 200
      register: kuma_controller
      delegate_to: localhost
      run_once: true
      when: kuma_url is defined and (kuma_url | length) > 0
      retries: "{{ kuma_retries }}"
      delay: "{{ kuma_delay }}"
      until: kuma_controller.status == 200
      failed_when: false
      changed_when: false
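
    # The url expression strips any trailing slash and re-appends exactly one,
    # so both KUMA_URL=https://kuma.example.com and the same value with a
    # trailing slash end up as https://kuma.example.com/ (example hostname
    # for illustration only).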

    - name: Kuma | VM-side fetch (HTML via Python, with retries)  # use the SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Fetch the Kuma homepage from localhost and print the HTML to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: kuma_vm
      changed_when: false
      failed_when: false
      when: (kuma_controller.status | default(0)) != 200 or kuma_controller.content is not defined
      retries: "{{ kuma_retries }}"
      delay: "{{ kuma_delay }}"
      until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
      no_log: "{{ DEBUG | int == 0 }}"  # hide the command and output when not debugging
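
    # The <<'PY' heredoc has a quoted delimiter, so the VM shell expands
    # nothing inside it; "{{ kuma_port }}" is already substituted by Jinja on
    # the controller before the script text ever reaches the VM.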

    - name: Kuma | Choose homepage HTML (controller wins, else VM)  # safeguard against an empty result
      ansible.builtin.set_fact:
        kuma_home_html: >-
          {{
            kuma_controller.content
            if (kuma_controller is defined)
               and ((kuma_controller.status | default(0)) == 200)
               and (kuma_controller.content is defined)
            else (kuma_vm.stdout | default('') | trim)
          }}
      when:
        - (kuma_controller is defined and (kuma_controller.status | default(0)) == 200 and (kuma_controller.content is defined))
          or ((kuma_vm.stdout | default('') | trim | length) > 0)

    - name: Kuma | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
          Source={{ 'controller' if ((kuma_controller is defined) and ((kuma_controller.status | default(0)) == 200) and (kuma_controller.content is defined)) else ('vm' if (kuma_vm.stdout | default('') | trim | length > 0) else 'n/a') }};
          length={{ (kuma_home_html | default('')) | length }};
          contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
      when: DEBUG | int == 1

    - name: Kuma | Homepage unavailable (after retries)
      ansible.builtin.debug:
        msg: "Kuma web UI is not reachable even after all retries."
      when: kuma_home_html is not defined and DEBUG | int == 1

    # Optional detailed dump (short excerpt only)
    - name: Kuma | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (kuma_home_html | default(''))[:500] }}"
      when: kuma_home_html is defined and DEBUG | int == 1
69
portainer/update_portainer_agent_lxc.yml
Normal file
@@ -0,0 +1,69 @@
# update_portainer_agent_lxc.yml

- name: Update Portainer Agent (LXC, no compose)
  hosts: pve2_lxc_jellyfin
  gather_facts: false

  vars:
    agent_container_name: portainer_agent
    agent_port: 9001

  tasks:
    - name: Check if agent container exists
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker ps -a --format '{{ \"{{\" }}.Names{{ \"}}\" }}' | grep -x '{{ agent_container_name }}'"
      register: agent_exists
      changed_when: false
      failed_when: false
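
    # `{{ "{{" }}` emits a literal `{{`, so Docker's Go template survives
    # Jinja templating. The argv item above renders to roughly:
    #
    #   docker ps -a --format '{{.Names}}' | grep -x 'portainer_agent'
    #
    # grep -x matches the whole line, so rc=0 means the exact name exists.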

    - name: Abort if agent container is missing
      ansible.builtin.fail:
        msg: "Container '{{ agent_container_name }}' not found."
      when: agent_exists.rc != 0

    - name: Read current agent image
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker inspect -f '{{ \"{{\" }}.Config.Image{{ \"}}\" }}' {{ agent_container_name }}"
      register: agent_image
      changed_when: false

    - name: Pull latest image tag for current agent image
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker pull {{ agent_image.stdout | trim }}"
      changed_when: true

    - name: Recreate agent container with standard Portainer Agent args
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - |
            set -euo pipefail

            # Stop/remove the old container
            docker rm -f "{{ agent_container_name }}" >/dev/null 2>&1 || true

            # Run the Portainer Agent with common, safe defaults
            docker run -d \
              --name "{{ agent_container_name }}" \
              --restart=always \
              -p {{ agent_port }}:9001 \
              -v /var/run/docker.sock:/var/run/docker.sock \
              -v /var/lib/docker/volumes:/var/lib/docker/volumes \
              "{{ agent_image.stdout | trim }}"
      changed_when: true

    - name: Wait for agent port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ agent_port }}"
        timeout: 60
69
portainer/update_portainer_agent_vm.yml
Normal file
@@ -0,0 +1,69 @@
# update_portainer_agent_vm.yml

- name: Update Portainer Agent (VM, no compose)
  hosts: pve2_vm
  gather_facts: false

  vars:
    agent_container_name: portainer_agent
    agent_port: 9001

  tasks:
    - name: Check if agent container exists
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker ps -a --format '{{ \"{{\" }}.Names{{ \"}}\" }}' | grep -x '{{ agent_container_name }}'"
      register: agent_exists
      changed_when: false
      failed_when: false

    - name: Abort if agent container is missing
      ansible.builtin.fail:
        msg: "Container '{{ agent_container_name }}' not found."
      when: agent_exists.rc != 0

    - name: Read current agent image
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker inspect -f '{{ \"{{\" }}.Config.Image{{ \"}}\" }}' {{ agent_container_name }}"
      register: agent_image
      changed_when: false

    - name: Pull latest image tag for current agent image
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - "docker pull {{ agent_image.stdout | trim }}"
      changed_when: true

    - name: Recreate agent container with standard Portainer Agent args
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - |
            set -euo pipefail

            # Stop/remove the old container
            docker rm -f "{{ agent_container_name }}" >/dev/null 2>&1 || true

            # Run the Portainer Agent with common, safe defaults
            docker run -d \
              --name "{{ agent_container_name }}" \
              --restart=always \
              -p {{ agent_port }}:9001 \
              -v /var/run/docker.sock:/var/run/docker.sock \
              -v /var/lib/docker/volumes:/var/lib/docker/volumes \
              "{{ agent_image.stdout | trim }}"
      changed_when: true

    - name: Wait for agent port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ agent_port }}"
        timeout: 60
57
test_sms.yml
@@ -1,57 +0,0 @@
---
- name: Send and verify SMS delivery via internet-master.cz
  hosts: localhost
  gather_facts: false
  vars:
    sms_number: "601358865"
    sms_username: "mikrotik"
    sms_password_send: "jdkotzHJIOPWhjtr32D"
    sms_password_recv: "jdkotzHJIOPWhjtr32D"
    sms_wait_seconds: 120  # Wait 2 minutes for delivery

  tasks:
    - name: Generate random test string
      set_fact:
        random_string: "mikrotik_{{ lookup('password', '/dev/null length=8 chars=ascii_letters') }}"

    - name: Send SMS message
      uri:
        url: "https://sms.internet-master.cz/send/?number={{ sms_number }}&message=@mikrotik@{{ random_string | urlencode }}&type=class-1&username={{ sms_username }}&password={{ sms_password_send }}"
        method: GET
        return_content: true
      register: send_result

    - name: Show send API response
      debug:
        var: send_result.content

    - name: Wait for SMS to be delivered
      pause:
        seconds: "{{ sms_wait_seconds }}"

    - name: Fetch received messages
      uri:
        url: "https://sms.internet-master.cz/receive/?username={{ sms_username }}&password={{ sms_password_recv }}"
        method: GET
        return_content: true
      register: recv_result

    - name: Parse received JSON
      set_fact:
        inbox: "{{ recv_result.json.inbox | default([]) }}"

    - name: Check if random string message was received
      set_fact:
        message_found: "{{ inbox | selectattr('message', 'equalto', random_string) | list | length > 0 }}"

    - name: Report result
      debug:
        msg: >
          SMS with message '{{ random_string }}' was {{
          'delivered ✅' if message_found else 'NOT delivered ❌'
          }}.

    - name: Fail if not delivered
      fail:
        msg: "Message '{{ random_string }}' not found in received inbox!"
      when: not message_found
165
update.yml
@@ -1,165 +0,0 @@
- name: Update system (APT + Flatpak)
  hosts: all
  gather_facts: false
  strategy: free
  serial: 2

  become: true
  become_user: root
  become_method: sudo

  vars:
    ssh_precheck_timeout: 8
    apt_async: 1800
    apt_poll: 10
    apt_retries: 3
    apt_retry_delay: 5
    flatpak_timeout: 300
    flatpak_async: 600
    flatpak_poll: 5

  pre_tasks:
    - name: Ensure SSH is reachable (skip host if not)
      wait_for:
        host: "{{ ansible_host | default(inventory_hostname) }}"
        port: "{{ ansible_port | default(22) }}"
        timeout: "{{ ssh_precheck_timeout }}"
      delegate_to: localhost
      register: ssh_ok
      ignore_errors: true

    - meta: end_host
      when: ssh_ok is failed

    - name: Ping with retries (handle intermittent flaps)
      ping:
      register: ping_r
      retries: 3
      delay: 3
      until: ping_r is succeeded
      ignore_errors: true

    - meta: end_host
      when: ping_r is failed
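
    # `meta: end_host` ends the play for the current host only, so an
    # unreachable or flapping machine is dropped while the remaining hosts
    # continue under strategy: free.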

  tasks:
    - name: Update APT cache (bounded + retried)
      environment: { DEBIAN_FRONTEND: noninteractive }
      apt:
        update_cache: yes
        cache_valid_time: 3600
      async: "{{ apt_async }}"
      poll: "{{ apt_poll }}"
      register: apt_update
      retries: "{{ apt_retries }}"
      delay: "{{ apt_retry_delay }}"
      until: apt_update is succeeded

    - name: If APT cache update failed, try to fix dpkg and retry once
      block:
        - name: Fix partially configured packages
          command: dpkg --configure -a
          changed_when: false
        - name: Retry APT cache update after dpkg fix
          environment: { DEBIAN_FRONTEND: noninteractive }
          apt:
            update_cache: yes
          async: 600
          poll: 5
      when: apt_update is failed

    - name: Upgrade all APT packages (bounded + retried)
      environment: { DEBIAN_FRONTEND: noninteractive }
      apt:
        upgrade: dist
      async: "{{ apt_async }}"
      poll: "{{ apt_poll }}"
      register: apt_upgrade
      retries: "{{ apt_retries }}"
      delay: "{{ apt_retry_delay }}"
      until: apt_upgrade is succeeded

    - name: If APT upgrade failed, try to fix dpkg and retry once
      block:
        - name: Fix partially configured packages
          command: dpkg --configure -a
          changed_when: false
        - name: Retry APT upgrade after dpkg fix
          environment: { DEBIAN_FRONTEND: noninteractive }
          apt:
            upgrade: dist
          async: 1200
          poll: 5
      when: apt_upgrade is failed

    - name: Check if flatpak binary exists
      become: false
      stat:
        path: /usr/bin/flatpak
      register: flatpak_bin

    - name: Update system Flatpaks (bounded; treat timeout as non-fatal)
      command: bash -lc "timeout {{ flatpak_timeout }} flatpak update -y --noninteractive"
      register: flatpak_sys
      async: "{{ flatpak_async }}"
      poll: "{{ flatpak_poll }}"
      failed_when: flatpak_sys.rc is defined and flatpak_sys.rc not in [0, 124]
      when: flatpak_bin.stat.exists
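
    # rc 124 is the exit status of timeout(1) when the command is killed at
    # the deadline, so a slow Flatpak update counts as "ran out of time",
    # not as a failure.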

    # ---- User-agnostic Flatpak updates (all non-system users) ----

    - name: Get passwd database
      getent:
        database: passwd
      register: ge

    - name: Build list of regular users (uid >= 1000, real shells)
      set_fact:
        # getent_passwd values are field lists: [password, uid, gid, gecos, home, shell]
        regular_users: >-
          {%- set users = [] -%}
          {%- for name, fields in (ge.ansible_facts.getent_passwd | default({})).items() -%}
          {%- if (fields[1] | int) >= 1000 and fields[5] not in ['/usr/sbin/nologin', '/sbin/nologin', '/bin/false'] -%}
          {%- set _ = users.append({'name': name, 'uid': fields[1] | int, 'shell': fields[5]}) -%}
          {%- endif -%}
          {%- endfor -%}
          {{ users }}
      when: ge is succeeded

    - name: Stat per-user runtime dir if flatpak is present
      stat:
        path: "/run/user/{{ item.uid }}"
      loop: "{{ regular_users | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      register: user_runtime_stats
      when: flatpak_bin.stat.exists

    - name: Merge runtime stats keyed by username
      set_fact:
        user_runtime_map: >-
          {{
            dict(user_runtime_stats.results | map(attribute='item.name')
                 | zip(user_runtime_stats.results | map(attribute='stat')))
          }}
      when: flatpak_bin.stat.exists
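
    # The dict(keys | zip(values)) idiom builds a plain mapping, e.g.
    # {'alice': {'exists': True, ...}, 'bob': {'exists': False, ...}}
    # (usernames for illustration only), which the next task indexes per user.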

    - name: Update user Flatpaks (use XDG_RUNTIME_DIR when available)
      become_user: "{{ item.name }}"
      environment: >-
        {{
          user_runtime_map[item.name].exists
          | default(false)
          | ternary({'XDG_RUNTIME_DIR': '/run/user/' ~ item.uid | string}, {})
        }}
      command: bash -lc "timeout {{ flatpak_timeout }} flatpak --user update -y --noninteractive"
      register: flatpak_user_res
      async: "{{ flatpak_async }}"
      poll: "{{ flatpak_poll }}"
      failed_when: flatpak_user_res.rc is defined and flatpak_user_res.rc not in [0, 124]
      changed_when: "'Installing' in (flatpak_user_res.stdout | default('')) or 'Installing' in (flatpak_user_res.stderr | default('')) or 'Updating' in (flatpak_user_res.stdout | default('')) or 'Updating' in (flatpak_user_res.stderr | default(''))"
      loop: "{{ regular_users | default([]) }}"
      loop_control:
        label: "{{ item.name }}"
      when: flatpak_bin.stat.exists
91
update_homarr.yml
Normal file
@@ -0,0 +1,91 @@
# update_homarr.yml

- name: Update Homarr
  hosts: pve2_vm
  gather_facts: false

  vars:
    # Compose sync (controller -> target)
    compose_local_dir: "{{ playbook_dir }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    # Homarr settings
    homarr_project: homarr
    homarr_compose_filename: "docker-compose-homarr.yml"
    homarr_service: homarr
    homarr_port: 7575

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory
        mode: "0755"

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"
        mode: "0644"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory
        mode: "0755"

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true
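
    # The archive -> copy -> unarchive sequence mirrors the controller's
    # docker-compose directory onto the target without needing rsync there;
    # deleting and recreating the directory first ensures that files removed
    # from the source do not linger on the host.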

    - name: Pull latest Homarr image
      community.docker.docker_compose_v2:
        project_name: "{{ homarr_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ homarr_compose_filename }}"
        pull: always

    - name: Recreate Homarr service
      community.docker.docker_compose_v2:
        project_name: "{{ homarr_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ homarr_compose_filename }}"
        services:
          - "{{ homarr_service }}"
        state: present
        recreate: always

    - name: Wait for Homarr port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ homarr_port }}"
        timeout: 60

    - name: Check Homarr HTTP endpoint (retry until ready)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ homarr_port }}/"
        status_code: 200
      register: homarr_http
      retries: 30
      delay: 3
      until: homarr_http.status == 200
      changed_when: false
116
update_immich.yml
Normal file
@@ -0,0 +1,116 @@
# update_immich.yml

- name: Update Immich
  hosts: pve2_vm
  gather_facts: false

  vars:
    # Compose sync (controller -> target)
    compose_local_dir: "{{ playbook_dir }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    # Immich settings
    immich_project: immich
    immich_port: 2283
    immich_compose_files:
      - docker-compose-immich.yml
      - docker-compose-immich.override.yml

    # Persistent env file on the VM (NOT in git)
    immich_env_persistent: "{{ compose_remote_base }}/env/immich.env"

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory
        mode: "0755"

    - name: Ensure remote env directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}/env"
        state: directory

    - name: Check whether the persistent Immich env file exists
      ansible.builtin.stat:
        path: "{{ immich_env_persistent }}"
      register: immich_env_stat

    - name: Abort when the Immich env file is missing
      ansible.builtin.fail:
        msg: >-
          Missing persistent env file: {{ immich_env_persistent }}.
          Create it on the VM with the DB_* and UPLOAD_LOCATION variables.
      when: not immich_env_stat.stat.exists

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"
        mode: "0644"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory
        mode: "0755"

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true

    - name: Deploy Immich .env into compose directory
      ansible.builtin.copy:
        src: "{{ immich_env_persistent }}"
        dest: "{{ compose_remote_dir }}/.env"
        remote_src: true
        mode: "0600"
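
    # Compose reads `.env` from the project directory automatically. Keeping
    # the canonical copy outside the synced tree means the recreate step above
    # cannot wipe it; it is re-deployed into place on every run.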

    - name: Pull latest Immich images
      community.docker.docker_compose_v2:
        project_name: "{{ immich_project }}"
        project_src: "{{ compose_remote_dir }}"
        files: "{{ immich_compose_files }}"
        pull: always

    - name: Recreate Immich stack
      community.docker.docker_compose_v2:
        project_name: "{{ immich_project }}"
        project_src: "{{ compose_remote_dir }}"
        files: "{{ immich_compose_files }}"
        state: present
        recreate: always

    - name: Wait for Immich port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ immich_port }}"
        timeout: 120

    - name: Check Immich API ping (retry until ready)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ immich_port }}/api/server/ping"
        status_code: 200
        return_content: true
      register: immich_ping
      retries: 40
      delay: 3
      until: immich_ping.status == 200 and ('pong' in (immich_ping.content | default('')))
      changed_when: false
94
update_jellyfin.yml
Normal file
@@ -0,0 +1,94 @@
# update_jellyfin.yml

- name: Update Jellyfin
  hosts: pve2_lxc_jellyfin
  gather_facts: false

  vars:
    # Compose sync (controller -> target)
    compose_local_dir: "{{ playbook_dir }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    # Jellyfin settings
    jellyfin_compose_filename: "docker-compose-jellyfin.yml"
    jellyfin_service: jellyfin
    jellyfin_port: 8096

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory
        mode: "0755"

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"
        mode: "0644"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory
        mode: "0755"

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true

    - name: Pull latest Jellyfin image (docker-compose v1)
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - >
            cd "{{ compose_remote_dir }}"
            && docker-compose -f "{{ jellyfin_compose_filename }}" pull
      changed_when: true

    - name: Recreate Jellyfin service (docker-compose v1)
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - >
            cd "{{ compose_remote_dir }}"
            && docker-compose -f "{{ jellyfin_compose_filename }}"
            up -d --force-recreate --remove-orphans "{{ jellyfin_service }}"
      changed_when: true
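
    # This host apparently still runs Compose v1 (the standalone
    # `docker-compose` binary), which is presumably why plain shell commands
    # are used here instead of the community.docker.docker_compose_v2 module
    # used in the other plays.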

    - name: Wait for Jellyfin port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ jellyfin_port }}"
        timeout: 180

    - name: Check Jellyfin HTTP endpoint (retry until ready)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ jellyfin_port }}/"
        status_code:
          - 200
          - 302
      register: jellyfin_http
      retries: 40
      delay: 3
      until: jellyfin_http.status in [200, 302]
      changed_when: false
93
update_semaphore.yml
Normal file
@@ -0,0 +1,93 @@
# update_semaphore.yml

- name: Update Semaphore (self-update safe)
  hosts: pve2_vm
  gather_facts: false

  vars:
    compose_local_dir: "{{ playbook_dir }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    semaphore_project: semaphore
    semaphore_compose_filename: "docker-compose-semaphore.yml"
    semaphore_port: 3000

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true

    - name: Pull latest Semaphore image(s)
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - >
            cd "{{ compose_remote_dir }}"
            && docker compose -p "{{ semaphore_project }}"
            -f "{{ semaphore_compose_filename }}"
            pull
      changed_when: true

    - name: Start Semaphore update in background (avoid killing this job)
      ansible.builtin.command:
        argv:
          - bash
          - -lc
          - >
            cd "{{ compose_remote_dir }}"
            && nohup docker compose -p "{{ semaphore_project }}"
            -f "{{ semaphore_compose_filename }}"
            up -d --remove-orphans --force-recreate
            > "{{ compose_remote_base }}/semaphore-update.log" 2>&1 &
      async: 1
      poll: 0
      changed_when: true
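
    # async: 1 with poll: 0 makes Ansible fire-and-forget; combined with
    # nohup and the trailing `&`, the recreate keeps running even though
    # replacing the Semaphore container kills the connection (and this very
    # job) that started it. The port/HTTP checks below then verify it worked.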

    - name: Wait for Semaphore port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ semaphore_port }}"
        timeout: 300

    - name: Check Semaphore HTTP endpoint (retry)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ semaphore_port }}/"
        status_code: 200
      register: sem_http
      retries: 30
      delay: 5
      until: sem_http.status == 200
      changed_when: false
34
update_system.yml
Normal file
@@ -0,0 +1,34 @@
- name: Update system (APT + Flatpak)
  hosts: all

  become: true
  become_user: root
  become_method: sudo
  tasks:
    - name: Update APT cache
      apt:
        update_cache: yes

    - name: Upgrade all APT packages
      apt:
        upgrade: dist

    - name: Check if flatpak binary exists
      stat:
        path: /usr/bin/flatpak
      register: flatpak_bin

    - name: Update system Flatpaks
      shell: timeout 300 flatpak update -y
      register: flatpak_sys
      failed_when: flatpak_sys.rc != 0 and flatpak_sys.rc != 124
      when: flatpak_bin.stat.exists

    - name: Update user Flatpaks
      become_user: jakub
      environment:
        XDG_RUNTIME_DIR: /run/user/1000
      shell: timeout 300 flatpak update -y
      register: flatpak_user
      failed_when: flatpak_user.rc != 0 and flatpak_user.rc != 124
      when: flatpak_bin.stat.exists
91
update_uptime_kuma.yml
Normal file
@@ -0,0 +1,91 @@
# update_uptime_kuma.yml

- name: Update Uptime Kuma
  hosts: pve1_vm
  gather_facts: false

  vars:
    # Compose sync (controller -> target)
    compose_local_dir: "{{ playbook_dir }}/docker-compose"
    compose_remote_base: "/home/{{ ansible_user }}/.ansible-compose"
    compose_remote_dir: "{{ compose_remote_base }}/docker-compose"
    compose_remote_archive: "{{ compose_remote_base }}/docker-compose.tar.gz"

    # Uptime Kuma settings
    uptimekuma_project: uptimekuma
    uptimekuma_compose_filename: "docker-compose-uptimekuma.yml"
    uptimekuma_service: uptime-kuma
    uptimekuma_port: 3001

  tasks:
    - name: Ensure remote base directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_base }}"
        state: directory
        mode: "0755"

    - name: Create local archive of docker-compose directory (controller)
      ansible.builtin.archive:
        path: "{{ compose_local_dir }}/"
        dest: "/tmp/docker-compose.tar.gz"
        format: gz
      delegate_to: localhost
      run_once: true

    - name: Upload archive to remote host
      ansible.builtin.copy:
        src: "/tmp/docker-compose.tar.gz"
        dest: "{{ compose_remote_archive }}"
        mode: "0644"

    - name: Recreate remote compose directory
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: absent

    - name: Ensure remote compose directory exists
      ansible.builtin.file:
        path: "{{ compose_remote_dir }}"
        state: directory
        mode: "0755"

    - name: Extract archive on remote host
      ansible.builtin.unarchive:
        src: "{{ compose_remote_archive }}"
        dest: "{{ compose_remote_dir }}"
        remote_src: true

    - name: Pull latest Uptime Kuma image
      community.docker.docker_compose_v2:
        project_name: "{{ uptimekuma_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ uptimekuma_compose_filename }}"
        pull: always

    - name: Recreate Uptime Kuma service
      community.docker.docker_compose_v2:
        project_name: "{{ uptimekuma_project }}"
        project_src: "{{ compose_remote_dir }}"
        files:
          - "{{ uptimekuma_compose_filename }}"
        services:
          - "{{ uptimekuma_service }}"
        state: present
        recreate: always
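
    # Splitting the update in two keeps the steps explicit: `pull: always`
    # fetches the new image first, then `recreate: always` forces the
    # container onto it even when the compose definition itself is unchanged.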

    - name: Wait for Uptime Kuma port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ uptimekuma_port }}"
        timeout: 120

    - name: Check Uptime Kuma HTTP endpoint (retry until ready)
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ uptimekuma_port }}/"
        status_code: 200
      register: kuma_http
      retries: 30
      delay: 3
      until: kuma_http.status == 200
      changed_when: false