forked from jakub/ansible
Compare commits: 0fd5ef9e41 ... jakub-patc (44 commits)

Commits in this comparison: 46955aea0d, ebfe720397, 0da25c9550, 7a6153676c, d36336f53e, 7f3f89e32d, 8308c1380e, 61ee49b6f4, 602101bbfb, 9366ff0912, 17c1b43116, 7ea0bb86f2, 3e283783db, 0803cf3e52, d53ad9a6d6, e8bedc3939, 4fb56ed09a, e47ccb64b7, 4038f5b6a1, 74c3ef8945, d413dcb29f, e710669c84, eb9c56bb5e, 1c6fb9c9c3, a7d50a8f36, c3ff0514ee, 1b1806907a, fd57e6b566, 3c7701a760, b1f8eea86f, dbd864b45e, 8d1b2cd065, 54b0dc86c8, 75f4e8611f, 5e9d755390, 73cf848f82, e4dac7808b, f61addc2be, 94f3de1e7d, af3c676183, c2d67f5498, a1d730a18c, cf2507bdf6, 21ce9478f4
check_raid.yml (new file, 183 lines)
@@ -0,0 +1,183 @@
# check_raid.yml

- name: Check Linux MD RAID health on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # Debug mode
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # RAID specifics
    # RAID_MD can be: md0 / md1 / ... OR "auto" to check all arrays found in /proc/mdstat
    raid_md_device: "{{ lookup('env', 'RAID_MD') | default('md0', true) }}"
    raid_allow_sync: "{{ lookup('env', 'RAID_ALLOW_SYNC') | default(1, true) | int }}"
    raid_allow_no_array: "{{ lookup('env', 'RAID_ALLOW_NO_ARRAY') | default(0, true) | int }}"

    raid_retries: "{{ RETRIES }}"
    raid_delay: 2
    ssh_hard_timeout: 30

    # SSH options
    ssh_opts:
      - "-o" # English comments
      - "StrictHostKeyChecking=no"
      - "-o"
      - "UserKnownHostsFile=/dev/null"
      - "-o"
      - "GlobalKnownHostsFile=/dev/null"
      - "-o"
      - "LogLevel=ERROR"
      - "-o"
      - "ConnectTimeout=15"
      - "-o"
      - "PreferredAuthentications=password"
      - "-o"
      - "PubkeyAuthentication=no"
      - "-o"
      - "KbdInteractiveAuthentication=no"
      - "-o"
      - "NumberOfPasswordPrompts=1"

    raid_check_cmd: |
      python3 - <<'PY'
      # Print exactly one status line and exit with code:
      # 0=OK, 1=FAIL (degraded/disallowed sync), 2=ERROR (unexpected/misconfig)
      import re, sys

      target = "{{ raid_md_device }}"
      allow_sync = int("{{ raid_allow_sync }}")
      allow_no_array = int("{{ raid_allow_no_array }}")

      try:
          txt = open("/proc/mdstat", "r", encoding="utf-8", errors="ignore").read()
      except Exception as e:
          print(f"ERROR RAID read_mdstat err={e}")
          sys.exit(2)

      arrays = {}
      header_re = re.compile(r"^(md\d+)\s*:\s*active.*$", re.MULTILINE)
      token_re = re.compile(r"^\s*\d+\s+blocks.*\[\d+/\d+\]\s*\[([U_]+)\]\s*$", re.MULTILINE)

      for m in header_re.finditer(txt):
          name = m.group(1)
          chunk = txt[m.end():m.end() + 3000]
          tm = token_re.search(chunk)
          if tm:
              arrays[name] = tm.group(1)

      if not arrays:
          if allow_no_array:
              print("OK RAID none=no-md-arrays")
              sys.exit(0)
          print("ERROR RAID none=no-md-arrays")
          sys.exit(2)

      syncing = bool(re.search(r"\b(resync|recovery|reshape|check|repair)\b", txt))

      if target == "auto":
          to_check = sorted(arrays.keys())
      else:
          if target not in arrays:
              found = ",".join(sorted(arrays.keys()))
              print(f"ERROR RAID target_not_found target={target} found={found}")
              sys.exit(2)
          to_check = [target]

      tokens_str = " ".join([f"{name}=[{arrays[name]}]" for name in to_check])
      degraded = any("_" in arrays[name] for name in to_check)

      if degraded:
          print(f"FAIL RAID {tokens_str} syncing={int(syncing)}")
          sys.exit(1)

      if syncing and not allow_sync:
          print(f"FAIL RAID {tokens_str} syncing={int(syncing)} allow_sync={allow_sync}")
          sys.exit(1)

      print(f"OK RAID {tokens_str} syncing={int(syncing)}")
      sys.exit(0)
      PY

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH) # English comments
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes
      run_once: true

    - name: Run RAID check on VM (via SSH) # single command, no loop
      ansible.builtin.command:
        argv: >-
          {{
            ['timeout', '-k', '5', (ssh_hard_timeout | string)]
            + ['sshpass', '-e', 'ssh']
            + ssh_opts
            + [ vm_user ~ '@' ~ vm_ip,
                'bash', '-lc',
                ('sudo ' if use_sudo else '') + raid_check_cmd
              ]
          }}
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: raid_cmd
      changed_when: false
      failed_when: false # we decide via assert below
      retries: "{{ raid_retries }}"
      delay: "{{ raid_delay }}"
      until: raid_cmd.rc not in [124, 255]
      run_once: true

    - name: Build one-line summary (always)
      ansible.builtin.set_fact:
        raid_line: >-
          {{
            (raid_cmd.stdout | default('') | trim)
            if ((raid_cmd.stdout | default('') | trim) | length) > 0
            else ('ERROR RAID no-output rc=' ~ (raid_cmd.rc | string))
          }}
      changed_when: false
      run_once: true

    - name: RAID result (always one line)
      ansible.builtin.assert:
        that:
          - raid_cmd.rc == 0
        success_msg: "{{ raid_line }}"
        fail_msg: "{{ raid_line }}"
      run_once: true

    # Optional verbose debug
    - name: Debug | /proc/mdstat (VM)
      ansible.builtin.command:
        argv: >-
          {{
            ['timeout', '-k', '5', (ssh_hard_timeout | string)]
            + ['sshpass', '-e', 'ssh']
            + ssh_opts
            + [ vm_user ~ '@' ~ vm_ip, 'bash', '-lc', "cat /proc/mdstat" ]
          }}
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: mdstat_dbg
      changed_when: false
      failed_when: false
      when: DEBUG == 1
      run_once: true

    - name: Debug | mdstat output
      ansible.builtin.debug:
        msg: "{{ mdstat_dbg.stdout | default('') }}"
      when: DEBUG == 1
      run_once: true
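Note: the raid_check_cmd script above keys off two /proc/mdstat patterns: the array header ("mdN : active ...") and the member-status token ("[n/m] [UU...]"), where any "_" in the token marks a missing member and produces a FAIL line. A minimal standalone sketch of that parsing, using an illustrative mdstat snippet (not captured from a real host):

    import re

    sample = (
        "md0 : active raid1 sda1[0] sdb1[1]\n"
        "      976630464 blocks super 1.2 [2/2] [UU]\n"
    )

    header_re = re.compile(r"^(md\d+)\s*:\s*active.*$", re.MULTILINE)
    token_re = re.compile(r"^\s*\d+\s+blocks.*\[\d+/\d+\]\s*\[([U_]+)\]\s*$", re.MULTILINE)

    for m in header_re.finditer(sample):
        tm = token_re.search(sample[m.end():])
        if tm:
            # a "_" in the token would mean a degraded array -> the playbook prints FAIL
            print(f"{m.group(1)}=[{tm.group(1)}]")  # prints: md0=[UU]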
inv_vm (new file, 3 lines)
@@ -0,0 +1,3 @@
[vm]
pve1_vm ansible_host=192.168.69.253
pve2_vm ansible_host=192.168.69.254
nextcloud/check_stack_nextcloud.yml
@@ -1,7 +1,7 @@
 # nextcloud/check_stack_nextcloud.yml

 - name: Run Nextcloud maintenance on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root
nextcloud/update_collabora.yml
@@ -1,7 +1,7 @@
 # nextcloud/update_collabora.yml

 - name: Update Collabora CODE on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root
nextcloud/update_nextcloud_db_redis.yml
@@ -1,7 +1,7 @@
 # nextcloud/update_nextcloud_db_redis.yml

 - name: Update Nextcloud DB and Redis on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root
nextcloud/update_nextcloud.yml
@@ -1,7 +1,7 @@
 # nextcloud/update_nextcloud.yml

 - name: Update Nextcloud on VM via Proxmox
-  hosts: proxmox
+  hosts: proxmox_nextcloud # linux_servers
   gather_facts: false
   become: true
   become_user: root
nextcloud/update_nextcloud_v2.yml (new file, 113 lines)
@@ -0,0 +1,113 @@
- name: Update Nextcloud (Ansible-native)
  hosts: proxmox_nextcloud
  become: true
  gather_facts: false

  vars:
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    nextcloud_project: "nextcloud-collabora"
    compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
    backup_dir: "/data/compose/nextcloud/backup-{{ ansible_date_time.iso8601_basic_short }}"

    nextcloud_base_url: "https://cloud.martinfencl.eu"
    nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"

  tasks:
    # -------------------------
    # Pre-check
    # -------------------------
    - name: Show current Nextcloud version (DEBUG)
      command: docker exec -u www-data nextcloud php occ -V
      register: nc_version
      changed_when: false
      failed_when: false
      when: DEBUG == 1

    - debug:
        var: nc_version.stdout
      when: DEBUG == 1

    # -------------------------
    # Backup
    # -------------------------
    - name: Enable maintenance mode
      command: docker exec -u www-data nextcloud php occ maintenance:mode --on

    - name: Create backup directory
      file:
        path: "{{ backup_dir }}"
        state: directory

    - name: Backup config and custom_apps
      command: >
        docker exec nextcloud
        tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps

    - name: Copy config backup out of container
      command: docker cp nextcloud:/tmp/nextcloud_conf.tgz {{ backup_dir }}/

    - name: Remove temp archive from container
      command: docker exec nextcloud rm -f /tmp/nextcloud_conf.tgz

    - name: Backup database
      shell: >
        docker exec nextcloud-db sh -c
        'command -v mariadb-dump >/dev/null &&
        mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" ||
        mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"'
      register: db_dump

    - name: Save database dump
      copy:
        content: "{{ db_dump.stdout }}"
        dest: "{{ backup_dir }}/db.sql"

    # -------------------------
    # Upgrade
    # -------------------------
    - name: Pull new Nextcloud image
      command: docker compose -p {{ nextcloud_project }} -f {{ compose_file }} pull nextcloud

    - name: Recreate Nextcloud container
      command: >
        docker compose -p {{ nextcloud_project }} -f {{ compose_file }}
        up -d --no-deps --force-recreate nextcloud

    - name: Run Nextcloud upgrade
      command: docker exec -u www-data nextcloud php occ upgrade

    - name: Update apps
      command: docker exec -u www-data nextcloud php occ app:update --all
      failed_when: false

    - name: Run maintenance repair
      command: docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive
      failed_when: false

    - name: Disable maintenance mode
      command: docker exec -u www-data nextcloud php occ maintenance:mode --off

    # -------------------------
    # Readiness check
    # -------------------------
    - name: Wait for status.php
      uri:
        url: "{{ nextcloud_status_url }}"
        status_code: 200
        return_content: true
        validate_certs: true
      register: nc_status
      retries: "{{ RETRIES }}"
      delay: 4
      until: nc_status.status == 200
      changed_when: false

    - name: Print status summary (DEBUG)
      debug:
        msg: >
          Nextcloud {{ nc_status.json.version }}
          (installed={{ nc_status.json.installed }},
          maintenance={{ nc_status.json.maintenance }},
          needsDbUpgrade={{ nc_status.json.needsDbUpgrade }})
      when: DEBUG == 1
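Note: the readiness check above only waits for HTTP 200 from status.php and, with DEBUG=1, prints the version/installed/maintenance/needsDbUpgrade fields of its JSON body. A rough standalone equivalent of that polling loop, reusing the playbook's URL and retry defaults (adjust for your own instance):

    import json, time, urllib.request

    url = "https://cloud.martinfencl.eu/status.php"   # nextcloud_status_url in the play
    for attempt in range(25):                         # RETRIES default
        try:
            with urllib.request.urlopen(url, timeout=10) as r:
                if r.status == 200:
                    s = json.loads(r.read().decode())
                    print(f"Nextcloud {s.get('version')} (installed={s.get('installed')}, "
                          f"maintenance={s.get('maintenance')}, needsDbUpgrade={s.get('needsDbUpgrade')})")
                    break
        except Exception:
            pass
        time.sleep(4)                                 # delay: 4 in the play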
nextcloud/update_nextcloud_v3.yml (new file, 343 lines)
@@ -0,0 +1,343 @@
# nextcloud/update_nextcloud.yml

- name: Update Nextcloud on VM via Proxmox
  hosts: proxmox_nextcloud
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug / retries ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Nextcloud specifics ---
    nextcloud_project: "nextcloud-collabora"
    nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
    nextcloud_service: "nextcloud"

    # Backup directory on the VM (timestamped on controller)
    backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"

    nextcloud_base_url: "https://cloud.martinfencl.eu"
    nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # --- Backup phase commands (run on VM) ---
    nextcloud_backup_commands:
      - >-
        mkdir -p "{{ backup_dir }}"
      - >-
        {{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --on
      - >-
        {{ docker_prefix }} exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
      - >-
        {{ docker_prefix }} cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
      - >-
        {{ docker_prefix }} exec nextcloud rm /tmp/nextcloud_conf.tgz || true
      - >-
        {{ docker_prefix }} exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Nextcloud | Show current version before upgrade (DEBUG)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ docker_prefix }} exec -u www-data nextcloud php occ -V || true"
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: nc_version_before
      changed_when: false
      failed_when: false
      when: DEBUG == 1
      no_log: "{{ DEBUG == 0 }}"

    # -------------------------
    # Backup phase
    # -------------------------
    - name: Nextcloud | Run backup commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      loop: "{{ nextcloud_backup_commands }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"
      register: nc_backup_cmds
      changed_when: false
      failed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Nextcloud | Show outputs of backup commands (DEBUG)
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ nc_backup_cmds.results }}"
      when: DEBUG == 1

    - name: Nextcloud | Fail play if any backup command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Nextcloud backup commands succeeded."
      loop: "{{ nc_backup_cmds.results }}"
      loop_control:
        index_var: idx
        label: "backup-cmd-{{ idx }}"

    # -------------------------
    # Upgrade phase (with always cleanup)
    # -------------------------
    - name: Nextcloud | Upgrade block
      block:
        - name: Nextcloud | Pull image
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_pull
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Recreate service
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_up
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Ensure maintenance is OFF before occ upgrade
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_maint_off_before
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | occ upgrade (must succeed)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ upgrade"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_occ_upgrade
          changed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Update apps (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ app:update --all || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_app_update
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Repair (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          register: nc_repair
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

      rescue:
        - name: Nextcloud | Show occ upgrade output (DEBUG)
          ansible.builtin.debug:
            msg: |
              occ upgrade FAILED
              RC: {{ nc_occ_upgrade.rc | default('n/a') }}
              STDOUT:
              {{ (nc_occ_upgrade.stdout | default('')).strip() }}
              STDERR:
              {{ (nc_occ_upgrade.stderr | default('')).strip() }}
          when: DEBUG == 1

        - name: Nextcloud | Try to force-disable maintenance flag (best-effort)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ config:system:set maintenance --type=boolean --value=false || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

        - name: Nextcloud | Fail explicitly
          ansible.builtin.fail:
            msg: >-
              Nextcloud occ upgrade failed. Check nextcloud.log inside the container (data/nextcloud.log).
              stdout={{ (nc_occ_upgrade.stdout | default('') | trim) }}
              stderr={{ (nc_occ_upgrade.stderr | default('') | trim) }}

      always:
        - name: Nextcloud | Ensure maintenance mode is OFF (always)
          ansible.builtin.command:
            argv:
              - sshpass
              - -e
              - ssh
              - -o
              - StrictHostKeyChecking=no
              - -o
              - ConnectTimeout=15
              - "{{ vm_user }}@{{ vm_ip }}"
              - bash
              - -lc
              - "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
          environment:
            SSHPASS: "{{ vm_pass }}"
          changed_when: false
          failed_when: false
          no_log: "{{ DEBUG == 0 }}"

    # -------------------------
    # Readiness check (status.php)
    # -------------------------
    - name: Nextcloud | Wait for status.php (controller first)
      ansible.builtin.uri:
        url: "{{ nextcloud_status_url }}"
        method: GET
        return_content: true
        validate_certs: true
        status_code: 200
      register: nc_status_controller
      delegate_to: localhost
      run_once: true
      retries: "{{ RETRIES }}"
      delay: 4
      until: nc_status_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Nextcloud | Print concise status summary (DEBUG)
      ansible.builtin.debug:
        msg: >-
          Nextcloud {{ nc_status_controller.json.version | default('?') }}
          (installed={{ nc_status_controller.json.installed | default('?') }},
          maintenance={{ nc_status_controller.json.maintenance | default('?') }},
          needsDbUpgrade={{ nc_status_controller.json.needsDbUpgrade | default('?') }})
      when: DEBUG == 1 and nc_status_controller.json is defined
update_portainer_agent.yml
@@ -1,7 +1,7 @@
 # update_portainer_agent.yml

 - name: Update Portainer Agent on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root
test.yml (deleted file, 56 lines)
@@ -1,56 +0,0 @@
- name: Test connectivity from Semaphore container to Homarr VMs
  hosts: localhost
  gather_facts: false

  vars:
    # List of VMs you want to test
    vm_targets:
      - { ip: "192.168.69.253" }
      - { ip: "192.168.69.254" }

    # Credentials (ideally from env/secret)
    vm_user: "{{ lookup('env', 'VM_USER') | default('howard') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"

  tasks:
    - name: Ensure sshpass is installed (inside container) # install sshpass
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: yes

    - name: Ping VM IPs from container # simple ICMP ping
      ansible.builtin.command: "ping -c 2 {{ item.ip }}"
      loop: "{{ vm_targets }}"
      register: ping_results
      ignore_errors: true

    - name: Show ping results
      ansible.builtin.debug:
        msg: "Ping to {{ item.item.ip }} -> rc={{ item.rc }}, stdout={{ item.stdout }}"
      loop: "{{ ping_results.results }}"

    - name: Test SSH to VM with ssh (SSH key)
      ansible.builtin.command:
        argv:
          - ssh
          - -i
          - /path/to/id_rsa # put the path to the private key inside the container here
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=5
          - "{{ vm_user }}@{{ item.ip }}"
          - "echo OK-from-{{ item.ip }}"
      loop: "{{ vm_targets }}"
      register: ssh_results
      ignore_errors: true

    - name: Show SSH results
      ansible.builtin.debug:
        msg: |
          SSH to {{ item.item.ip }}:
          rc={{ item.rc }}
          stdout={{ item.stdout }}
          stderr={{ item.stderr }}
      loop: "{{ ssh_results.results }}"
update_broker_kafka-ui.yml
@@ -1,7 +1,7 @@
 # update_broker_kafka-ui.yml

 - name: Update Kafka broker3 and Redpanda Console on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root
update_homarr2.yml (new file, 40 lines)
@@ -0,0 +1,40 @@
# update_homarr2.yml

- name: Update Homarr
  hosts: pve2_vm
  gather_facts: false

  vars:
    homarr_project: homarr
    homarr_compose_file: /data/compose/homarr/docker-compose-homarr.yml
    homarr_service: homarr
    homarr_port: 7575

  tasks:
    - name: Pull latest Homarr image
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        pull: always

    - name: Recreate Homarr service
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        services:
          - "{{ homarr_service }}"
        state: present
        recreate: always

    - name: Wait for Homarr port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ homarr_port }}"
        timeout: 60

    - name: Check Homarr HTTP endpoint
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ homarr_port }}/"
        status_code: 200
@@ -8,7 +8,7 @@
   become_method: sudo

   vars:
-    # --- Connection to VM (provided by Semaphore env vars) ---
+    # VM connection (provided by Semaphore env vars)
     vm_ip: "{{ lookup('env', 'VM_IP') }}"
     vm_user: "{{ lookup('env', 'VM_USER') }}"
     vm_pass: "{{ lookup('env', 'VM_PASS') }}"
@@ -18,61 +18,179 @@
     DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
     RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

-    # --- Immich specifics ---
+    # Immich specifics
+    immich_dir: "/opt/immich"
     immich_project: "immich"

-    # Where compose file lives on the VM
-    immich_compose_dir: "/opt/immich"
-    immich_compose_file: "{{ immich_compose_dir }}/docker-compose.yml"
-
-    # Official Immich compose URL (latest release)
     immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
+    immich_compose_file: "/opt/immich/docker-compose.yml"
+    immich_override_file: "/opt/immich/docker-compose.override.yml"
     immich_port: 2283

-    # Optional external URL for controller-side readiness check
-    immich_url: "{{ lookup('env', 'IMMICH_URL') | default('', true) }}"
+    # Optional external URL for controller-side readiness check (e.g., https://photos.example.com)
+    # Default to https://photos.martinfencl.eu/photos if IMMICH_URL is not set
+    immich_url: "{{ lookup('env', 'IMMICH_URL') | default('https://photos.martinfencl.eu/photos', true) }}"

+    # Retry policy
     immich_retries: "{{ RETRIES }}"
     immich_delay: 2

-    # Docker command prefix (consistent behavior and quiet hints)
+    # Docker command prefix (consistent behavior)
     docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

-    # Commands to run on the target VM (quiet outputs)
-    # 1) Check directory exists (we do NOT create anything, to stay safe)
-    # 2) Safely download latest docker-compose.yml from GitHub (with backup)
-    # 3) Pull images according to compose
-    # 4) Start / update stack
-    immich_commands:
-      - >-
-        [ -d "{{ immich_compose_dir }}" ] || {
-        echo "ERROR: Directory {{ immich_compose_dir }} does not exist on VM; not touching anything." >&2;
-        exit 1;
-        }
-      - >-
-        cd "{{ immich_compose_dir }}" &&
-        wget -qO docker-compose.yml.new "{{ immich_compose_url }}" || {
-        echo "ERROR: Failed to download docker-compose.yml from GitHub; keeping existing one." >&2;
-        rm -f docker-compose.yml.new 2>/dev/null || true;
-        exit 1;
-        };
-        if [ -s docker-compose.yml.new ]; then
-        echo "Downloaded new docker-compose.yml from GitHub (Immich latest).";
-        if [ -f docker-compose.yml ]; then
-        cp docker-compose.yml "docker-compose.yml.bak-$(date +%F_%H-%M-%S)";
-        echo "Existing docker-compose.yml backed up.";
-        fi;
-        mv docker-compose.yml.new docker-compose.yml;
-        else
-        echo "WARNING: Downloaded docker-compose.yml.new is empty; keeping existing one." >&2;
-        rm -f docker-compose.yml.new 2>/dev/null || true;
-        exit 1;
-        fi
-      - "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} pull >/dev/null"
-      - "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} up -d --remove-orphans >/dev/null"
+    # Compose command (always include override to keep local mounts separate from upstream compose)
+    immich_compose_cmd: >-
+      {{ docker_prefix }} compose
+      -p {{ immich_project }}
+      -f {{ immich_compose_file }}
+      -f {{ immich_override_file }}
+
+    # Commands to run on the target VM
+    immich_commands:
+      - "cd {{ immich_dir }}"
+
+      - |
+        cd {{ immich_dir }}
+        mkdir -p backups
+        if [ -f docker-compose.yml ]; then
+          cp -a docker-compose.yml "backups/docker-compose.yml.$(date +%F_%H%M%S).bak"
+        fi
+        if [ -f .env ]; then
+          cp -a .env "backups/.env.$(date +%F_%H%M%S).bak"
+        fi
+        if [ -f docker-compose.override.yml ]; then
+          cp -a docker-compose.override.yml "backups/docker-compose.override.yml.$(date +%F_%H%M%S).bak"
+        fi
+
+      - |
+        cd {{ immich_dir }}
+        # Download latest compose from Immich releases (requires curl or wget)
+        if command -v curl >/dev/null 2>&1; then
+          curl -fsSL -o docker-compose.yml "{{ immich_compose_url }}"
+        elif command -v wget >/dev/null 2>&1; then
+          wget -qO docker-compose.yml "{{ immich_compose_url }}"
+        else
+          echo "Neither curl nor wget is available on the VM."
+          exit 1
+        fi
+
+      - |
+        cd {{ immich_dir }}
+        # Ensure override compose exists (create if missing)
+        if [ ! -f "{{ immich_override_file }}" ]; then
+          printf '%s\n' \
+            'services:' \
+            '  immich-server:' \
+            '    volumes:' \
+            '      - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos' \
+            '      - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos' \
+            > "{{ immich_override_file }}"
+        fi
+        # Fail early if override is still missing/empty
+        test -s "{{ immich_override_file }}"
+
+      - |
+        cd {{ immich_dir }}
+        # Ensure .env exists. If missing, try to reconstruct it from running containers to avoid breaking DB creds.
+        python3 - <<'PY'
+        import json
+        import subprocess
+        from pathlib import Path
+
+        env_path = Path(".env")
+        if env_path.exists():
+            raise SystemExit(0)
+
+        def run(cmd):
+            p = subprocess.run(cmd, capture_output=True, text=True)
+            return p.returncode, p.stdout, p.stderr
+
+        rc, out, err = run(["bash", "-lc", "command docker inspect immich_postgres immich_server"])
+        if rc != 0 or not out.strip():
+            print("ERROR: .env is missing and cannot inspect running containers (immich_postgres/immich_server).", flush=True)
+            print("Create /opt/immich/.env manually or ensure the containers exist.", flush=True)
+            raise SystemExit(1)
+
+        data = json.loads(out)
+
+        by_name = {}
+        for c in data:
+            name = (c.get("Name") or "").lstrip("/")
+            by_name[name] = c
+
+        pg = by_name.get("immich_postgres")
+        srv = by_name.get("immich_server")
+        if not pg or not srv:
+            print("ERROR: Could not find immich_postgres and immich_server in docker inspect output.", flush=True)
+            raise SystemExit(1)
+
+        def env_map(container):
+            m = {}
+            for kv in (container.get("Config", {}).get("Env") or []):
+                if "=" in kv:
+                    k, v = kv.split("=", 1)
+                    m[k] = v
+            return m
+
+        def find_mount_source(container, dest):
+            for m in (container.get("Mounts") or []):
+                if m.get("Destination") == dest:
+                    return m.get("Source")
+            return ""
+
+        pg_env = env_map(pg)
+        db_user = pg_env.get("POSTGRES_USER", "")
+        db_pass = pg_env.get("POSTGRES_PASSWORD", "")
+        db_name = pg_env.get("POSTGRES_DB", "")
+
+        db_data = find_mount_source(pg, "/var/lib/postgresql/data")
+        upload_loc = find_mount_source(srv, "/usr/src/app/upload")
+
+        # Try to preserve the currently used image tag as IMMICH_VERSION (optional but safer)
+        immich_version = ""
+        image = (srv.get("Config", {}).get("Image") or "")
+        if ":" in image and "@" not in image:
+            immich_version = image.rsplit(":", 1)[-1]
+        elif ":" in image and "@" in image:
+            # image like repo:tag@sha256:...
+            immich_version = image.split("@", 1)[0].rsplit(":", 1)[-1]
+
+        missing = []
+        for k, v in [
+            ("DB_USERNAME", db_user),
+            ("DB_PASSWORD", db_pass),
+            ("DB_DATABASE_NAME", db_name),
+            ("DB_DATA_LOCATION", db_data),
+            ("UPLOAD_LOCATION", upload_loc),
+        ]:
+            if not v:
+                missing.append(k)
+
+        if missing:
+            print("ERROR: Could not reconstruct these .env values from containers: " + ", ".join(missing), flush=True)
+            raise SystemExit(1)
+
+        lines = [
+            f"UPLOAD_LOCATION={upload_loc}",
+            f"DB_DATA_LOCATION={db_data}",
+            f"DB_USERNAME={db_user}",
+            f"DB_PASSWORD={db_pass}",
+            f"DB_DATABASE_NAME={db_name}",
+        ]
+        if immich_version:
+            lines.append(f"IMMICH_VERSION={immich_version}")
+
+        env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
+        print("Created .env from running containers.", flush=True)
+        PY
+
+      - |
+        cd {{ immich_dir }}
+        # Comment out healthcheck.start_interval if present (safe no-op if missing)
+        sed -i -E 's/^([[:space:]]*)start_interval:/\1# start_interval:/' docker-compose.yml || true
+
+      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} config >/dev/null"
+      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} pull >/dev/null"
+      - "cd {{ immich_dir }} && {{ immich_compose_cmd }} up -d --remove-orphans --force-recreate >/dev/null"

   tasks:
     - name: Ensure sshpass is installed (for password-based SSH) # English comments
@@ -81,11 +199,11 @@
         state: present
         update_cache: yes

-    - name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item value
+    - name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item label
       ansible.builtin.command:
        argv:
          - sshpass
-         - -e # read password from SSHPASS environment
+         - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
@@ -96,14 +214,15 @@
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
-        SSHPASS: "{{ vm_pass }}" # supply password via environment
+        SSHPASS: "{{ vm_pass }}"
      loop: "{{ immich_commands }}"
      loop_control:
-        index_var: idx # capture loop index
-        label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
+        index_var: idx
+        label: "cmd-{{ idx }}"
      register: immich_cmds
      changed_when: false
-      no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
+      no_log: "{{ DEBUG == 0 }}"
+      run_once: true

    - name: Show outputs for each Immich command
      ansible.builtin.debug:
@@ -116,8 +235,9 @@
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ immich_cmds.results }}"
      when: DEBUG == 1
+      run_once: true

-    - name: Fail play if any Immich command failed # also hide item label
+    - name: Fail play if any Immich command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
@@ -126,17 +246,17 @@
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"
+      run_once: true

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

-    - name: Immich | Wait for web UI (controller first, with retries)
+    - name: Immich | Wait for API ping (controller first, with retries)
      ansible.builtin.uri:
-        url: "{{ (immich_url | regex_replace('/$','')) + '/' }}"
+        url: "{{ (immich_url | regex_replace('/$','')) + '/api/server/ping' }}"
        method: GET
        return_content: true
-        # Validate TLS only when using https://
        validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
        status_code: 200
      register: immich_controller
@@ -145,11 +265,11 @@
      when: immich_url is defined and (immich_url | length) > 0
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
-      until: immich_controller.status == 200
-      failed_when: false # allow task to finish without failing the play
+      until: immich_controller.status == 200 and ('pong' in (immich_controller.content | default('')))
+      failed_when: false
      changed_when: false

-    - name: Immich | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
+    - name: Immich | VM-side ping (JSON via Python, with retries)
      ansible.builtin.command:
        argv:
          - sshpass
@@ -164,10 +284,10 @@
          - -lc
          - |
            python3 - <<'PY'
-            # Fetch Immich web UI from localhost and print HTML to stdout
+            # Ping Immich API from localhost and print response to stdout
            import urllib.request, sys
            try:
-                with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/", timeout=15) as r:
+                with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/api/server/ping", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
@@ -177,62 +297,17 @@
      register: immich_vm
      changed_when: false
      failed_when: false
-      when: immich_controller.status | default(0) != 200 or immich_controller.content is not defined
+      when: immich_controller.status | default(0) != 200
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
-      until: (immich_vm.stdout | default('') | trim | length) > 0 and ('Immich' in (immich_vm.stdout | default('')))
+      until: (immich_vm.stdout | default('') | trim | length) > 0 and ('pong' in (immich_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"
+      run_once: true

-    - name: Immich | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
-      ansible.builtin.set_fact:
-        immich_home_html: >-
-          {{
-            (
-              immich_controller.content
-              if (immich_controller is defined)
-                 and ((immich_controller.status|default(0))==200)
-                 and (immich_controller.content is defined)
-              else
-              (immich_vm.stdout | default('') | trim)
-            )
-          }}
-      when:
-        - (immich_controller is defined and (immich_controller.status|default(0))==200 and (immich_controller.content is defined))
-          or ((immich_vm.stdout | default('') | trim | length) > 0)
-
    - name: Immich | Print concise summary
      ansible.builtin.debug:
        msg: >-
-          Immich web UI {{ 'reachable' if (immich_home_html is defined) else 'NOT reachable' }}.
-          Source={{ 'controller' if ((immich_controller is defined) and (immich_controller.status|default(0))==200 and (immich_controller.content is defined)) else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }};
-          length={{ (immich_home_html | default('')) | length }};
-          contains('Immich')={{ (immich_home_html is defined) and ('Immich' in immich_home_html) }}
+          Immich API ping {{ 'OK' if (('pong' in (immich_controller.content|default(''))) or ('pong' in (immich_vm.stdout|default('')))) else 'NOT OK' }}.
+          Source={{ 'controller' if (immich_controller.status|default(0))==200 else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }}.
      when: DEBUG == 1
+      run_once: true
-
-    - name: Immich | Web UI unavailable (after retries)
-      ansible.builtin.debug:
-        msg: "Immich web není dostupný ani po pokusech."
-      when: immich_home_html is not defined and DEBUG == 1
-
-    # Optional detailed dump (short excerpt only)
-    - name: Immich | HTML excerpt (debug)
-      ansible.builtin.debug:
-        msg: "{{ (immich_home_html | default(''))[:500] }}"
-      when: immich_home_html is defined and DEBUG == 1
-
-    # -------------------------
-    # Final assertion: controller URL must be reachable
-    # -------------------------
-
-    - name: Immich | Assert controller URL reachable (if configured)
-      ansible.builtin.assert:
-        that:
-          - >
-            not (immich_url is defined and (immich_url | length) > 0)
-            or
-            (
-              immich_controller is defined
-              and (immich_controller.status | default(0)) == 200
-            )
-        fail_msg: "Immich controller URL {{ immich_url }} is NOT reachable with HTTP 200 after retries."
-        success_msg: "Immich controller URL {{ immich_url }} is reachable with HTTP 200."
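Note: when /opt/immich/.env is missing, the reconstruction step in the new immich_commands writes only the keys listed in its lines[] array (plus IMMICH_VERSION when a tag can be derived). An illustrative result with placeholder values, not real settings:

    UPLOAD_LOCATION=/path/to/immich/upload
    DB_DATA_LOCATION=/path/to/immich/postgres
    DB_USERNAME=<postgres user>
    DB_PASSWORD=<postgres password>
    DB_DATABASE_NAME=<database name>
    IMMICH_VERSION=<current image tag>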
nextcloud/update_uptime_kuma.yml
@@ -1,7 +1,7 @@
 # nextcloud/update_uptime_kuma.yml

 - name: Update Uptime Kuma on VM via Proxmox
-  hosts: proxmox
+  hosts: linux_servers
   gather_facts: false
   become: true
   become_user: root