forked from jakub/ansible

21 Commits

Author SHA1 Message Date
4de04d0d3a Add homarr.yml 2026-01-09 18:23:56 +00:00
f4262bcb27 Update test_sms.yml 2025-10-14 12:49:40 +00:00
5c69d3a03f Update test_sms.yml 2025-10-14 12:35:33 +00:00
547c9fadc5 Update test_sms.yml 2025-10-14 11:57:23 +00:00
c07181291c Update test_sms.yml 2025-10-14 11:55:48 +00:00
1a0ce36efe Update test_sms.yml 2025-10-13 12:39:59 +00:00
8b57f27ec6 Update test_sms.yml 2025-10-13 09:56:02 +00:00
085e7177f4 Update test_sms.yml 2025-10-13 09:47:28 +00:00
3099a0b2b8 Update test_sms.yml 2025-10-13 09:45:40 +00:00
3d89bc523e Update test_sms.yml 2025-10-13 09:43:52 +00:00
61d288f92a Update test_sms.yml 2025-10-12 19:00:20 +00:00
61beedd023 Update test_sms.yml 2025-10-12 18:59:20 +00:00
bb37cdaa53 Update test_sms.yml 2025-10-12 18:58:25 +00:00
b805b506b4 Update test_sms.yml 2025-10-12 18:56:33 +00:00
9fad4e4d1a Update inv_linuxes 2025-10-12 18:53:08 +00:00
a632da2a62 Update test_sms.yml 2025-10-12 18:50:51 +00:00
cf21ad70c1 Add test_sms.yml 2025-10-12 18:45:30 +00:00
1deb268d73 Update inv_mikrotiks 2025-09-20 10:26:36 +00:00
8373252ae9 Update update.yml 2025-09-20 10:12:50 +00:00
13a48cd734 Update inv_mikrotiks 2025-09-19 11:20:04 +00:00
b497723769 Update mikrotikbackup.yml 2025-09-19 11:13:44 +00:00
21 changed files with 235 additions and 2546 deletions

View File

@@ -1,183 +0,0 @@
# check_raid.yml
- name: Check Linux MD RAID health on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# Debug mode
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# RAID specifics
# RAID_MD can be: md0 / md1 / ... OR "auto" to check all arrays found in /proc/mdstat
raid_md_device: "{{ lookup('env', 'RAID_MD') | default('md0', true) }}"
raid_allow_sync: "{{ lookup('env', 'RAID_ALLOW_SYNC') | default(1, true) | int }}"
raid_allow_no_array: "{{ lookup('env', 'RAID_ALLOW_NO_ARRAY') | default(0, true) | int }}"
raid_retries: "{{ RETRIES }}"
raid_delay: 2
ssh_hard_timeout: 30
# SSH options
ssh_opts:
- "-o" # English comments
- "StrictHostKeyChecking=no"
- "-o"
- "UserKnownHostsFile=/dev/null"
- "-o"
- "GlobalKnownHostsFile=/dev/null"
- "-o"
- "LogLevel=ERROR"
- "-o"
- "ConnectTimeout=15"
- "-o"
- "PreferredAuthentications=password"
- "-o"
- "PubkeyAuthentication=no"
- "-o"
- "KbdInteractiveAuthentication=no"
- "-o"
- "NumberOfPasswordPrompts=1"
raid_check_cmd: |
python3 - <<'PY'
# Print exactly one status line and exit with code:
# 0=OK, 1=FAIL (degraded/disallowed sync), 2=ERROR (unexpected/misconfig)
import re, sys
target = "{{ raid_md_device }}"
allow_sync = int("{{ raid_allow_sync }}")
allow_no_array = int("{{ raid_allow_no_array }}")
try:
    txt = open("/proc/mdstat", "r", encoding="utf-8", errors="ignore").read()
except Exception as e:
    print(f"ERROR RAID read_mdstat err={e}")
    sys.exit(2)
arrays = {}
header_re = re.compile(r"^(md\d+)\s*:\s*active.*$", re.MULTILINE)
token_re = re.compile(r"^\s*\d+\s+blocks.*\[\d+/\d+\]\s*\[([U_]+)\]\s*$", re.MULTILINE)
for m in header_re.finditer(txt):
    name = m.group(1)
    chunk = txt[m.end():m.end() + 3000]
    tm = token_re.search(chunk)
    if tm:
        arrays[name] = tm.group(1)
if not arrays:
    if allow_no_array:
        print("OK RAID none=no-md-arrays")
        sys.exit(0)
    print("ERROR RAID none=no-md-arrays")
    sys.exit(2)
syncing = bool(re.search(r"\b(resync|recovery|reshape|check|repair)\b", txt))
if target == "auto":
    to_check = sorted(arrays.keys())
else:
    if target not in arrays:
        found = ",".join(sorted(arrays.keys()))
        print(f"ERROR RAID target_not_found target={target} found={found}")
        sys.exit(2)
    to_check = [target]
tokens_str = " ".join([f"{name}=[{arrays[name]}]" for name in to_check])
degraded = any("_" in arrays[name] for name in to_check)
if degraded:
    print(f"FAIL RAID {tokens_str} syncing={int(syncing)}")
    sys.exit(1)
if syncing and not allow_sync:
    print(f"FAIL RAID {tokens_str} syncing={int(syncing)} allow_sync={allow_sync}")
    sys.exit(1)
print(f"OK RAID {tokens_str} syncing={int(syncing)}")
sys.exit(0)
PY
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
run_once: true
- name: Run RAID check on VM (via SSH) # single command, no loop
ansible.builtin.command:
argv: >-
{{
['timeout', '-k', '5', (ssh_hard_timeout | string)]
+ ['sshpass', '-e', 'ssh']
+ ssh_opts
+ [ vm_user ~ '@' ~ vm_ip,
'bash', '-lc',
('sudo ' if use_sudo else '') + raid_check_cmd
]
}}
environment:
SSHPASS: "{{ vm_pass }}"
register: raid_cmd
changed_when: false
failed_when: false # we decide via assert below
retries: "{{ raid_retries }}"
delay: "{{ raid_delay }}"
until: raid_cmd.rc not in [124, 255]
run_once: true
- name: Build one-line summary (always)
ansible.builtin.set_fact:
raid_line: >-
{{
(raid_cmd.stdout | default('') | trim)
if ((raid_cmd.stdout | default('') | trim) | length) > 0
else ('ERROR RAID no-output rc=' ~ (raid_cmd.rc | string))
}}
changed_when: false
run_once: true
- name: RAID result (always one line)
ansible.builtin.assert:
that:
- raid_cmd.rc == 0
success_msg: "{{ raid_line }}"
fail_msg: "{{ raid_line }}"
run_once: true
# Optional verbose debug
- name: Debug | /proc/mdstat (VM)
ansible.builtin.command:
argv: >-
{{
['timeout', '-k', '5', (ssh_hard_timeout | string)]
+ ['sshpass', '-e', 'ssh']
+ ssh_opts
+ [ vm_user ~ '@' ~ vm_ip, 'bash', '-lc', "cat /proc/mdstat" ]
}}
environment:
SSHPASS: "{{ vm_pass }}"
register: mdstat_dbg
changed_when: false
failed_when: false
when: DEBUG == 1
run_once: true
- name: Debug | mdstat output
ansible.builtin.debug:
msg: "{{ mdstat_dbg.stdout | default('') }}"
when: DEBUG == 1
run_once: true
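
For reference, the status logic above can be exercised offline; a minimal Python sketch, assuming only that /proc/mdstat follows the usual mdadm layout (the sample content below is illustrative, not from a real host):

import re

SAMPLE_MDSTAT = """\
Personalities : [raid1]
md0 : active raid1 sdb1[1] sda1[0]
      976630464 blocks super 1.2 [2/2] [UU]
"""

# Same two regexes as the embedded check script.
header_re = re.compile(r"^(md\d+)\s*:\s*active.*$", re.MULTILINE)
token_re = re.compile(r"^\s*\d+\s+blocks.*\[\d+/\d+\]\s*\[([U_]+)\]\s*$", re.MULTILINE)

arrays = {}
for m in header_re.finditer(SAMPLE_MDSTAT):
    chunk = SAMPLE_MDSTAT[m.end():m.end() + 3000]
    tm = token_re.search(chunk)
    if tm:
        arrays[m.group(1)] = tm.group(1)

print(arrays)                                   # {'md0': 'UU'}
print(any("_" in t for t in arrays.values()))   # False -> healthy; [U_] would mean degraded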

View File

@@ -1,7 +1,6 @@
 # update_homarr2.yml
 - name: Update Homarr
-  hosts: pve2_vm
-  become: true
+  hosts: linux_servers
   gather_facts: false
   vars:

View File

@@ -1,3 +1,8 @@
 [linux_servers]
-proxmox_nextcloud ansible_host=192.168.69.2
-proxmox_services ansible_host=192.168.69.3
+jimbuntu ansible_host=192.168.19.4
+jim_storage ansible_host=192.168.19.7
+portainer2 ansible_host=192.168.52.9
+portainernode ansible_host=192.168.52.21
+
+[local]
+localhost ansible_connection=local

View File

@@ -1,3 +1,3 @@
 [mikrotiks]
-mikrotik_fencl_server ansible_host=192.168.69.1
-mikrotik_fencl_5G ansible_host=192.168.68.1
+jim ansible_host=192.168.19.2
+hellsos ansible_host=192.168.40.1

inv_vm
View File

@@ -1,3 +0,0 @@
[vm]
pve1_vm ansible_host=192.168.69.253
pve2_vm ansible_host=192.168.69.254

View File

@@ -1,68 +0,0 @@
# nextcloud/check_stack_nextcloud.yml
- name: Run Nextcloud maintenance on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
# Flip to true if Docker needs sudo on the VM
use_sudo: false
vm_commands:
- "docker exec -u www-data nextcloud php -f /var/www/html/cron.php"
- "docker exec -u www-data nextcloud php occ app:update --all"
- "docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive"
- "docker exec -u www-data nextcloud php occ status"
- "set -o pipefail; timeout 180s bash -x /data/compose/nextcloud/stack-health.sh </dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Nextcloud commands on VM (via SSH, argv, no line breaks)
ansible.builtin.command:
argv:
- sshpass
- -p
- "{{ vm_pass }}"
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
loop: "{{ vm_commands }}"
register: vm_cmds
changed_when: false
- name: Show outputs for each command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ vm_cmds.results }}"
- name: Fail play if any command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Command failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All commands succeeded."
loop: "{{ vm_cmds.results }}"

View File

@@ -1,174 +0,0 @@
# nextcloud/update_collabora.yml
- name: Update Collabora CODE on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Collabora specifics ---
collabora_debug_caps: true
collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"
# Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
collabora_project: "nextcloud-collabora"
collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
collabora_service: "collabora"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
collabora_commands:
- "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Collabora update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ collabora_commands }}"
loop_control:
index_var: idx # <-- capture loop index here
label: "cmd-{{ idx }}" # <-- use idx instead of loop.index
register: collab_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show outputs for each Collabora command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ collab_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Collabora command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Collabora update commands succeeded."
loop: "{{ collab_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Collabora | Wait for capabilities (controller first)
ansible.builtin.uri:
url: "{{ collabora_caps_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: caps_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 2
until: caps_controller.status == 200
failed_when: false
changed_when: false
- name: Collabora | VM-side fetch (pure JSON via Python) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
    with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
        sys.stdout.write(r.read().decode())
except Exception:
    pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: caps_vm
changed_when: false
failed_when: false
when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Collabora | Choose JSON (controller wins, else VM)
ansible.builtin.set_fact:
collab_caps_json: >-
{{
(caps_controller.json
if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
else (
(caps_vm.stdout | default('') | trim | length > 0)
| ternary((caps_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Collabora | Print concise summary
ansible.builtin.debug:
msg: >-
Collabora {{ collab_caps_json.productVersion | default('?') }}
({{ collab_caps_json.productName | default('?') }}),
convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
serverId={{ collab_caps_json.serverId | default('n/a') }}
when: collab_caps_json is defined and DEBUG == 1
- name: Collabora | Capabilities unavailable (after retries)
ansible.builtin.debug:
msg: "Capabilities endpoint není dostupný ani po pokusech."
when: collab_caps_json is not defined and DEBUG == 1
# Optional full JSON (debug)
- name: Collabora | Full JSON (debug)
ansible.builtin.debug:
var: collab_caps_json
when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1
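
The readiness check above prefers the controller-side fetch and only falls back to the VM-side fetch when needed; a minimal Python sketch of the same "controller wins, else VM" decision, assuming the VM fallback prints the raw capabilities JSON to stdout:

import json

def choose_caps(controller_status, controller_json, vm_stdout):
    # Controller result wins when it returned HTTP 200 with parsed JSON.
    if controller_status == 200 and controller_json is not None:
        return controller_json
    # Otherwise use whatever the VM-side fetch printed, if anything.
    body = (vm_stdout or "").strip()
    return json.loads(body) if body else None

print(choose_caps(200, {"productVersion": "24.04"}, ""))      # -> controller JSON
print(choose_caps(503, None, '{"productVersion": "24.04"}'))  # -> parsed VM JSON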

View File

@@ -1,293 +0,0 @@
# nextcloud/update_nextcloud_db_redis.yml
- name: Update Nextcloud DB and Redis on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug / retries ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Nextcloud specifics ---
nextcloud_project: "nextcloud-collabora"
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
# Service names from docker-compose file
nextcloud_web_service: "nextcloud"
nextcloud_db_service: "nextclouddb"
redis_service: "redis"
# Backup directory on the VM (timestamped on controller)
backup_dir: "/data/compose/nextcloud/backup-db-redis-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# --- Backup phase commands (run on VM) ---
# Same idea as in update_nextcloud.yml: maintenance on + config/custom_apps + DB dump
nextcloud_backup_commands:
- >
mkdir -p "{{ backup_dir }}"
- >
docker exec -u www-data nextcloud php occ maintenance:mode --on
- >
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
- >
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
- >
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
- >
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
# --- DB + Redis upgrade commands (run on VM) ---
db_redis_upgrade_commands:
# Update MariaDB service
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_db_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_db_service }}
# Simple DB health check (non-fatal)
- >
docker exec nextcloud-db sh -c 'mysqladmin ping -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' || true
# Update Redis service
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ redis_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ redis_service }}
# Simple Redis health check (non-fatal)
- >
docker exec redis redis-cli PING || true
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Nextcloud | Show current version before DB/Redis upgrade (DEBUG)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- 'docker exec -u www-data nextcloud php occ -V || true'
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_version_before
changed_when: false
failed_when: false
when: DEBUG == 1
# -------------------------
# Backup phase
# -------------------------
- name: Nextcloud | Run backup commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_backup_commands }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
register: nc_backup_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of backup commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_backup_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any backup command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud DB/Redis backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud DB/Redis backup commands succeeded."
loop: "{{ nc_backup_cmds.results }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
# -------------------------
# DB + Redis upgrade phase
# -------------------------
- name: Nextcloud | Run DB/Redis upgrade commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ db_redis_upgrade_commands }}"
loop_control:
index_var: idx
label: "db-redis-cmd-{{ idx }}"
register: nc_db_redis_cmds
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of DB/Redis upgrade commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_db_redis_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any DB/Redis upgrade command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud DB/Redis upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud DB/Redis upgrade commands succeeded."
loop: "{{ nc_db_redis_cmds.results }}"
loop_control:
index_var: idx
label: "db-redis-cmd-{{ idx }}"
# -------------------------
# Disable maintenance + readiness check
# -------------------------
- name: Nextcloud | Disable maintenance mode after DB/Redis upgrade
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_maint_off
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Wait for status.php (controller first)
ansible.builtin.uri:
url: "{{ nextcloud_status_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: nc_status_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 4
until: nc_status_controller.status == 200
failed_when: false
changed_when: false
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
    with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
        sys.stdout.write(r.read().decode())
except Exception:
    pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_status_vm
changed_when: false
failed_when: false
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Choose status JSON (controller wins, else VM)
ansible.builtin.set_fact:
nextcloud_status_json: >-
{{
(nc_status_controller.json
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
else (
(nc_status_vm.stdout | default('') | trim | length > 0)
| ternary((nc_status_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Nextcloud | Print concise status summary (DEBUG)
ansible.builtin.debug:
msg: >-
Nextcloud {{ nextcloud_status_json.version | default('?') }}
(installed={{ nextcloud_status_json.installed | default('?') }},
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
when: nextcloud_status_json is defined and DEBUG == 1
- name: Nextcloud | Status JSON not available (DEBUG)
ansible.builtin.debug:
msg: "status.php is not reachable or did not return JSON."
when: nextcloud_status_json is not defined and DEBUG == 1

View File

@@ -1,287 +0,0 @@
# nextcloud/update_nextcloud.yml
- name: Update Nextcloud on VM via Proxmox
hosts: proxmox_nextcloud # linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug / retries ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Nextcloud specifics ---
nextcloud_project: "nextcloud-collabora"
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
nextcloud_service: "nextcloud"
# Backup directory on the VM (timestamped on controller)
backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# --- Backup phase commands (run on VM) ---
nextcloud_backup_commands:
- >
mkdir -p "{{ backup_dir }}"
- >
docker exec -u www-data nextcloud php occ maintenance:mode --on
# Create tarball of config + custom_apps inside the container
- >
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
# Copy that tarball to the host backup directory
- >
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
# Remove temporary file inside the container
- >
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
# Database dump from DB container (unchanged)
- >
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
# --- Upgrade phase commands (run on VM) ---
nextcloud_upgrade_commands:
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}
- >
docker exec -u www-data nextcloud php occ upgrade
- >
docker exec -u www-data nextcloud php occ app:update --all || true
- >
docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Nextcloud | Show current version before upgrade (DEBUG)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- 'docker exec -u www-data nextcloud php occ -V || true'
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_version_before
changed_when: false
failed_when: false
when: DEBUG == 1
# -------------------------
# Backup phase
# -------------------------
- name: Nextcloud | Run backup commands on VM (via SSH) # run plain commands via SSH
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_backup_commands }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
register: nc_backup_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of backup commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_backup_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any backup command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud backup commands succeeded."
loop: "{{ nc_backup_cmds.results }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
# -------------------------
# Upgrade phase
# -------------------------
- name: Nextcloud | Run upgrade commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_upgrade_commands }}"
loop_control:
index_var: idx
label: "upgrade-cmd-{{ idx }}"
register: nc_upgrade_cmds
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of upgrade commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_upgrade_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any upgrade command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud upgrade commands succeeded."
loop: "{{ nc_upgrade_cmds.results }}"
loop_control:
index_var: idx
label: "upgrade-cmd-{{ idx }}"
- name: Nextcloud | Disable maintenance mode (only after successful upgrade)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_maint_off
changed_when: false
no_log: "{{ DEBUG == 0 }}"
# -------------------------
# Readiness check (status.php)
# -------------------------
- name: Nextcloud | Wait for status.php (controller first)
ansible.builtin.uri:
url: "{{ nextcloud_status_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: nc_status_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 4
until: nc_status_controller.status == 200
failed_when: false
changed_when: false
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
    with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
        sys.stdout.write(r.read().decode())
except Exception:
    pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_status_vm
changed_when: false
failed_when: false
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Choose status JSON (controller wins, else VM)
ansible.builtin.set_fact:
nextcloud_status_json: >-
{{
(nc_status_controller.json
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
else (
(nc_status_vm.stdout | default('') | trim | length > 0)
| ternary((nc_status_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Nextcloud | Print concise status summary (DEBUG)
ansible.builtin.debug:
msg: >-
Nextcloud {{ nextcloud_status_json.version | default('?') }}
(installed={{ nextcloud_status_json.installed | default('?') }},
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
when: nextcloud_status_json is defined and DEBUG == 1
- name: Nextcloud | Status JSON not available (DEBUG)
ansible.builtin.debug:
msg: "status.php is not reachable or did not return JSON."
when: nextcloud_status_json is not defined and DEBUG == 1

View File

@@ -1,113 +0,0 @@
- name: Update Nextcloud (Ansible-native)
hosts: proxmox_nextcloud
become: true
gather_facts: false
vars:
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
nextcloud_project: "nextcloud-collabora"
compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
backup_dir: "/data/compose/nextcloud/backup-{{ ansible_date_time.iso8601_basic_short }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
tasks:
# -------------------------
# Pre-check
# -------------------------
- name: Show current Nextcloud version (DEBUG)
command: docker exec -u www-data nextcloud php occ -V
register: nc_version
changed_when: false
failed_when: false
when: DEBUG == 1
- debug:
var: nc_version.stdout
when: DEBUG == 1
# -------------------------
# Backup
# -------------------------
- name: Enable maintenance mode
command: docker exec -u www-data nextcloud php occ maintenance:mode --on
- name: Create backup directory
file:
path: "{{ backup_dir }}"
state: directory
- name: Backup config and custom_apps
command: >
docker exec nextcloud
tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps
- name: Copy config backup out of container
command: docker cp nextcloud:/tmp/nextcloud_conf.tgz {{ backup_dir }}/
- name: Remove temp archive from container
command: docker exec nextcloud rm -f /tmp/nextcloud_conf.tgz
- name: Backup database
shell: >
docker exec nextcloud-db sh -c
'command -v mariadb-dump >/dev/null &&
mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" ||
mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"'
register: db_dump
- name: Save database dump
copy:
content: "{{ db_dump.stdout }}"
dest: "{{ backup_dir }}/db.sql"
# -------------------------
# Upgrade
# -------------------------
- name: Pull new Nextcloud image
command: docker compose -p {{ nextcloud_project }} -f {{ compose_file }} pull nextcloud
- name: Recreate Nextcloud container
command: >
docker compose -p {{ nextcloud_project }} -f {{ compose_file }}
up -d --no-deps --force-recreate nextcloud
- name: Run Nextcloud upgrade
command: docker exec -u www-data nextcloud php occ upgrade
- name: Update apps
command: docker exec -u www-data nextcloud php occ app:update --all
failed_when: false
- name: Run maintenance repair
command: docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive
failed_when: false
- name: Disable maintenance mode
command: docker exec -u www-data nextcloud php occ maintenance:mode --off
# -------------------------
# Readiness check
# -------------------------
- name: Wait for status.php
uri:
url: "{{ nextcloud_status_url }}"
status_code: 200
return_content: true
validate_certs: true
register: nc_status
retries: "{{ RETRIES }}"
delay: 4
until: nc_status.status == 200
changed_when: false
- name: Print status summary (DEBUG)
debug:
msg: >
Nextcloud {{ nc_status.json.version }}
(installed={{ nc_status.json.installed }},
maintenance={{ nc_status.json.maintenance }},
needsDbUpgrade={{ nc_status.json.needsDbUpgrade }})
when: DEBUG == 1

View File

@@ -1,343 +0,0 @@
# nextcloud/update_nextcloud.yml
- name: Update Nextcloud on VM via Proxmox
hosts: proxmox_nextcloud
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug / retries ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Nextcloud specifics ---
nextcloud_project: "nextcloud-collabora"
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
nextcloud_service: "nextcloud"
# Backup directory on the VM (timestamped on controller)
backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# --- Backup phase commands (run on VM) ---
nextcloud_backup_commands:
- >-
mkdir -p "{{ backup_dir }}"
- >-
{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --on
- >-
{{ docker_prefix }} exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
- >-
{{ docker_prefix }} cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
- >-
{{ docker_prefix }} exec nextcloud rm /tmp/nextcloud_conf.tgz || true
- >-
{{ docker_prefix }} exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Nextcloud | Show current version before upgrade (DEBUG)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ docker_prefix }} exec -u www-data nextcloud php occ -V || true"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_version_before
changed_when: false
failed_when: false
when: DEBUG == 1
no_log: "{{ DEBUG == 0 }}"
# -------------------------
# Backup phase
# -------------------------
- name: Nextcloud | Run backup commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_backup_commands }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
register: nc_backup_cmds
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of backup commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_backup_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any backup command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud backup commands succeeded."
loop: "{{ nc_backup_cmds.results }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
# -------------------------
# Upgrade phase (with always cleanup)
# -------------------------
- name: Nextcloud | Upgrade block
block:
- name: Nextcloud | Pull image
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_pull
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Recreate service
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_up
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Ensure maintenance is OFF before occ upgrade
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_maint_off_before
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | occ upgrade (must succeed)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ upgrade"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_occ_upgrade
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Update apps (best-effort)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ app:update --all || true"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_app_update
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Repair (best-effort)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_repair
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
rescue:
- name: Nextcloud | Show occ upgrade output (DEBUG)
ansible.builtin.debug:
msg: |
occ upgrade FAILED
RC: {{ nc_occ_upgrade.rc | default('n/a') }}
STDOUT:
{{ (nc_occ_upgrade.stdout | default('')).strip() }}
STDERR:
{{ (nc_occ_upgrade.stderr | default('')).strip() }}
when: DEBUG == 1
- name: Nextcloud | Try to force-disable maintenance flag (best-effort)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ config:system:set maintenance --type=boolean --value=false || true"
environment:
SSHPASS: "{{ vm_pass }}"
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Fail explicitly
ansible.builtin.fail:
msg: >-
Nextcloud occ upgrade failed. Check nextcloud.log inside the container (data/nextcloud.log).
stdout={{ (nc_occ_upgrade.stdout | default('') | trim) }}
stderr={{ (nc_occ_upgrade.stderr | default('') | trim) }}
always:
- name: Nextcloud | Ensure maintenance mode is OFF (always)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') }}{{ docker_prefix }} exec -u www-data nextcloud php occ maintenance:mode --off || true"
environment:
SSHPASS: "{{ vm_pass }}"
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
# -------------------------
# Readiness check (status.php)
# -------------------------
- name: Nextcloud | Wait for status.php (controller first)
ansible.builtin.uri:
url: "{{ nextcloud_status_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: nc_status_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 4
until: nc_status_controller.status == 200
failed_when: false
changed_when: false
- name: Nextcloud | Print concise status summary (DEBUG)
ansible.builtin.debug:
msg: >-
Nextcloud {{ nc_status_controller.json.version | default('?') }}
(installed={{ nc_status_controller.json.installed | default('?') }},
maintenance={{ nc_status_controller.json.maintenance | default('?') }},
needsDbUpgrade={{ nc_status_controller.json.needsDbUpgrade | default('?') }})
when: DEBUG == 1 and nc_status_controller.json is defined
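
The block/rescue/always structure above maps onto try/except/finally; a rough Python analogy (the function names are placeholders for the tasks, not real helpers):

def pull_image(): print("compose pull")
def recreate_service(): print("compose up -d --force-recreate")
def occ_upgrade(): print("occ upgrade")
def disable_maintenance_flag(): print("occ config:system:set maintenance ... false")
def maintenance_mode_off(): print("occ maintenance:mode --off")

try:
    pull_image()                 # block: any failing step jumps to the rescue path
    recreate_service()
    occ_upgrade()                # the one step that must succeed
except Exception:
    disable_maintenance_flag()   # rescue: best-effort cleanup...
    raise                        # ...then fail the play explicitly
finally:
    maintenance_mode_off()       # always: runs on success and failure alike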

View File

@@ -1,118 +0,0 @@
# update_portainer_agent.yml
- name: Update Portainer Agent on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Portainer Agent specifics ---
portainer_agent_image: "portainer/agent:latest"
portainer_agent_container: "portainer_agent"
portainer_agent_port: 9001
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
portainer_commands:
- "{{ docker_prefix }} pull -q {{ portainer_agent_image }} >/dev/null"
- "{{ docker_prefix }} stop {{ portainer_agent_container }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} rm {{ portainer_agent_container }} >/dev/null 2>&1 || true"
- >
{{ docker_prefix }} run -d
--name {{ portainer_agent_container }}
--restart=always
-p {{ portainer_agent_port }}:9001
-v /var/run/docker.sock:/var/run/docker.sock
-v /var/lib/docker/volumes:/var/lib/docker/volumes
{{ portainer_agent_image }} >/dev/null
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Portainer Agent update commands on VM (via SSH) # run all commands via sshpass
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ portainer_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: portainer_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Portainer command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ portainer_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Portainer command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Portainer Agent update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Portainer Agent update commands succeeded."
loop: "{{ portainer_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness check (TCP port)
# -------------------------
- name: Portainer Agent | Wait for TCP port to be open from controller
ansible.builtin.wait_for:
host: "{{ vm_ip }}"
port: "{{ portainer_agent_port }}"
delay: 2 # initial delay before first check
timeout: "{{ RETRIES * 2 }}" # total timeout in seconds
state: started
register: portainer_wait
delegate_to: localhost
run_once: true
changed_when: false
- name: Portainer Agent | Print concise summary
ansible.builtin.debug:
msg: >-
Portainer Agent TCP {{ vm_ip }}:{{ portainer_agent_port }}
reachable={{ (portainer_wait is defined) and (not portainer_wait.failed | default(false)) }}
elapsed={{ portainer_wait.elapsed | default('n/a') }}s
when: DEBUG == 1
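
The wait_for task above is a plain TCP connect probe with no application-level handshake; an equivalent Python sketch (host and port values are illustrative):

import socket

def port_open(host, port, timeout=2.0):
    # "Ready" here means only that the TCP port accepts a connection.
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(port_open("192.168.52.9", 9001))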

test_sms.yml Normal file
View File

@@ -0,0 +1,57 @@
---
- name: Send and verify SMS delivery via internet-master.cz
hosts: localhost
gather_facts: false
vars:
sms_number: "601358865"
sms_username: "mikrotik"
sms_password_send: "jdkotzHJIOPWhjtr32D"
sms_password_recv: "jdkotzHJIOPWhjtr32D"
sms_wait_seconds: 120 # Wait 2 minutes for delivery
tasks:
- name: Generate random test string
set_fact:
random_string: "mikrotik_{{ lookup('password', '/dev/null length=8 chars=ascii_letters') }}"
- name: Send SMS message
uri:
url: "https://sms.internet-master.cz/send/?number={{ sms_number }}&message=@mikrotik@{{ random_string | urlencode }}&type=class-1&username={{ sms_username }}&password={{ sms_password_send }}"
method: GET
return_content: true
register: send_result
- name: Show send API response
debug:
var: send_result.content
- name: Wait for SMS to be delivered
pause:
seconds: "{{ sms_wait_seconds }}"
- name: Fetch received messages
uri:
url: "https://sms.internet-master.cz/receive/?username={{ sms_username }}&password={{ sms_password_recv }}"
method: GET
return_content: true
register: recv_result
- name: Parse received JSON
set_fact:
inbox: "{{ recv_result.json.inbox | default([]) }}"
- name: Check if random string message was received
set_fact:
message_found: "{{ inbox | selectattr('message', 'equalto', random_string) | list | length > 0 }}"
- name: Report result
debug:
msg: >
SMS with message '{{ random_string }}' was {{
'delivered ✅' if message_found else 'NOT delivered ❌'
}}.
- name: Fail if not delivered
fail:
msg: "Message '{{ random_string }}' not found in received inbox!"
when: not message_found
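
The verification step assumes the receive endpoint returns JSON with an inbox array of message objects; a Python sketch of the same membership check (the field names in the sample payload are assumptions, not documented API output):

inbox = [
    {"number": "601358865", "message": "mikrotik_AbCdEfGh"},  # assumed shape
]
random_string = "mikrotik_AbCdEfGh"

# Mirrors: inbox | selectattr('message', 'equalto', random_string) | list | length > 0
message_found = any(m.get("message") == random_string for m in inbox)
print("delivered" if message_found else "NOT delivered")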

update.yml Normal file
View File

@@ -0,0 +1,165 @@
- name: Update system (APT + Flatpak)
hosts: all
gather_facts: false
strategy: free
serial: 2
become: true
become_user: root
become_method: sudo
vars:
ssh_precheck_timeout: 8
apt_async: 1800
apt_poll: 10
apt_retries: 3
apt_retry_delay: 5
flatpak_timeout: 300
flatpak_async: 600
flatpak_poll: 5
pre_tasks:
- name: Ensure SSH is reachable (skip host if not)
wait_for:
host: "{{ ansible_host | default(inventory_hostname) }}"
port: "{{ ansible_port | default(22) }}"
timeout: "{{ ssh_precheck_timeout }}"
delegate_to: localhost
register: ssh_ok
ignore_errors: true
- meta: end_host
when: ssh_ok is failed
- name: Ping with retries (handle intermittent flaps)
ping:
register: ping_r
retries: 3
delay: 3
until: ping_r is succeeded
ignore_errors: true
- meta: end_host
when: ping_r is failed
tasks:
- name: Update APT cache (bounded + retried)
environment: { DEBIAN_FRONTEND: noninteractive }
apt:
update_cache: yes
cache_valid_time: 3600
async: "{{ apt_async }}"
poll: "{{ apt_poll }}"
register: apt_update
retries: "{{ apt_retries }}"
delay: "{{ apt_retry_delay }}"
until: apt_update is succeeded
- name: If APT cache update failed, try to fix dpkg and retry once
block:
- name: Fix partially configured packages
command: dpkg --configure -a
changed_when: false
- name: Retry APT cache update after dpkg fix
environment: { DEBIAN_FRONTEND: noninteractive }
apt:
update_cache: yes
async: 600
poll: 5
when: apt_update is failed
- name: Upgrade all APT packages (bounded + retried)
environment: { DEBIAN_FRONTEND: noninteractive }
apt:
upgrade: dist
async: "{{ apt_async }}"
poll: "{{ apt_poll }}"
register: apt_upgrade
retries: "{{ apt_retries }}"
delay: "{{ apt_retry_delay }}"
until: apt_upgrade is succeeded
- name: If APT upgrade failed, try to fix dpkg and retry once
block:
- name: Fix partially configured packages
command: dpkg --configure -a
changed_when: false
- name: Retry APT upgrade after dpkg fix
environment: { DEBIAN_FRONTEND: noninteractive }
apt:
upgrade: dist
async: 1200
poll: 5
when: apt_upgrade is failed
- name: Check if flatpak binary exists
become: false
stat:
path: /usr/bin/flatpak
register: flatpak_bin
- name: Update system Flatpaks (bounded; treat timeout as non-fatal)
command: bash -lc "timeout {{ flatpak_timeout }} flatpak update -y --noninteractive"
register: flatpak_sys
async: "{{ flatpak_async }}"
poll: "{{ flatpak_poll }}"
failed_when: flatpak_sys.rc is defined and flatpak_sys.rc not in [0, 124]
when: flatpak_bin.stat.exists
# ---- User-agnostic Flatpak updates (all non-system users) ----
- name: Get passwd database
getent:
database: passwd
register: ge
- name: Build list of regular users (uid >= 1000, real shells)
set_fact:
# getent_passwd values are lists: [password, uid, gid, gecos, home, shell]
regular_users: >-
{% set users = [] -%}
{% for name, f in ge.ansible_facts.getent_passwd.items() -%}
{% if (f[1] | int) >= 1000 and f[5] not in ['/usr/sbin/nologin', '/sbin/nologin', '/bin/false'] -%}
{% set _ = users.append({'name': name, 'uid': f[1] | int, 'shell': f[5]}) -%}
{% endif -%}
{% endfor -%}
{{ users }}
when: ge is succeeded
- name: Stat per-user runtime dir if flatpak is present
stat:
path: "/run/user/{{ item.uid }}"
loop: "{{ regular_users | default([]) }}"
loop_control:
label: "{{ item.name }}"
register: user_runtime_stats
when: flatpak_bin.stat.exists
- name: Merge runtime stats keyed by username
set_fact:
# items2dict cannot address a nested key like item.name, so build the map via zip
user_runtime_map: >-
{{
dict(user_runtime_stats.results | default([]) | map(attribute='item.name')
| zip(user_runtime_stats.results | default([]) | map(attribute='stat')))
}}
when: flatpak_bin.stat.exists
- name: Update user Flatpaks (use XDG_RUNTIME_DIR when available)
become_user: "{{ item.name }}"
environment: >-
{{
user_runtime_map[item.name].exists
| default(false)
| ternary({'XDG_RUNTIME_DIR': '/run/user/' ~ item.uid|string}, {})
}}
command: bash -lc "timeout {{ flatpak_timeout }} flatpak --user update -y --noninteractive"
register: flatpak_user_res
async: "{{ flatpak_async }}"
poll: "{{ flatpak_poll }}"
failed_when: flatpak_user_res.rc is defined and flatpak_user_res.rc not in [0, 124]
changed_when: "'Installing' in (flatpak_user_res.stdout | default('')) or 'Installing' in (flatpak_user_res.stderr | default('')) or 'Updating' in (flatpak_user_res.stdout | default('')) or 'Updating' in (flatpak_user_res.stderr | default(''))"
loop: "{{ regular_users | default([]) }}"
loop_control:
label: "{{ item.name }}"
when: flatpak_bin.stat.exists
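
The "regular users" filter above applies the usual passwd heuristics; the same criteria, run directly on a host with Python's pwd module:

import pwd

SKIP_SHELLS = {"/usr/sbin/nologin", "/sbin/nologin", "/bin/false"}

# uid >= 1000 and a real login shell, exactly as in the set_fact above.
for p in pwd.getpwall():
    if p.pw_uid >= 1000 and p.pw_shell not in SKIP_SHELLS:
        print(p.pw_name, p.pw_uid, p.pw_shell)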

View File

@@ -1,155 +0,0 @@
# update_broker_kafka-ui.yml
- name: Update Kafka broker3 and Redpanda Console on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Kafka / Redpanda Console specifics ---
kafka_project: "kafka"
# Adjusted to match your actual compose file location
kafka_compose_file: "/data/compose/docker-compose.yml"
kafka_services:
- broker3
- kafka-ui
redpanda_console_port: 8084
# Controller-side URL (default to direct VM IP/port or external URL)
redpanda_console_url: "{{ lookup('env', 'REDPANDA_CONSOLE_URL') | default('http://192.168.69.254:8084/overview', true) }}"
redpanda_retries: "{{ RETRIES }}"
redpanda_delay: 2
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
# 1) Pull latest images for broker3 + kafka-ui
# 2) Stop any running containers with these names (legacy or compose-managed)
# 3) Remove any containers with these names to avoid name conflicts
# 4) Recreate services via docker compose
kafka_commands:
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} pull {{ kafka_services | join(' ') }} >/dev/null"
- "{{ docker_prefix }} stop {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} rm -f {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} up -d --no-deps --force-recreate {{ kafka_services | join(' ') }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Kafka update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kafka_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kafka_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Kafka command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kafka_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Kafka command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Kafka/Redpanda Console update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Kafka/Redpanda Console update commands succeeded."
loop: "{{ kafka_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness check Redpanda Console UI
# -------------------------
- name: Redpanda Console | Wait for overview page (controller, with retries)
ansible.builtin.uri:
url: "{{ redpanda_console_url }}"
method: GET
return_content: true
validate_certs: false # plain HTTP on 192.168.69.254 (or as configured)
status_code: 200
register: redpanda_controller
delegate_to: localhost
run_once: true
when: redpanda_console_url is defined and (redpanda_console_url | length) > 0
retries: "{{ redpanda_retries }}"
delay: "{{ redpanda_delay }}"
until: redpanda_controller.status == 200
failed_when: false
changed_when: false
- name: Redpanda Console | Print concise summary
ansible.builtin.debug:
msg: >-
Redpanda Console overview {{ 'reachable' if (redpanda_controller is defined and (redpanda_controller.status|default(0))==200) else 'NOT reachable' }}.
status={{ redpanda_controller.status | default('n/a') }};
length={{ (redpanda_controller.content | default('')) | length }};
when: DEBUG == 1 and (redpanda_controller is defined)
# Optional detailed dump (short excerpt only)
- name: Redpanda Console | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (redpanda_controller.content | default(''))[:500] }}"
when: DEBUG == 1 and (redpanda_controller is defined) and (redpanda_controller.content is defined)
# -------------------------
# Final assertion: Console URL must be reachable
# -------------------------
- name: Redpanda Console | Assert overview reachable (if URL configured)
ansible.builtin.assert:
that:
- >
not (redpanda_console_url is defined and (redpanda_console_url | length) > 0)
or
(
redpanda_controller is defined
and (redpanda_controller.status | default(0)) == 200
)
fail_msg: "Redpanda Console URL {{ redpanda_console_url }} is NOT reachable with HTTP 200 after retries."
success_msg: "Redpanda Console URL {{ redpanda_console_url }} is reachable with HTTP 200."

View File

@@ -1,194 +0,0 @@
# update_homarr.yml
- name: Update Homarr on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Homarr specifics
homarr_project: "homarr"
homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
homarr_service: "homarr"
homarr_image: "ghcr.io/homarr-labs/homarr:latest"
homarr_port: 7575
# Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"
# Fixed container name used in your compose (avoid conflicts with any leftover container)
homarr_container_name: "homarr"
# Retry policy (same pattern as Kuma): 25x with 2s delay
homarr_retries: "{{ RETRIES }}"
homarr_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
homarr_commands:
- "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Homarr update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ homarr_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: homarr_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Homarr command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ homarr_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Homarr command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Homarr update commands succeeded."
loop: "{{ homarr_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Homarr | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
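        # normalize: strip any trailing slash from homarr_url, then append exactly one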
url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
status_code: 200
register: homarr_controller
delegate_to: localhost
run_once: true
when: homarr_url is defined and (homarr_url | length) > 0
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: homarr_controller.status == 200
failed_when: false
changed_when: false
- name: Homarr | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Homarr homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: homarr_vm
changed_when: false
failed_when: false
when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
- name: Homarr | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
homarr_home_html: >-
{{
(
homarr_controller.content
if (homarr_controller is defined)
and ((homarr_controller.status|default(0))==200)
and (homarr_controller.content is defined)
else
(homarr_vm.stdout | default('') | trim)
)
}}
when:
- (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
or ((homarr_vm.stdout | default('') | trim | length) > 0)
- name: Homarr | Print concise summary
ansible.builtin.debug:
msg: >-
Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (homarr_home_html | default('')) | length }};
contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
when: DEBUG == 1
- name: Homarr | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Homarr web není dostupný ani po pokusech."
when: homarr_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Homarr | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (homarr_home_html | default(''))[:500] }}"
when: homarr_home_html is defined and DEBUG == 1


@@ -1,313 +0,0 @@
# update_immich.yml
- name: Update Immich on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Immich specifics
immich_dir: "/opt/immich"
immich_project: "immich"
immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
immich_compose_file: "/opt/immich/docker-compose.yml"
immich_override_file: "/opt/immich/docker-compose.override.yml"
immich_port: 2283
# Optional external URL for controller-side readiness check (e.g., https://photos.example.com)
immich_url: "{{ lookup('env', 'IMMICH_URL') | default('', true) }}"
# Retry policy
immich_retries: "{{ RETRIES }}"
immich_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Compose command (always include override to keep local mounts separate from upstream compose)
immich_compose_cmd: >-
{{ docker_prefix }} compose
-p {{ immich_project }}
-f {{ immich_compose_file }}
-f {{ immich_override_file }}
# Commands to run on the target VM
immich_commands:
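      # Each list item runs in its own SSH session; the bare 'cd' below is only a
      # sanity check that {{ immich_dir }} exists (non-zero exit if it does not).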
- "cd {{ immich_dir }}"
- |
cd {{ immich_dir }}
mkdir -p backups
if [ -f docker-compose.yml ]; then
cp -a docker-compose.yml "backups/docker-compose.yml.$(date +%F_%H%M%S).bak"
fi
if [ -f .env ]; then
cp -a .env "backups/.env.$(date +%F_%H%M%S).bak"
fi
if [ -f docker-compose.override.yml ]; then
cp -a docker-compose.override.yml "backups/docker-compose.override.yml.$(date +%F_%H%M%S).bak"
fi
- |
cd {{ immich_dir }}
# Download latest compose from Immich releases (requires curl or wget)
if command -v curl >/dev/null 2>&1; then
curl -fsSL -o docker-compose.yml "{{ immich_compose_url }}"
elif command -v wget >/dev/null 2>&1; then
wget -qO docker-compose.yml "{{ immich_compose_url }}"
else
echo "Neither curl nor wget is available on the VM."
exit 1
fi
- |
cd {{ immich_dir }}
# Ensure override compose exists (create if missing)
if [ ! -f "{{ immich_override_file }}" ]; then
printf '%s\n' \
'services:' \
' immich-server:' \
' volumes:' \
' - /mnt/nextcloud-howard-photos:/mnt/nextcloud-howard-photos' \
' - /mnt/nextcloud-kamilkaprdelka-photos:/mnt/nextcloud-kamilkaprdelka-photos' \
> "{{ immich_override_file }}"
fi
# Fail early if override is still missing/empty
test -s "{{ immich_override_file }}"
- |
cd {{ immich_dir }}
# Ensure .env exists. If missing, try to reconstruct it from running containers to avoid breaking DB creds.
python3 - <<'PY'
import json
import subprocess
from pathlib import Path
env_path = Path(".env")
if env_path.exists():
raise SystemExit(0)
def run(cmd):
p = subprocess.run(cmd, capture_output=True, text=True)
return p.returncode, p.stdout, p.stderr
rc, out, err = run(["bash", "-lc", "command docker inspect immich_postgres immich_server"])
if rc != 0 or not out.strip():
print("ERROR: .env is missing and cannot inspect running containers (immich_postgres/immich_server).", flush=True)
print("Create /opt/immich/.env manually or ensure the containers exist.", flush=True)
raise SystemExit(1)
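        # docker inspect prints a JSON array with one object per inspected container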
data = json.loads(out)
by_name = {}
for c in data:
name = (c.get("Name") or "").lstrip("/")
by_name[name] = c
pg = by_name.get("immich_postgres")
srv = by_name.get("immich_server")
if not pg or not srv:
print("ERROR: Could not find immich_postgres and immich_server in docker inspect output.", flush=True)
raise SystemExit(1)
def env_map(container):
m = {}
for kv in (container.get("Config", {}).get("Env") or []):
if "=" in kv:
k, v = kv.split("=", 1)
m[k] = v
return m
def find_mount_source(container, dest):
for m in (container.get("Mounts") or []):
if m.get("Destination") == dest:
return m.get("Source")
return ""
pg_env = env_map(pg)
db_user = pg_env.get("POSTGRES_USER", "")
db_pass = pg_env.get("POSTGRES_PASSWORD", "")
db_name = pg_env.get("POSTGRES_DB", "")
db_data = find_mount_source(pg, "/var/lib/postgresql/data")
upload_loc = find_mount_source(srv, "/usr/src/app/upload")
# Try to preserve the currently used image tag as IMMICH_VERSION (optional but safer)
immich_version = ""
image = (srv.get("Config", {}).get("Image") or "")
if ":" in image and "@" not in image:
immich_version = image.rsplit(":", 1)[-1]
elif ":" in image and "@" in image:
# image like repo:tag@sha256:...
immich_version = image.split("@", 1)[0].rsplit(":", 1)[-1]
missing = []
for k, v in [
("DB_USERNAME", db_user),
("DB_PASSWORD", db_pass),
("DB_DATABASE_NAME", db_name),
("DB_DATA_LOCATION", db_data),
("UPLOAD_LOCATION", upload_loc),
]:
if not v:
missing.append(k)
if missing:
print("ERROR: Could not reconstruct these .env values from containers: " + ", ".join(missing), flush=True)
raise SystemExit(1)
lines = [
f"UPLOAD_LOCATION={upload_loc}",
f"DB_DATA_LOCATION={db_data}",
f"DB_USERNAME={db_user}",
f"DB_PASSWORD={db_pass}",
f"DB_DATABASE_NAME={db_name}",
]
if immich_version:
lines.append(f"IMMICH_VERSION={immich_version}")
env_path.write_text("\n".join(lines) + "\n", encoding="utf-8")
print("Created .env from running containers.", flush=True)
PY
- |
cd {{ immich_dir }}
# Comment out healthcheck.start_interval if present (safe no-op if missing)
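        # (older Compose/Engine releases do not know the start_interval key and can
        # reject the file during schema validation)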
sed -i -E 's/^([[:space:]]*)start_interval:/\1# start_interval:/' docker-compose.yml || true
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} config >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} pull >/dev/null"
- "cd {{ immich_dir }} && {{ immich_compose_cmd }} up -d --remove-orphans --force-recreate >/dev/null"
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ immich_commands }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
register: immich_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Show outputs for each Immich command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ immich_cmds.results }}"
when: DEBUG == 1
run_once: true
- name: Fail play if any Immich command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Immich update commands succeeded."
loop: "{{ immich_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
run_once: true
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Immich | Wait for API ping (controller first, with retries)
ansible.builtin.uri:
url: "{{ (immich_url | regex_replace('/$','')) + '/api/server/ping' }}"
method: GET
return_content: true
validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
status_code: 200
register: immich_controller
delegate_to: localhost
run_once: true
when: immich_url is defined and (immich_url | length) > 0
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: immich_controller.status == 200 and ('pong' in (immich_controller.content | default('')))
failed_when: false
changed_when: false
- name: Immich | VM-side ping (JSON via Python, with retries)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Ping Immich API from localhost and print response to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/api/server/ping", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: immich_vm
changed_when: false
failed_when: false
when: immich_controller.status | default(0) != 200
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: (immich_vm.stdout | default('') | trim | length) > 0 and ('pong' in (immich_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
run_once: true
- name: Immich | Print concise summary
ansible.builtin.debug:
msg: >-
Immich API ping {{ 'OK' if (('pong' in (immich_controller.content|default(''))) or ('pong' in (immich_vm.stdout|default('')))) else 'NOT OK' }}.
Source={{ 'controller' if (immich_controller.status|default(0))==200 else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }}.
when: DEBUG == 1
run_once: true


@@ -1,65 +0,0 @@
# update_semaphore.yml
- name: Update Semaphore on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}" # IP vm-portainer
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
# --- Semaphore specifics ---
semaphore_project: "semaphore"
semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
semaphore_service: "semaphore"
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Semaphore self-update on VM in background (nohup)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
nohup bash -c '
unalias docker 2>/dev/null || true
DOCKER_CLI_HINTS=0 docker compose \
-p {{ semaphore_project }} \
-f {{ semaphore_compose_file }} \
up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
' >/dev/null 2>&1 &
environment:
SSHPASS: "{{ vm_pass }}"
register: semaphore_update
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show result of Semaphore self-update (debug)
ansible.builtin.debug:
msg: |
RC: {{ semaphore_update.rc }}
STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
when: DEBUG == 1


@@ -1,34 +0,0 @@
- name: Update system (APT + Flatpak)
  hosts: all
  become: true
  become_user: root
  become_method: sudo
  tasks:
    - name: Update APT cache
      ansible.builtin.apt:
        update_cache: yes
    - name: Upgrade all APT packages
      ansible.builtin.apt:
        upgrade: dist
    - name: Check if flatpak binary exists
      ansible.builtin.stat:
        path: /usr/bin/flatpak
      register: flatpak_bin
    - name: Update system Flatpaks
      ansible.builtin.shell: timeout 300 flatpak update -y
      register: flatpak_sys
      # rc 124 = GNU timeout expired; treat a slow Flatpak mirror as non-fatal
      failed_when: flatpak_sys.rc != 0 and flatpak_sys.rc != 124
      when: flatpak_bin.stat.exists
    - name: Update user Flatpaks
      become_user: jakub
      environment:
        XDG_RUNTIME_DIR: /run/user/1000
      ansible.builtin.shell: timeout 300 flatpak update -y
      register: flatpak_user
      failed_when: flatpak_user.rc != 0 and flatpak_user.rc != 124
      when: flatpak_bin.stat.exists


@@ -1,194 +0,0 @@
# nextcloud/update_uptime_kuma.yml
- name: Update Uptime Kuma on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Uptime Kuma specifics
kuma_project: "uptime-kuma"
kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
kuma_service: "uptime-kuma"
kuma_image: "louislam/uptime-kuma:latest"
kuma_port: 3001
# Optional external URL for controller-side readiness check (e.g., https://kuma.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"
    # Fixed container name used in your compose (may collide with a leftover
    # Portainer-run container; removed below before compose up)
kuma_container_name: "uptime-kuma-dev"
# Retry policy
kuma_retries: "{{ RETRIES }}"
kuma_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs like in Collabora play)
kuma_commands:
- "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"
tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Uptime Kuma update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kuma_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kuma_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Uptime Kuma command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kuma_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Uptime Kuma command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Uptime Kuma update commands succeeded."
loop: "{{ kuma_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Kuma | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (kuma_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
status_code: 200
register: kuma_controller
delegate_to: localhost
run_once: true
when: kuma_url is defined and (kuma_url | length) > 0
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: kuma_controller.status == 200
failed_when: false
changed_when: false
- name: Kuma | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Kuma homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: kuma_vm
changed_when: false
failed_when: false
when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}" # hide command and output when not debugging
- name: Kuma | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
kuma_home_html: >-
{{
(
kuma_controller.content
if (kuma_controller is defined)
and ((kuma_controller.status|default(0))==200)
and (kuma_controller.content is defined)
else
(kuma_vm.stdout | default('') | trim)
)
}}
when:
- (kuma_controller is defined and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined))
or ((kuma_vm.stdout | default('') | trim | length) > 0)
- name: Kuma | Print concise summary
ansible.builtin.debug:
msg: >-
Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((kuma_controller is defined) and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined)) else 'vm' if (kuma_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (kuma_home_html | default('')) | length }};
contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
when: DEBUG == 1
- name: Kuma | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Kuma web není dostupná ani po pokusech."
when: kuma_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Kuma | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (kuma_home_html | default(''))[:500] }}"
when: kuma_home_html is defined and DEBUG == 1