Compare commits

105 Commits
main ... main

Author SHA1 Message Date
martin.fencl
af3c676183 edit 2025-12-10 12:41:42 +01:00
martin.fencl
c2d67f5498 edit 2025-12-10 12:31:17 +01:00
martin.fencl
a1d730a18c edit 2025-12-10 12:25:58 +01:00
martin.fencl
cf2507bdf6 edit 2025-12-10 12:21:09 +01:00
martin.fencl
21ce9478f4 edit 2025-12-10 12:20:47 +01:00
martin.fencl
0fd5ef9e41 edit 2025-12-10 12:11:05 +01:00
martin.fencl
bf35a6c253 edit 2025-12-10 11:48:19 +01:00
martin.fencl
4f39d04e3f edit 2025-12-10 11:12:03 +01:00
martin.fencl
d94f999f7b update immich 2025-12-10 10:41:42 +01:00
martin.fencl
9140c6e2c9 fix 2025-12-05 21:53:31 +01:00
martin.fencl
4500478fce fix 2025-12-05 21:50:49 +01:00
martin.fencl
3735217c58 test host 2025-12-05 21:43:37 +01:00
martin.fencl
cb2099a802 test 2025-12-05 21:19:51 +01:00
martin.fencl
bcbc0269d6 test 2025-12-05 15:03:50 +01:00
martin.fencl
726ccb0242 Update inventory for Linux servers with specific Proxmox entries 2025-12-01 01:07:38 +01:00
martin.fencl
b20f103992 rename 2025-12-01 01:05:39 +01:00
martin.fencl
7d5fa667dd add proxmox 2025-12-01 00:34:59 +01:00
martin.fencl
f74977f1fb Update Mikrotik inventory with server and 5G host entries 2025-11-30 22:10:00 +01:00
martin.fencl
d28c0662c5 rename 2025-11-28 18:26:04 +01:00
martin.fencl
f93373b1c8 Add Ansible playbook for updating Portainer Agent on VM via Proxmox 2025-11-28 18:22:29 +01:00
martin.fencl
6ccbb97fbb rename 2025-11-26 17:21:38 +01:00
martin.fencl
885edc980c Add Ansible playbook for updating Nextcloud DB and Redis on VM via Proxmox 2025-11-26 17:07:02 +01:00
martin.fencl
6b1c5efe22 fix 4 2025-11-26 16:08:52 +01:00
martin.fencl
d9b870d36f fix 3 2025-11-26 15:11:28 +01:00
martin.fencl
21bc7b4fd8 fix 2 2025-11-26 14:46:21 +01:00
martin.fencl
690d03e470 fix 1 2025-11-26 14:14:50 +01:00
martin.fencl
3228778db9 Add Ansible playbook for updating Nextcloud on VM via Proxmox 2025-11-26 13:43:16 +01:00
martin.fencl
0dfeed3e23 Update Collabora and Homarr compose file paths in update playbooks 2025-11-26 12:05:15 +01:00
martin.fencl
a8b8ea8a05 Update Uptime Kuma compose file path in update playbook 2025-11-26 10:39:21 +01:00
martin.fencl
8537742961 Fix Collabora compose file path in update playbook 2025-11-26 10:37:44 +01:00
martin.fencl
4a462417a9 Refactor Semaphore self-update task to use nohup for background execution and simplify Docker command handling 2025-11-25 00:11:20 +01:00
martin.fencl
97dc09e45c Refactor Semaphore update process to use a single compose command and simplify task structure 2025-11-25 00:08:22 +01:00
martin.fencl
cf07ef7608 Add playbook to update Semaphore on VM via Proxmox 2025-11-24 23:23:06 +01:00
martin.fencl
04450776fe edit kafka 2025-11-24 23:16:12 +01:00
martin.fencl
aa4c6fb6b6 Add playbook to update Redpanda Console on VM via Proxmox 2025-11-24 23:06:08 +01:00
martin.fencl
46d44ae924 Add assertion to check Immich controller URL reachability and update task failure handling 2025-11-24 22:23:28 +01:00
martin.fencl
e1474fd587 edit immich 2025-11-24 21:48:17 +01:00
martin.fencl
8b6eec595c Add Immich update playbook for Proxmox VM management 2025-11-24 21:28:04 +01:00
fencl
3c406662cb . 2025-10-05 16:48:41 +02:00
fencl
f08dc68e20 . 2025-10-05 16:45:20 +02:00
fencl
cadc296a1f . 2025-10-05 16:39:27 +02:00
fencl
65642d8114 . 2025-10-05 16:35:35 +02:00
fencl
fd7ec9a3e7 . 2025-10-05 16:18:36 +02:00
fencl
40586253a5 . 2025-10-05 16:00:27 +02:00
fencl
b834c2e4c4 . 2025-10-05 15:45:37 +02:00
fencl
928a131ac8 . 2025-10-05 15:32:56 +02:00
fencl
d3a424508e . 2025-10-05 14:20:20 +02:00
fencl
615ebcfe65 . 2025-10-05 14:12:44 +02:00
fencl
8d9d39590e . 2025-10-05 14:03:34 +02:00
fencl
3e5099b31d . 2025-10-05 13:58:12 +02:00
fencl
a6f4c8cd75 . 2025-10-05 13:51:45 +02:00
fencl
c60c881f5a . 2025-10-05 13:27:22 +02:00
fencl
e94a76dde4 Refactor update_uptime_kuma.yml: enhance debug toggle, streamline preflight checks, and improve command execution logic 2025-10-05 13:21:02 +02:00
fencl
ef67219c98 Refactor update_uptime_kuma.yml: clean up variable definitions for clarity 2025-10-05 13:09:40 +02:00
fencl
3c0f29e3cb Refactor update_uptime_kuma.yml: simplify variable definitions and enhance command execution logic 2025-10-05 13:07:11 +02:00
fencl
f077a811da Refactor update_uptime_kuma.yml: enhance compose file discovery and health check logic 2025-10-05 12:57:26 +02:00
fencl
bd25ea0eb1 Refactor update_uptime_kuma.yml: enhance task descriptions and streamline variable definitions for clarity 2025-10-05 12:53:13 +02:00
fencl
3853c25f7b Add playbook for updating Uptime Kuma on VM via Proxmox 2025-10-05 12:45:05 +02:00
fencl
d38d6c76ed Refactor check_stack_nextcloud.yml and update_collabora.yml: add blank lines for improved readability 2025-10-05 10:15:06 +02:00
fencl
fa74512fa4 Refactor check_stack_nextcloud.yml: add blank line for improved readability 2025-10-05 10:12:35 +02:00
fencl
31771567b0 Refactor update_collabora.yml: remove unnecessary comments for clarity 2025-10-05 10:10:26 +02:00
fencl
b7d968b8cc Refactor update_collabora.yml: enable debug capabilities for Collabora service 2025-10-05 09:55:29 +02:00
fencl
49904d991e Refactor update_collabora.yml: enhance capability fetching with improved error handling and JSON parsing 2025-10-05 09:52:25 +02:00
fencl
bf60fdd9f1 Refactor update_collabora.yml: enhance capability fetching with improved JSON parsing and fallback handling 2025-10-05 09:47:03 +02:00
fencl
168b729503 Refactor update_collabora.yml: enhance capability fetching with retries and improved error handling 2025-10-05 09:43:55 +02:00
fencl
9f08ef2d76 Refactor update_collabora.yml: add capability fetching and display for Collabora service 2025-10-05 09:39:40 +02:00
fencl
463990f772 Refactor update_collabora.yml: add health check for Collabora container and optional orphan cleanup toggle 2025-10-05 09:36:14 +02:00
fencl
7bc6c917f1 Refactor update_collabora.yml: add health check for Collabora container and optional orphan cleanup toggle 2025-10-05 09:35:25 +02:00
fencl
866abc3d83 Refactor update_collabora.yml: remove unnecessary comments and streamline Docker command execution with a prefix for better compatibility 2025-10-05 09:30:22 +02:00
fencl
4dd4b3b6f7 Refactor update_collabora.yml: streamline Collabora update process with improved command structure and error handling 2025-10-05 09:27:19 +02:00
fencl
abc7fba684 Refactor update_collabora.yml: enhance Collabora configuration with robust detection and improved command execution 2025-10-05 09:26:01 +02:00
fencl
b7f6d38a32 Refactor update_collabora.yml: add comprehensive maintenance and update commands for Nextcloud and Collabora with error handling 2025-10-05 09:22:42 +02:00
fencl
1a8690529c Refactor check_stack_nextcloud.yml: enhance stack health check command with timeout and error handling 2025-10-05 09:16:16 +02:00
fencl
cbac27b3f2 Refactor check_stack_nextcloud.yml: uncomment stack health check command for execution 2025-10-05 09:10:17 +02:00
fencl
9869bbc383 Refactor: remove deprecated playbooks and configuration files for Nextcloud and Portainer 2025-10-05 09:08:35 +02:00
fencl
10f542989b Refactor miniplay.yml: enhance command execution structure, improve documentation, and add error handling for VM commands. 2025-10-05 08:56:11 +02:00
fencl
3fdea9f960 Refactor miniplay.yml: remove outdated documentation and improve VM hostname retrieval method for clarity and efficiency. 2025-10-05 08:48:47 +02:00
fencl
2678483149 Refactor miniplay.yml: enhance documentation for command execution parameters and update task structure for clarity and functionality. 2025-10-05 08:47:14 +02:00
fencl
fdd8834ea3 Refactor inv_linuxes_portainer and miniplay.yml: remove hardcoded passwords for security, simplify tasks, and enhance clarity. 2025-10-03 16:06:29 +02:00
fencl
27577a2ff9 Refactor portainer.yml and miniplay.yml: improve task naming consistency and enhance comments for clarity. 2025-10-03 15:56:01 +02:00
fencl
f36b78baa4 Refactor miniplay.yml: remove redundant prefix from task name for clarity. 2025-10-03 15:48:01 +02:00
fencl
21c6781bb3 Restore miniplay.yml: reintroduce sanity check tasks for SSH and sudo user verification. 2025-10-03 15:46:29 +02:00
fencl
71fd262c40 E 2025-10-03 15:43:59 +02:00
fencl
060065e040 Refactor portainer.yml and miniplay: remove hardcoded passwords for security, update comments for clarity, and enhance task descriptions. 2025-10-03 15:43:43 +02:00
fencl
9b111803c6 Refactor portainer.yml: update ansible_password comments for clarity and add optional SSH and sudo passwords; add sanity check playbook for SSH and sudo verification. 2025-10-03 15:38:22 +02:00
fencl
004b560004 Refactor portainer.yml and check_stack_nextcloud.yml: restore ansible_password, enhance sudo settings, and improve command formatting for clarity. 2025-10-03 15:30:32 +02:00
fencl
52fcb80ec4 Update ansible_password in portainer.yml to a placeholder value for security 2025-10-03 15:22:19 +02:00
fencl
243b88521d Update ansible_password in portainer.yml to remove quotes for consistency 2025-10-03 15:19:52 +02:00
fencl
5df4686c00 Refactor portainer configuration: standardize ansible_password, simplify SSH arguments, and clean up inventory group naming. 2025-10-03 15:16:44 +02:00
fencl
2643526326 Refactor portainer configuration: update ansible_password for consistency, enhance SSH settings, and rename host group in inventory for clarity. 2025-10-03 14:55:51 +02:00
fencl
1591c2e787 Update ansible_password in portainer.yml to use a static value for consistency 2025-10-03 14:50:06 +02:00
fencl
5c74f10f37 Refactor inventory and playbook for Nextcloud maintenance: update host configuration and streamline health check tasks 2025-10-03 14:49:30 +02:00
fencl
76fde11ad9 Update ansible_host for vm_portainer in nextcloud_stack to correct IP address 2025-10-03 14:35:24 +02:00
fencl
d40bb2984b Refactor health check playbook to update host target from 'proxmox' to 'nextcloud_stack' and streamline stack health script content 2025-10-03 14:32:32 +02:00
fencl
bbc27cb2f6 Refactor stack health check script to improve readability and structure, ensuring directory existence and enhancing validation for Docker, Nextcloud, database, and Redis connectivity. 2025-10-03 14:25:29 +02:00
fencl
6fbb3fb088 Enhance stack health checks by installing script inline and adding comprehensive validation for Nextcloud, database, and Redis connectivity 2025-10-03 14:20:53 +02:00
fencl
b4758ee0e1 Refactor health check playbook to ensure script existence before upload and restore requirements.yml for Nextcloud collections 2025-10-03 14:12:14 +02:00
fencl
20d2aacd47 Refactor health check playbook to ensure script existence before upload and simplify task structure 2025-10-03 14:10:21 +02:00
fencl
7b5d3a097e Restore requirements.yml for Nextcloud collections 2025-10-03 14:05:49 +02:00
fencl
65ea83638d Add Nextcloud deployment and management playbooks
- Introduced playbooks for health checks, Collabora updates, Nextcloud backups, upgrades, and Redis updates.
- Each playbook includes necessary tasks for managing services in a Docker environment.
2025-10-03 14:04:31 +02:00
595c0624d6 Merge pull request 'edit init 1' (#1) from edit into main
Reviewed-on: IM/ansible_fencl#1
2025-10-03 11:47:51 +00:00
fencl
2b5a2b4a1a edit init 1 2025-10-03 13:36:35 +02:00
b247ea0832 Update mikrotikbackup.yml 2025-09-19 10:58:22 +00:00
c476f04a8e Update inv_mikrotiks 2025-09-19 09:30:23 +00:00
5c185324d5 Update inv_linuxes 2025-09-18 12:31:07 +00:00
15 changed files with 1828 additions and 8 deletions

View File

@@ -1,5 +1,3 @@
[linux_servers]
jimbuntu ansible_host=192.168.19.4
jim_storage ansible_host=192.168.19.7
portainer2 ansible_host=192.168.52.9
portainernode ansible_host=192.168.52.21
proxmox_nextcloud ansible_host=192.168.69.2
proxmox_services ansible_host=192.168.69.3
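
A quick way to confirm the updated inventory resolves and the new Proxmox entries answer (a sketch; assumes the file is saved as inv_linuxes and SSH access to the hosts is already in place):

# ad-hoc ping of every host in the linux_servers group
ansible -i inv_linuxes linux_servers -m ping
# limit the check to the newly added Proxmox entries
ansible -i inv_linuxes linux_servers -m ping --limit 'proxmox_nextcloud,proxmox_services'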

View File

@@ -1,2 +1,3 @@
[mikrotiks]
storage ansible_host=192.168.19.3 ansible_ssh_common_args='-o StrictHostKeyChecking=no'
mikrotik_fencl_server ansible_host=192.168.69.1
mikrotik_fencl_5G ansible_host=192.168.68.1

View File

@@ -7,7 +7,7 @@
 - name: Ensure output directory exists
   ansible.builtin.file:
-    path: output
+    path: /opt/mikrotik_backups
     state: directory
     mode: '0755'
   delegate_to: localhost
@@ -42,7 +42,7 @@
 - name: Save export to local file
   ansible.builtin.copy:
     content: "{{ export_output.stdout }}"
-    dest: "output/{{ router_name }}-{{ current_date }}.config"
+    dest: "/opt/mikrotik_backups/{{ router_name }}-{{ current_date }}.config"
   delegate_to: localhost
   when: export_output.rc == 0
@@ -52,7 +52,7 @@
   when: system_identity.rc == 0
 - name: Download binary backup
-  shell: timeout 15 scp -o StrictHostKeyChecking=no -P {{ ansible_port }} {{ ansible_user }}@{{ ansible_host }}:{{ router_name }}-{{ current_date }}-backup.backup output/
+  shell: timeout 15 scp -o StrictHostKeyChecking=no -P {{ ansible_port }} {{ ansible_user }}@{{ ansible_host }}:{{ router_name }}-{{ current_date }}-backup.backup /opt/mikrotik_backups/
   delegate_to: localhost
   when: system_identity.rc == 0
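
A minimal spot-check of the new backup destination after a run (a sketch; assumes the playbook ran on the controller where /opt/mikrotik_backups lives):

# the newest config exports and binary backups should appear here
ls -lt /opt/mikrotik_backups | head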

View File

@@ -0,0 +1,68 @@
# nextcloud/check_stack_nextcloud.yml
- name: Run Nextcloud maintenance on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
# Flip to true if Docker needs sudo on the VM
use_sudo: false
vm_commands:
- "docker exec -u www-data nextcloud php -f /var/www/html/cron.php"
- "docker exec -u www-data nextcloud php occ app:update --all"
- "docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive"
- "docker exec -u www-data nextcloud php occ status"
- "set -o pipefail; timeout 180s bash -x /data/compose/nextcloud/stack-health.sh </dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Nextcloud commands on VM (via SSH, argv, no line breaks)
ansible.builtin.command:
argv:
- sshpass
- -p
- "{{ vm_pass }}"
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
loop: "{{ vm_commands }}"
register: vm_cmds
changed_when: false
- name: Show outputs for each command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ vm_cmds.results }}"
- name: Fail play if any command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Command failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All commands succeeded."
loop: "{{ vm_cmds.results }}"
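
One way to run this maintenance play outside Semaphore (a sketch; the inventory path and the VM_IP/VM_USER/VM_PASS values below are placeholders, not values from this repo):

# the play reads the VM connection from environment variables on the controller
VM_IP=192.0.2.10 VM_USER=admin VM_PASS='changeme' \
  ansible-playbook -i inventory nextcloud/check_stack_nextcloud.yml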

View File

@@ -0,0 +1,174 @@
# nextcloud/update_collabora.yml
- name: Update Collabora CODE on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Collabora specifics ---
collabora_debug_caps: true
collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"
# Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
collabora_project: "nextcloud-collabora"
collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
collabora_service: "collabora"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
collabora_commands:
- "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Collabora update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ collabora_commands }}"
loop_control:
index_var: idx # <-- capture loop index here
label: "cmd-{{ idx }}" # <-- use idx instead of loop.index
register: collab_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show outputs for each Collabora command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ collab_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Collabora command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Collabora update commands succeeded."
loop: "{{ collab_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Collabora | Wait for capabilities (controller first)
ansible.builtin.uri:
url: "{{ collabora_caps_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: caps_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 2
until: caps_controller.status == 200
failed_when: false
changed_when: false
- name: Collabora | VM-side fetch (pure JSON via Python) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
sys.stdout.write(r.read().decode())
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: caps_vm
changed_when: false
failed_when: false
when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Collabora | Choose JSON (controller wins, else VM)
ansible.builtin.set_fact:
collab_caps_json: >-
{{
(caps_controller.json
if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
else (
(caps_vm.stdout | default('') | trim | length > 0)
| ternary((caps_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Collabora | Print concise summary
ansible.builtin.debug:
msg: >-
Collabora {{ collab_caps_json.productVersion | default('?') }}
({{ collab_caps_json.productName | default('?') }}),
convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
serverId={{ collab_caps_json.serverId | default('n/a') }}
when: collab_caps_json is defined and DEBUG == 1
- name: Collabora | Capabilities unavailable (after retries)
ansible.builtin.debug:
msg: "Capabilities endpoint is not reachable even after retries."
when: collab_caps_json is not defined and DEBUG == 1
# Optional full JSON (debug)
- name: Collabora | Full JSON (debug)
ansible.builtin.debug:
var: collab_caps_json
when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1
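
The same capabilities probe can be reproduced by hand from the controller (a sketch; assumes curl and python3 are available):

# expect HTTP 200 and a JSON document containing productVersion and productName
curl -fsS https://collabora.martinfencl.eu/hosting/capabilities | python3 -m json.tool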

View File

@@ -0,0 +1,287 @@
# nextcloud/update_nextcloud.yml
- name: Update Nextcloud on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug / retries ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Nextcloud specifics ---
nextcloud_project: "nextcloud-collabora"
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
nextcloud_service: "nextcloud"
# Backup directory on the VM (timestamped on controller)
backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# --- Backup phase commands (run on VM) ---
nextcloud_backup_commands:
- >
mkdir -p "{{ backup_dir }}"
- >
docker exec -u www-data nextcloud php occ maintenance:mode --on
# Create tarball of config + custom_apps inside the container
- >
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
# Copy that tarball to the host backup directory
- >
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
# Remove temporary file inside the container
- >
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
# Database dump from DB container (unchanged)
- >
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
# --- Upgrade phase commands (run on VM) ---
nextcloud_upgrade_commands:
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}
- >
docker exec -u www-data nextcloud php occ upgrade
- >
docker exec -u www-data nextcloud php occ app:update --all || true
- >
docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Nextcloud | Show current version before upgrade (DEBUG)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- 'docker exec -u www-data nextcloud php occ -V || true'
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_version_before
changed_when: false
failed_when: false
when: DEBUG == 1
# -------------------------
# Backup phase
# -------------------------
- name: Nextcloud | Run backup commands on VM (via SSH) # run plain commands via SSH
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_backup_commands }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
register: nc_backup_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of backup commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_backup_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any backup command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud backup commands succeeded."
loop: "{{ nc_backup_cmds.results }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
# -------------------------
# Upgrade phase
# -------------------------
- name: Nextcloud | Run upgrade commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_upgrade_commands }}"
loop_control:
index_var: idx
label: "upgrade-cmd-{{ idx }}"
register: nc_upgrade_cmds
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of upgrade commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_upgrade_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any upgrade command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud upgrade commands succeeded."
loop: "{{ nc_upgrade_cmds.results }}"
loop_control:
index_var: idx
label: "upgrade-cmd-{{ idx }}"
- name: Nextcloud | Disable maintenance mode (only after successful upgrade)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_maint_off
changed_when: false
no_log: "{{ DEBUG == 0 }}"
# -------------------------
# Readiness check (status.php)
# -------------------------
- name: Nextcloud | Wait for status.php (controller first)
ansible.builtin.uri:
url: "{{ nextcloud_status_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: nc_status_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 4
until: nc_status_controller.status == 200
failed_when: false
changed_when: false
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
sys.stdout.write(r.read().decode())
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_status_vm
changed_when: false
failed_when: false
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Choose status JSON (controller wins, else VM)
ansible.builtin.set_fact:
nextcloud_status_json: >-
{{
(nc_status_controller.json
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
else (
(nc_status_vm.stdout | default('') | trim | length > 0)
| ternary((nc_status_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Nextcloud | Print concise status summary (DEBUG)
ansible.builtin.debug:
msg: >-
Nextcloud {{ nextcloud_status_json.version | default('?') }}
(installed={{ nextcloud_status_json.installed | default('?') }},
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
when: nextcloud_status_json is defined and DEBUG == 1
- name: Nextcloud | Status JSON not available (DEBUG)
ansible.builtin.debug:
msg: "status.php is not reachable or did not return JSON."
when: nextcloud_status_json is not defined and DEBUG == 1
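
status.php can also be checked manually once the play has finished (a sketch; assumes curl is available on the controller):

# maintenance and needsDbUpgrade should both be false after a successful upgrade
curl -fsS https://cloud.martinfencl.eu/status.php | python3 -m json.tool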

View File

@@ -0,0 +1,293 @@
# nextcloud/update_nextcloud_db_redis.yml
- name: Update Nextcloud DB and Redis on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug / retries ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Nextcloud specifics ---
nextcloud_project: "nextcloud-collabora"
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
# Service names from docker-compose file
nextcloud_web_service: "nextcloud"
nextcloud_db_service: "nextclouddb"
redis_service: "redis"
# Backup directory on the VM (timestamped on controller)
backup_dir: "/data/compose/nextcloud/backup-db-redis-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
nextcloud_base_url: "https://cloud.martinfencl.eu"
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# --- Backup phase commands (run on VM) ---
# Same idea as in update_nextcloud.yml: maintenance on + config/custom_apps + DB dump
nextcloud_backup_commands:
- >
mkdir -p "{{ backup_dir }}"
- >
docker exec -u www-data nextcloud php occ maintenance:mode --on
- >
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
- >
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
- >
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
- >
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
# --- DB + Redis upgrade commands (run on VM) ---
db_redis_upgrade_commands:
# Update MariaDB service
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_db_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_db_service }}
# Simple DB health check (non-fatal)
- >
docker exec nextcloud-db sh -c 'mysqladmin ping -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' || true
# Update Redis service
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ redis_service }}
- >
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ redis_service }}
# Simple Redis health check (non-fatal)
- >
docker exec redis redis-cli PING || true
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Nextcloud | Show current version before DB/Redis upgrade (DEBUG)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- 'docker exec -u www-data nextcloud php occ -V || true'
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_version_before
changed_when: false
failed_when: false
when: DEBUG == 1
# -------------------------
# Backup phase
# -------------------------
- name: Nextcloud | Run backup commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ nextcloud_backup_commands }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
register: nc_backup_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of backup commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_backup_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any backup command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud DB/Redis backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud DB/Redis backup commands succeeded."
loop: "{{ nc_backup_cmds.results }}"
loop_control:
index_var: idx
label: "backup-cmd-{{ idx }}"
# -------------------------
# DB + Redis upgrade phase
# -------------------------
- name: Nextcloud | Run DB/Redis upgrade commands on VM (via SSH)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ db_redis_upgrade_commands }}"
loop_control:
index_var: idx
label: "db-redis-cmd-{{ idx }}"
register: nc_db_redis_cmds
changed_when: false
failed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Show outputs of DB/Redis upgrade commands (DEBUG)
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ nc_db_redis_cmds.results }}"
when: DEBUG == 1
- name: Nextcloud | Fail play if any DB/Redis upgrade command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Nextcloud DB/Redis upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Nextcloud DB/Redis upgrade commands succeeded."
loop: "{{ nc_db_redis_cmds.results }}"
loop_control:
index_var: idx
label: "db-redis-cmd-{{ idx }}"
# -------------------------
# Disable maintenance + readiness check
# -------------------------
- name: Nextcloud | Disable maintenance mode after DB/Redis upgrade
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_maint_off
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Wait for status.php (controller first)
ansible.builtin.uri:
url: "{{ nextcloud_status_url }}"
method: GET
return_content: true
validate_certs: true
status_code: 200
register: nc_status_controller
delegate_to: localhost
run_once: true
retries: "{{ RETRIES }}"
delay: 4
until: nc_status_controller.status == 200
failed_when: false
changed_when: false
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
import json, urllib.request, sys
try:
with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
sys.stdout.write(r.read().decode())
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: nc_status_vm
changed_when: false
failed_when: false
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
no_log: "{{ DEBUG == 0 }}"
- name: Nextcloud | Choose status JSON (controller wins, else VM)
ansible.builtin.set_fact:
nextcloud_status_json: >-
{{
(nc_status_controller.json
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
else (
(nc_status_vm.stdout | default('') | trim | length > 0)
| ternary((nc_status_vm.stdout | trim | from_json), omit)
)
)
}}
failed_when: false
- name: Nextcloud | Print concise status summary (DEBUG)
ansible.builtin.debug:
msg: >-
Nextcloud {{ nextcloud_status_json.version | default('?') }}
(installed={{ nextcloud_status_json.installed | default('?') }},
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
when: nextcloud_status_json is defined and DEBUG == 1
- name: Nextcloud | Status JSON not available (DEBUG)
ansible.builtin.debug:
msg: "status.php is not reachable or did not return JSON."
when: nextcloud_status_json is not defined and DEBUG == 1
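
The two non-fatal health checks embedded in the command list can be repeated interactively on the VM (a sketch; the container names nextcloud-db and redis are the ones used in the playbook above):

# MariaDB should answer "mysqld is alive"
docker exec nextcloud-db sh -c 'mysqladmin ping -u"$MYSQL_USER" -p"$MYSQL_PASSWORD"'
# Redis should answer "PONG"
docker exec redis redis-cli PING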

View File

@@ -0,0 +1,118 @@
# update_portainer_agent.yml
- name: Update Portainer Agent on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Portainer Agent specifics ---
portainer_agent_image: "portainer/agent:latest"
portainer_agent_container: "portainer_agent"
portainer_agent_port: 9001
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
portainer_commands:
- "{{ docker_prefix }} pull -q {{ portainer_agent_image }} >/dev/null"
- "{{ docker_prefix }} stop {{ portainer_agent_container }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} rm {{ portainer_agent_container }} >/dev/null 2>&1 || true"
- >
{{ docker_prefix }} run -d
--name {{ portainer_agent_container }}
--restart=always
-p {{ portainer_agent_port }}:9001
-v /var/run/docker.sock:/var/run/docker.sock
-v /var/lib/docker/volumes:/var/lib/docker/volumes
{{ portainer_agent_image }} >/dev/null
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Portainer Agent update commands on VM (via SSH) # run all commands via sshpass
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ portainer_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: portainer_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Portainer command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ portainer_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Portainer command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Portainer Agent update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Portainer Agent update commands succeeded."
loop: "{{ portainer_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness check (TCP port)
# -------------------------
- name: Portainer Agent | Wait for TCP port to be open from controller
ansible.builtin.wait_for:
host: "{{ vm_ip }}"
port: "{{ portainer_agent_port }}"
delay: 2 # initial delay before first check
timeout: "{{ RETRIES * 2 }}" # total timeout in seconds
state: started
register: portainer_wait
delegate_to: localhost
run_once: true
changed_when: false
- name: Portainer Agent | Print concise summary
ansible.builtin.debug:
msg: >-
Portainer Agent TCP {{ vm_ip }}:{{ portainer_agent_port }}
reachable={{ (portainer_wait is defined) and (not portainer_wait.failed | default(false)) }}
elapsed={{ portainer_wait.elapsed | default('n/a') }}s
when: DEBUG == 1
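
The same TCP readiness check can be done by hand from the controller (a sketch; assumes netcat is installed and VM_IP is exported in the shell):

# exit status 0 means port 9001 accepts connections
nc -z -w 5 "$VM_IP" 9001 && echo "agent reachable" || echo "agent NOT reachable"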

test.yml Normal file (56 lines added)
View File

@@ -0,0 +1,56 @@
- name: Test connectivity from Semaphore container to Homarr VMs
hosts: localhost
gather_facts: false
vars:
# List of VMs you want to test
vm_targets:
- { ip: "192.168.69.253" }
- { ip: "192.168.69.254" }
# Credentials (ideally from env/secret)
vm_user: "{{ lookup('env', 'VM_USER') | default('howard') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
tasks:
- name: Ensure sshpass is installed (inside container) # install sshpass
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Ping VM IPs from container # simple ICMP ping
ansible.builtin.command: "ping -c 2 {{ item.ip }}"
loop: "{{ vm_targets }}"
register: ping_results
ignore_errors: true
- name: Show ping results
ansible.builtin.debug:
msg: "Ping to {{ item.item.ip }} -> rc={{ item.rc }}, stdout={{ item.stdout }}"
loop: "{{ ping_results.results }}"
- name: Test SSH to VM with ssh (SSH key)
ansible.builtin.command:
argv:
- ssh
- -i
- /path/to/id_rsa # put the path to the private key inside the container here
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=5
- "{{ vm_user }}@{{ item.ip }}"
- "echo OK-from-{{ item.ip }}"
loop: "{{ vm_targets }}"
register: ssh_results
ignore_errors: true
- name: Show SSH results
ansible.builtin.debug:
msg: |
SSH to {{ item.item.ip }}:
rc={{ item.rc }}
stdout={{ item.stdout }}
stderr={{ item.stderr }}
loop: "{{ ssh_results.results }}"
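
Because test.yml targets localhost, it can be run straight inside the Semaphore container (a sketch; the password value is a placeholder and the SSH-key path in the play still needs to be filled in):

VM_USER=howard VM_PASS='changeme' ansible-playbook test.yml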

update_broker_kafka-ui.yml Normal file (155 lines added)
View File

@@ -0,0 +1,155 @@
# update_broker_kafka-ui.yml
- name: Update Kafka broker3 and Redpanda Console on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Kafka / Redpanda Console specifics ---
kafka_project: "kafka"
# Adjusted to match your actual compose file location
kafka_compose_file: "/data/compose/docker-compose.yml"
kafka_services:
- broker3
- kafka-ui
redpanda_console_port: 8084
# Controller-side URL (default to direct VM IP/port or external URL)
redpanda_console_url: "{{ lookup('env', 'REDPANDA_CONSOLE_URL') | default('http://192.168.69.254:8084/overview', true) }}"
redpanda_retries: "{{ RETRIES }}"
redpanda_delay: 2
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
# 1) Pull latest images for broker3 + kafka-ui
# 2) Stop any running containers with these names (legacy or compose-managed)
# 3) Remove any containers with these names to avoid name conflicts
# 4) Recreate services via docker compose
kafka_commands:
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} pull {{ kafka_services | join(' ') }} >/dev/null"
- "{{ docker_prefix }} stop {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} rm -f {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} up -d --no-deps --force-recreate {{ kafka_services | join(' ') }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Kafka update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kafka_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kafka_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Kafka command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kafka_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Kafka command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Kafka/Redpanda Console update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Kafka/Redpanda Console update commands succeeded."
loop: "{{ kafka_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness check Redpanda Console UI
# -------------------------
- name: Redpanda Console | Wait for overview page (controller, with retries)
ansible.builtin.uri:
url: "{{ redpanda_console_url }}"
method: GET
return_content: true
validate_certs: false # plain HTTP on 192.168.69.254 (or as configured)
status_code: 200
register: redpanda_controller
delegate_to: localhost
run_once: true
when: redpanda_console_url is defined and (redpanda_console_url | length) > 0
retries: "{{ redpanda_retries }}"
delay: "{{ redpanda_delay }}"
until: redpanda_controller.status == 200
failed_when: false
changed_when: false
- name: Redpanda Console | Print concise summary
ansible.builtin.debug:
msg: >-
Redpanda Console overview {{ 'reachable' if (redpanda_controller is defined and (redpanda_controller.status|default(0))==200) else 'NOT reachable' }}.
status={{ redpanda_controller.status | default('n/a') }};
length={{ (redpanda_controller.content | default('')) | length }};
when: DEBUG == 1 and (redpanda_controller is defined)
# Optional detailed dump (short excerpt only)
- name: Redpanda Console | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (redpanda_controller.content | default(''))[:500] }}"
when: DEBUG == 1 and (redpanda_controller is defined) and (redpanda_controller.content is defined)
# -------------------------
# Final assertion: Console URL must be reachable
# -------------------------
- name: Redpanda Console | Assert overview reachable (if URL configured)
ansible.builtin.assert:
that:
- >
not (redpanda_console_url is defined and (redpanda_console_url | length) > 0)
or
(
redpanda_controller is defined
and (redpanda_controller.status | default(0)) == 200
)
fail_msg: "Redpanda Console URL {{ redpanda_console_url }} is NOT reachable with HTTP 200 after retries."
success_msg: "Redpanda Console URL {{ redpanda_console_url }} is reachable with HTTP 200."
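
A manual equivalent of the final Redpanda Console assertion (a sketch; uses the playbook's default URL, override via REDPANDA_CONSOLE_URL if yours differs):

# prints only the HTTP status code; expect 200
curl -s -o /dev/null -w '%{http_code}\n' http://192.168.69.254:8084/overview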

update_homarr.yml Normal file (194 lines added)
View File

@@ -0,0 +1,194 @@
# update_homarr.yml
- name: Update Homarr on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Homarr specifics
homarr_project: "homarr"
homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
homarr_service: "homarr"
homarr_image: "ghcr.io/homarr-labs/homarr:latest"
homarr_port: 7575
# Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"
# Fixed container name used in your compose (avoid conflicts with any leftover container)
homarr_container_name: "homarr"
# Retry policy (same pattern as Kuma): 25x with 2s delay
homarr_retries: "{{ RETRIES }}"
homarr_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
homarr_commands:
- "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Homarr update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ homarr_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: homarr_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Homarr command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ homarr_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Homarr command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Homarr update commands succeeded."
loop: "{{ homarr_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Homarr | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
status_code: 200
register: homarr_controller
delegate_to: localhost
run_once: true
when: homarr_url is defined and (homarr_url | length) > 0
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: homarr_controller.status == 200
failed_when: false
changed_when: false
- name: Homarr | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Homarr homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: homarr_vm
changed_when: false
failed_when: false
when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
retries: "{{ homarr_retries }}"
delay: "{{ homarr_delay }}"
until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
- name: Homarr | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
homarr_home_html: >-
{{
(
homarr_controller.content
if (homarr_controller is defined)
and ((homarr_controller.status|default(0))==200)
and (homarr_controller.content is defined)
else
(homarr_vm.stdout | default('') | trim)
)
}}
when:
- (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
or ((homarr_vm.stdout | default('') | trim | length) > 0)
- name: Homarr | Print concise summary
ansible.builtin.debug:
msg: >-
Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (homarr_home_html | default('')) | length }};
contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
when: DEBUG == 1
- name: Homarr | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Homarr web UI is not reachable even after retries."
when: homarr_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Homarr | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (homarr_home_html | default(''))[:500] }}"
when: homarr_home_html is defined and DEBUG == 1
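
The VM-side fallback probe boils down to this one-liner when run directly on the VM (a sketch; port 7575 is the homarr_port defined above):

# the homepage HTML should contain the string "Homarr"
curl -fsS http://127.0.0.1:7575/ | grep -c Homarr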

update_immich.yml Normal file (217 lines added)
View File

@@ -0,0 +1,217 @@
# update_immich.yml
- name: Update Immich on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# --- Immich specifics ---
immich_project: "immich"
# Where compose file lives on the VM
immich_compose_dir: "/opt/immich"
immich_compose_file: "{{ immich_compose_dir }}/docker-compose.yml"
# Official Immich compose URL (latest release)
immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"
immich_port: 2283
# Optional external URL for controller-side readiness check
# Default to https://photos.martinfencl.eu/photos if IMMICH_URL is not set
immich_url: "{{ lookup('env', 'IMMICH_URL') | default('https://photos.martinfencl.eu/photos', true) }}"
immich_retries: "{{ RETRIES }}"
immich_delay: 2
# Docker command prefix (consistent behavior and quiet hints)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs)
# 1) Download latest docker-compose.yml from GitHub (with backup)
# 2) Pull images according to compose
# 3) Start / update stack
immich_commands:
- "cd {{ immich_compose_dir }} && wget -qO docker-compose.yml.new {{ immich_compose_url }} || true; if [ -s docker-compose.yml.new ]; then echo 'Downloaded new docker-compose.yml from GitHub (Immich latest).'; if [ -f docker-compose.yml ]; then cp docker-compose.yml docker-compose.yml.bak-$(date +%F_%H-%M-%S); echo 'Existing docker-compose.yml backed up.'; fi; mv docker-compose.yml.new docker-compose.yml; else echo 'WARNING: Failed to download a valid docker-compose.yml, keeping existing one.' >&2; rm -f docker-compose.yml.new 2>/dev/null || true; fi"
- "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} pull >/dev/null"
- "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} up -d --remove-orphans --force-recreate >/dev/null"
# - "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} up -d --remove-orphans >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH) # English comments
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Immich update commands on VM (via SSH) # use SSHPASS env, hide item value
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}"
loop: "{{ immich_commands }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
register: immich_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}"
run_once: true # <<< add
- name: Show outputs for each Immich command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ immich_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Immich command failed
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Immich update commands succeeded."
loop: "{{ immich_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
run_once: true
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Immich | Wait for web UI (controller first, with retries)
ansible.builtin.uri:
url: "{{ (immich_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
status_code: 200
register: immich_controller
delegate_to: localhost
run_once: true
when: immich_url is defined and (immich_url | length) > 0
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: immich_controller.status == 200
failed_when: false # allow task to finish without failing the play
changed_when: false
- name: Immich | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Immich web UI from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: immich_vm
changed_when: false
failed_when: false
when: immich_controller.status | default(0) != 200 or immich_controller.content is not defined
retries: "{{ immich_retries }}"
delay: "{{ immich_delay }}"
until: (immich_vm.stdout | default('') | trim | length) > 0 and ('Immich' in (immich_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}"
- name: Immich | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
immich_home_html: >-
{{
(
immich_controller.content
if (immich_controller is defined)
and ((immich_controller.status|default(0))==200)
and (immich_controller.content is defined)
else
(immich_vm.stdout | default('') | trim)
)
}}
when:
- (immich_controller is defined and (immich_controller.status|default(0))==200 and (immich_controller.content is defined))
or ((immich_vm.stdout | default('') | trim | length) > 0)
- name: Immich | Print concise summary
ansible.builtin.debug:
msg: >-
Immich web UI {{ 'reachable' if (immich_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((immich_controller is defined) and (immich_controller.status|default(0))==200 and (immich_controller.content is defined)) else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (immich_home_html | default('')) | length }};
contains('Immich')={{ (immich_home_html is defined) and ('Immich' in immich_home_html) }}
when: DEBUG == 1
- name: Immich | Web UI unavailable (after retries)
ansible.builtin.debug:
msg: "Immich web není dostupný ani po pokusech."
when: immich_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Immich | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (immich_home_html | default(''))[:500] }}"
when: immich_home_html is defined and DEBUG == 1
# -------------------------
# Final assertion: controller URL must be reachable
# -------------------------
- name: Immich | Assert controller URL reachable (if configured)
ansible.builtin.assert:
that:
- >
not (immich_url is defined and (immich_url | length) > 0)
or
(
immich_controller is defined
and (immich_controller.status | default(0)) == 200
)
fail_msg: "Immich controller URL {{ immich_url }} is NOT reachable with HTTP 200 after retries."
success_msg: "Immich controller URL {{ immich_url }} is reachable with HTTP 200."

65
update_semaphore.yml Normal file
View File

@@ -0,0 +1,65 @@
# update_semaphore.yml
- name: Update Semaphore on VM via Proxmox
hosts: linux_servers
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# --- Connection to VM (provided by Semaphore env vars) ---
vm_ip: "{{ lookup('env', 'VM_IP') }}" # IP vm-portainer
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
# --- Semaphore specifics ---
semaphore_project: "semaphore"
semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
semaphore_service: "semaphore"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
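# The recreate replaces the Semaphore container that is most likely running this very
# task, so the command is detached with nohup + '&' and the task returns immediately
# instead of waiting for the restart to finish.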
- name: Run Semaphore self-update on VM in background (nohup)
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
nohup bash -c '
unalias docker 2>/dev/null || true
DOCKER_CLI_HINTS=0 docker compose \
-p {{ semaphore_project }} \
-f {{ semaphore_compose_file }} \
up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
' >/dev/null 2>&1 &
environment:
SSHPASS: "{{ vm_pass }}"
register: semaphore_update
changed_when: false
no_log: "{{ DEBUG == 0 }}"
- name: Show result of Semaphore self-update (debug)
ansible.builtin.debug:
msg: |
RC: {{ semaphore_update.rc }}
STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
when: DEBUG == 1
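Because the self-update detaches and returns right away, the play finishes before the new Semaphore container is actually up. If a readiness check is wanted, a task in the style of the other playbooks could poll the web UI from the controller. A minimal sketch, assuming the UI is exposed via a SEMAPHORE_URL environment variable (which the playbook above does not currently read):

- name: Semaphore | Wait for web UI (controller, with retries)  # sketch only
  ansible.builtin.uri:
    url: "{{ lookup('env', 'SEMAPHORE_URL') }}"
    method: GET
    status_code: 200
    validate_certs: "{{ lookup('env', 'SEMAPHORE_URL') is match('^https://') }}"
  register: semaphore_ui
  delegate_to: localhost
  run_once: true
  retries: 25
  delay: 2
  until: semaphore_ui.status == 200
  changed_when: false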

194
update_uptime_kuma.yml Normal file
View File

@@ -0,0 +1,194 @@
# update_uptime_kuma.yml
- name: Update Uptime Kuma on VM via Proxmox
hosts: proxmox
gather_facts: false
become: true
become_user: root
become_method: sudo
vars:
# VM connection (provided by Semaphore env vars)
vm_ip: "{{ lookup('env', 'VM_IP') }}"
vm_user: "{{ lookup('env', 'VM_USER') }}"
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
use_sudo: false
# --- Debug mode (controlled via Semaphore variable) ---
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
# Uptime Kuma specifics
kuma_project: "uptime-kuma"
kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
kuma_service: "uptime-kuma"
kuma_image: "louislam/uptime-kuma:latest"
kuma_port: 3001
# Optional external URL for controller-side readiness check (e.g., https://kuma.example.com)
# If empty/undefined, controller check is skipped and we only probe from the VM.
kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"
# Fixed container name used in the compose file (can conflict with a previously created or Portainer-run container)
kuma_container_name: "uptime-kuma-dev"
# Retry policy
kuma_retries: "{{ RETRIES }}"
kuma_delay: 2
# Docker command prefix (consistent behavior)
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
# Commands to run on the target VM (quiet outputs like in Collabora play)
kuma_commands:
- "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
# remove conflicting container name before compose up (silently)
- "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
- "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"
tasks:
- name: Ensure sshpass is installed (for password-based SSH)
ansible.builtin.apt:
name: sshpass
state: present
update_cache: yes
- name: Run Uptime Kuma update commands on VM (via SSH) # use SSHPASS env, hide item label
ansible.builtin.command:
argv:
- sshpass
- -e # read password from SSHPASS environment
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- "{{ ('sudo ' if use_sudo else '') + item }}"
environment:
SSHPASS: "{{ vm_pass }}" # supply password via environment
loop: "{{ kuma_commands }}"
loop_control:
index_var: idx # capture loop index
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
register: kuma_cmds
changed_when: false
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
- name: Show outputs for each Uptime Kuma command
ansible.builtin.debug:
msg: |
CMD: {{ item.item }}
RC: {{ item.rc }}
STDOUT:
{{ (item.stdout | default('')).strip() }}
STDERR:
{{ (item.stderr | default('')).strip() }}
loop: "{{ kuma_cmds.results }}"
when: DEBUG == 1
- name: Fail play if any Uptime Kuma command failed # also hide item label
ansible.builtin.assert:
that: "item.rc == 0"
fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
success_msg: "All Uptime Kuma update commands succeeded."
loop: "{{ kuma_cmds.results }}"
loop_control:
index_var: idx
label: "cmd-{{ idx }}"
# -------------------------
# Readiness checks (controller first, then VM fallback)
# -------------------------
- name: Kuma | Wait for homepage (controller first, with retries)
ansible.builtin.uri:
url: "{{ (kuma_url | regex_replace('/$','')) + '/' }}"
method: GET
return_content: true
# Validate TLS only when using https://
validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
status_code: 200
register: kuma_controller
delegate_to: localhost
run_once: true
when: kuma_url is defined and (kuma_url | length) > 0
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: kuma_controller.status == 200
failed_when: false
changed_when: false
- name: Kuma | VM-side fetch (HTML via Python, with retries) # use SSHPASS env here too
ansible.builtin.command:
argv:
- sshpass
- -e
- ssh
- -o
- StrictHostKeyChecking=no
- -o
- ConnectTimeout=15
- "{{ vm_user }}@{{ vm_ip }}"
- bash
- -lc
- |
python3 - <<'PY'
# Fetch Kuma homepage from localhost and print HTML to stdout
import urllib.request, sys
try:
with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
sys.stdout.write(r.read().decode(errors='ignore'))
except Exception:
pass
PY
environment:
SSHPASS: "{{ vm_pass }}"
register: kuma_vm
changed_when: false
failed_when: false
when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined
retries: "{{ kuma_retries }}"
delay: "{{ kuma_delay }}"
until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
no_log: "{{ DEBUG == 0 }}" # hide command and output when not debugging
- name: Kuma | Choose homepage HTML (controller wins, else VM) # safe guard against empty result
ansible.builtin.set_fact:
kuma_home_html: >-
{{
(
kuma_controller.content
if (kuma_controller is defined)
and ((kuma_controller.status|default(0))==200)
and (kuma_controller.content is defined)
else
(kuma_vm.stdout | default('') | trim)
)
}}
when:
- (kuma_controller is defined and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined))
or ((kuma_vm.stdout | default('') | trim | length) > 0)
- name: Kuma | Print concise summary
ansible.builtin.debug:
msg: >-
Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
Source={{ 'controller' if ((kuma_controller is defined) and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined)) else 'vm' if (kuma_vm.stdout|default('')|trim|length>0) else 'n/a' }};
length={{ (kuma_home_html | default('')) | length }};
contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
when: DEBUG == 1
- name: Kuma | Homepage unavailable (after retries)
ansible.builtin.debug:
msg: "Kuma web není dostupná ani po pokusech."
when: kuma_home_html is not defined and DEBUG == 1
# Optional detailed dump (short excerpt only)
- name: Kuma | HTML excerpt (debug)
ansible.builtin.debug:
msg: "{{ (kuma_home_html | default(''))[:500] }}"
when: kuma_home_html is defined and DEBUG == 1