# nextcloud/update_uptime_kuma.yml
- name: Update Uptime Kuma on VM via Proxmox
  hosts: proxmox
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # ---- VM access via sshpass (same pattern as your Collabora playbook) ----
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # ---- Uptime Kuma specifics ----
    kuma_url: "https://monitor.martinfencl.eu/"
    kuma_project: "uptime-kuma"  # adjust if your compose project has a different name
    kuma_compose_file: "/data/compose/uptime-kuma/docker-compose.yml"  # adjust path to your compose file

    # ---- Docker CLI prefix (keeps your style) ----
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # ---- Update commands: pull image + compose pull/up for the specific service ----
    kuma_commands:
      - "{{ docker_prefix }} pull -q louislam/uptime-kuma:latest >/dev/null"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull uptime-kuma >/dev/null"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate uptime-kuma >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: true

    - name: Run Uptime Kuma update commands on VM (via SSH)
      ansible.builtin.command:
        argv:
          - sshpass
          - -p
          - "{{ vm_pass }}"
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      loop: "{{ kuma_commands }}"
      register: kuma_cmds
      changed_when: false

    - name: Show outputs for each Uptime Kuma command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT: {{ (item.stdout | default('')).strip() }}
          STDERR: {{ (item.stderr | default('')).strip() }}
      loop: "{{ kuma_cmds.results }}"

    - name: Fail play if any Uptime Kuma command failed
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Uptime Kuma update commands succeeded."
      loop: "{{ kuma_cmds.results }}"
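    # Optional sketch (not part of the original flow): print what the compose
    # service reports right after the recreate. Assumes the same sshpass SSH
    # pattern and that the compose service is named "uptime-kuma"; informational
    # only, so a non-zero exit is ignored.
    - name: Show Uptime Kuma compose service status (via SSH, informational)
      ansible.builtin.command:
        argv:
          - sshpass
          - -p
          - "{{ vm_pass }}"
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + docker_prefix + ' compose -p ' + kuma_project + ' -f ' + kuma_compose_file + ' ps uptime-kuma' }}"
      register: kuma_ps
      changed_when: false
      failed_when: false

    - name: Print compose service status (informational)
      ansible.builtin.debug:
        msg: "{{ kuma_ps.stdout | default('') }}"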
loop: "{{ kuma_cmds.results }}" # ---- Health check from the controller: wait for 200 on the public URL ---- - name: Uptime Kuma | Wait for web to return 200 (controller first) ansible.builtin.uri: url: "{{ kuma_url }}" method: GET return_content: true validate_certs: true status_code: 200 register: kuma_controller delegate_to: localhost run_once: true retries: 15 # allow a bit more time after container recreate delay: 2 until: kuma_controller.status == 200 failed_when: false changed_when: false # ---- Optional: VM-side fetch (curl via SSH) to double-check reachability from the VM ---- - name: Uptime Kuma | VM-side fetch HTML (via Python) ansible.builtin.command: argv: - sshpass - -p - "{{ vm_pass }}" - ssh - -o - StrictHostKeyChecking=no - -o - ConnectTimeout=15 - "{{ vm_user }}@{{ vm_ip }}" - bash - -lc - | python3 - <<'PY' import sys, urllib.request, ssl try: ctx = ssl.create_default_context() with urllib.request.urlopen("{{ kuma_url }}", timeout=15, context=ctx) as r: sys.stdout.write(r.read().decode(errors="ignore")) except Exception: pass PY register: kuma_vm changed_when: false failed_when: false when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined # ---- Pick HTML source (controller wins) ---- - name: Uptime Kuma | Choose HTML (controller wins, else VM) ansible.builtin.set_fact: kuma_html: >- {{ (kuma_controller.content if (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined) else ((kuma_vm.stdout | default('') | trim | length > 0) | ternary(kuma_vm.stdout | trim, omit)) ) }} failed_when: false # ---- Print concise summary (tries to extract