forked from jakub/ansible
Compare commits
42 Commits
cf2507bdf6
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
| b727d51dfd | |||
| 75f2f20531 | |||
| 8e5c1377eb | |||
| 5ac5e82b16 | |||
| b95bdf0b3a | |||
| 3464fe007a | |||
| 62d64b0411 | |||
| 1bad80c04d | |||
| f46ab32d7c | |||
| bd775c5163 | |||
| ad318c50fd | |||
| fdc61bd22e | |||
| 3238ad0a5e | |||
| b1a849824f | |||
| 11a48e4ccb | |||
| e42363aaec | |||
| 79ee0ecd46 | |||
| 8fd180ab11 | |||
| 07bc4693e3 | |||
| 8ea60d9e15 | |||
| 4eb25cb78b | |||
| 4de04d0d3a | |||
| f4262bcb27 | |||
| 5c69d3a03f | |||
| 547c9fadc5 | |||
| c07181291c | |||
| 1a0ce36efe | |||
| 8b57f27ec6 | |||
| 085e7177f4 | |||
| 3099a0b2b8 | |||
| 3d89bc523e | |||
| 61d288f92a | |||
| 61beedd023 | |||
| bb37cdaa53 | |||
| b805b506b4 | |||
| 9fad4e4d1a | |||
| a632da2a62 | |||
| cf21ad70c1 | |||
| 1deb268d73 | |||
| 8373252ae9 | |||
| 13a48cd734 | |||
| b497723769 |
39
homarr.yml
Normal file
39
homarr.yml
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
---
# homarr.yml — pull the newest Homarr image, force-recreate the service,
# then verify the web UI is answering before declaring success.
- name: Update Homarr
  hosts: linux_servers
  become: true
  gather_facts: false

  vars:
    # NOTE(review): homarr_project is currently unused by the tasks below
    # (docker_compose_v2 derives the project from project_src) — confirm
    # whether it should be passed as project_name, or drop it.
    homarr_project: homarr
    # Compose file that defines the Homarr stack; its dirname/basename are
    # split below for project_src/files.
    homarr_compose_file: /data/compose/homarr/docker-compose-homarr.yml
    homarr_service: homarr
    # Port the Homarr web UI listens on; checked on the target host itself.
    homarr_port: 7575

  tasks:
    # Phase 1: refresh the image (pull: always forces a registry pull even
    # when an image with the same tag exists locally).
    - name: Pull latest Homarr image
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        pull: always

    # Phase 2: recreate only the homarr service so it picks up the new image.
    - name: Recreate Homarr service
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files:
          - "{{ homarr_compose_file | basename }}"
        services:
          - "{{ homarr_service }}"
        state: present
        recreate: always

    # Phase 3: health checks — first the TCP port, then an actual HTTP 200.
    - name: Wait for Homarr port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ homarr_port }}"
        timeout: 60

    - name: Check Homarr HTTP endpoint
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ homarr_port }}/"
        status_code: 200
|
||||||
111
initial_setup.yml
Normal file
111
initial_setup.yml
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
---
# initial_setup.yml — play 1: baseline user provisioning.
# Creates the admin users, enforces exactly the listed SSH keys
# (exclusive: true removes any key not listed), and grants
# passwordless sudo via validated /etc/sudoers.d drop-ins.
- name: Baseline user setup
  hosts: all
  become: true

  vars:
    # NOTE(review): the per-user `groups: []` entry is never consumed —
    # the user task below always assigns {{ sudo_group }} instead.
    # Confirm intent before relying on it.
    users:
      - name: automation
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."

      - name: hellsos
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."

      - name: jim
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."

  tasks:

    # RedHat-family systems use 'wheel' as the admin group, Debian-family
    # use 'sudo'. (os_family already reports 'RedHat' for Rocky/Alma/
    # Fedora/Oracle, so the extra names are defensive but redundant.)
    - name: Pick sudo group per distro
      ansible.builtin.set_fact:
        sudo_group: >-
          {{ 'wheel'
             if ansible_facts.os_family in
                ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
             else 'sudo' }}

    - name: Ensure user exists
      ansible.builtin.user:
        name: "{{ item.name }}"
        shell: "{{ item.shell }}"
        groups: "{{ sudo_group }}"
        append: true          # do not strip the user's other groups
        create_home: true
      loop: "{{ users }}"

    # exclusive: true makes the listed keys authoritative — any key present
    # on the host but absent from ssh_keys is REMOVED.
    - name: Enforce authorized SSH keys
      ansible.builtin.authorized_key:
        user: "{{ item.name }}"
        key: "{{ item.ssh_keys | join('\n') }}"
        exclusive: true
      loop: "{{ users }}"

    # One drop-in per user; visudo -cf validates the fragment before it is
    # installed so a syntax error cannot lock sudo out.
    - name: Grant passwordless sudo
      ansible.builtin.copy:
        dest: "/etc/sudoers.d/{{ item.name }}"
        mode: '0440'
        content: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL\n"
        validate: 'visudo -cf %s'
      loop: "{{ users }}"
      when: item.sudo_nopasswd
|
||||||
|
|
||||||
|
# ==============================
# SECOND PLAY: SSH HARDENING
# ==============================

# Opt-in play: the 'never' tag keeps it out of default runs; execute it
# explicitly with --tags hardening.
- name: SSH Hardening
  hosts: all
  become: true
  tags: [never, hardening]

  tasks:

    # Proxmox needs root SSH (cluster tooling relies on it), so detect it
    # first and relax PermitRootLogin to key-only on those hosts.
    - name: Detect if system is Proxmox
      ansible.builtin.stat:
        path: /usr/bin/pveversion
      register: proxmox_check

    - name: Ensure sshd_config.d directory exists
      ansible.builtin.file:
        path: /etc/ssh/sshd_config.d
        state: directory
        mode: '0755'

    # NOTE: ChallengeResponseAuthentication is deprecated in OpenSSH 8.7+
    # (renamed KbdInteractiveAuthentication); kept here for older releases.
    # validate runs `sshd -t` on the fragment before install, so a broken
    # config cannot be deployed.
    - name: Deploy SSH hardening config
      ansible.builtin.copy:
        dest: /etc/ssh/sshd_config.d/99-ansible-hardening.conf
        mode: '0644'
        content: |
          PasswordAuthentication no
          ChallengeResponseAuthentication no
          PubkeyAuthentication yes
          AuthenticationMethods publickey
          UsePAM yes

          {% if not proxmox_check.stat.exists %}
          PermitRootLogin no
          {% else %}
          PermitRootLogin prohibit-password
          {% endif %}
        validate: 'sshd -t -f %s'
      notify: Restart SSH

  handlers:
    # Service name differs by family: 'sshd' on RedHat/SUSE, 'ssh' on Debian.
    - name: Restart SSH
      ansible.builtin.service:
        name: "{{ 'sshd'
                  if ansible_facts.os_family in
                     ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
                  else 'ssh' }}"
        state: restarted
|
||||||
10
inv_linuxes
10
inv_linuxes
@@ -1,3 +1,9 @@
|
|||||||
[linux_servers]
|
[linux_servers]
|
||||||
proxmox_nextcloud ansible_host=192.168.69.2
|
jimbuntu ansible_host=192.168.19.4
|
||||||
proxmox_services ansible_host=192.168.69.3
|
jim_storage ansible_host=192.168.19.7
|
||||||
|
portainer2_hellsos ansible_host=192.168.52.9
|
||||||
|
portainernode_hellsos ansible_host=192.168.52.21
|
||||||
|
portainernode2_jim ansible_host=192.168.19.8
|
||||||
|
|
||||||
|
[local]
|
||||||
|
localhost ansible_connection=local
|
||||||
@@ -1,3 +1,16 @@
|
|||||||
[mikrotiks]
|
[mikrotik_routers]
|
||||||
mikrotik_fencl_server ansible_host=192.168.69.1
|
jim_main ansible_host=192.168.19.2
|
||||||
mikrotik_fencl_5G ansible_host=192.168.68.1
|
jim_gw2 ansible_host=192.168.19.3
|
||||||
|
hellsos ansible_host=192.168.40.1
|
||||||
|
ewolet ansible_host=192.168.90.1
|
||||||
|
Poli ansible_host=192.168.2.1
|
||||||
|
Schmid ansible_host=192.168.177.1
|
||||||
|
#Volf ansible_host=192.168.88.1
|
||||||
|
fencl_home ansible_host=192.168.68.1
|
||||||
|
fencl_tata ansible_host=192.168.69.1
|
||||||
|
|
||||||
|
|
||||||
|
[mikrotik_routers:vars]
|
||||||
|
ansible_connection=network_cli
|
||||||
|
ansible_network_os=community.routeros.routeros
|
||||||
|
ansible_command_timeout=15
|
||||||
@@ -33,7 +33,7 @@
|
|||||||
current_date: "{{ date_output.stdout }}"
|
current_date: "{{ date_output.stdout }}"
|
||||||
|
|
||||||
- name: Export router config
|
- name: Export router config
|
||||||
shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/export"
|
shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/export show-sensitive"
|
||||||
register: export_output
|
register: export_output
|
||||||
delegate_to: localhost
|
delegate_to: localhost
|
||||||
when: system_identity.rc == 0
|
when: system_identity.rc == 0
|
||||||
@@ -59,4 +59,4 @@
|
|||||||
- name: Remove backup file from router
|
- name: Remove backup file from router
|
||||||
shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/file remove {{ router_name }}-{{ current_date }}-backup.backup"
|
shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/file remove {{ router_name }}-{{ current_date }}-backup.backup"
|
||||||
delegate_to: localhost
|
delegate_to: localhost
|
||||||
when: system_identity.rc == 0
|
when: system_identity.rc == 0
|
||||||
108
mikrotikbackup_clean.yml
Normal file
108
mikrotikbackup_clean.yml
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
---
# mikrotikbackup_clean.yml — MikroTik maintenance playbook.
# Every task is tagged 'never', so nothing runs unless explicitly
# requested: use --tags backup and/or --tags upgrade.
- name: Backup and/or Upgrade MikroTik
  hosts: mikrotik_routers
  gather_facts: false

  vars:
    # Controller-side directory for exported configs (no trailing slash —
    # paths below append "/" themselves).
    backup_dir: /opt/mikrotik_backups

  tasks:

    # ----------------------------
    # Always: identity + timestamp
    # ----------------------------
    - name: Get router identity
      community.routeros.command:
        commands: /system identity print
      register: identity_raw
      tags: always

    - name: Parse router name
      ansible.builtin.set_fact:
        # Output looks like "name: <router-name>"; take the value part.
        router_name: "{{ identity_raw.stdout[0].split(': ')[1] | trim }}"
      tags: always

    - name: Get timestamp
      ansible.builtin.command: date +%Y-%m-%d_%H-%M-%S
      register: date_out
      delegate_to: localhost
      changed_when: false   # read-only; never report 'changed'
      tags: always

    - name: Set timestamp fact
      ansible.builtin.set_fact:
        ts: "{{ date_out.stdout }}"
      tags: always

    # ----------------------------
    # Backup (tag: backup)
    # ----------------------------
    - name: Ensure local backup directory exists
      ansible.builtin.file:
        path: "{{ backup_dir }}"
        state: directory
        mode: "0755"
      delegate_to: localhost
      tags: [backup, never]

    # show-sensitive includes secrets (passwords, keys) in the export —
    # the resulting .rsc files must be protected accordingly.
    - name: Export router config
      community.routeros.command:
        commands: /export terse show-sensitive
      register: export_cfg
      tags: [backup, never]

    - name: Save export locally
      ansible.builtin.copy:
        content: "{{ export_cfg.stdout[0] }}"
        dest: "{{ backup_dir }}/{{ router_name }}-{{ ts }}.rsc"
        mode: "0600"   # export contains credentials — owner-only
      delegate_to: localhost
      tags: [backup, never]

    # ----------------------------
    # Upgrade (tag: upgrade)
    # ----------------------------
    - name: Check current and latest available package versions
      community.routeros.command:
        commands: /system package update check-for-updates
      register: update_check
      tags: [upgrade, never]

    - name: Parse installed and latest versions
      ansible.builtin.set_fact:
        installed_version: "{{ update_check.stdout[0] | regex_search('installed-version: ([\\d.]+)', '\\1') | first }}"
        latest_version: "{{ update_check.stdout[0] | regex_search('latest-version: ([\\d.]+)', '\\1') | first }}"
      tags: [upgrade, never]

    - name: Skip upgrade if already on latest
      ansible.builtin.debug:
        msg: "Router {{ router_name }} is already on latest version {{ installed_version }}. Skipping upgrade."
      when: installed_version == latest_version
      tags: [upgrade, never]

    # NOTE: install reboots the router once packages are in place.
    - name: Trigger package download and install
      community.routeros.command:
        commands: /system package update install
      register: upgrade_result
      when: installed_version != latest_version
      tags: [upgrade, never]

    - name: Wait for router to come back online after reboot
      ansible.builtin.wait_for_connection:
        delay: 180     # initial grace period for download + reboot
        timeout: 300
        sleep: 10
      when:
        - installed_version != latest_version
        - upgrade_result is not failed
      tags: [upgrade, never]

    - name: Confirm upgraded version
      community.routeros.command:
        commands: /system resource print
      register: post_upgrade_info
      when: installed_version != latest_version
      tags: [upgrade, never]

    - name: Show post-upgrade RouterOS version
      ansible.builtin.debug:
        msg: "{{ post_upgrade_info.stdout[0] | regex_search('version: .+') }}"
      when: installed_version != latest_version
      tags: [upgrade, never]
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
# nextcloud/check_stack_nextcloud.yml
|
|
||||||
|
|
||||||
- name: Run Nextcloud maintenance on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
|
|
||||||
vars:
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
|
|
||||||
# Flip to true if Docker needs sudo on the VM
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
vm_commands:
|
|
||||||
- "docker exec -u www-data nextcloud php -f /var/www/html/cron.php"
|
|
||||||
- "docker exec -u www-data nextcloud php occ app:update --all"
|
|
||||||
- "docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive"
|
|
||||||
- "docker exec -u www-data nextcloud php occ status"
|
|
||||||
- "set -o pipefail; timeout 180s bash -x /data/compose/nextcloud/stack-health.sh </dev/null"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH) # English comments
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Run Nextcloud commands on VM (via SSH, argv, no line breaks)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -p
|
|
||||||
- "{{ vm_pass }}"
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
loop: "{{ vm_commands }}"
|
|
||||||
register: vm_cmds
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Show outputs for each command
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ vm_cmds.results }}"
|
|
||||||
|
|
||||||
- name: Fail play if any command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Command failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All commands succeeded."
|
|
||||||
loop: "{{ vm_cmds.results }}"
|
|
||||||
@@ -1,174 +0,0 @@
|
|||||||
# nextcloud/update_collabora.yml
|
|
||||||
|
|
||||||
- name: Update Collabora CODE on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# --- Connection to VM (provided by Semaphore env vars) ---
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
# --- Debug mode (controlled via Semaphore variable) ---
|
|
||||||
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
|
||||||
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
|
||||||
|
|
||||||
# --- Collabora specifics ---
|
|
||||||
collabora_debug_caps: true
|
|
||||||
collabora_caps_url: "https://collabora.martinfencl.eu/hosting/capabilities"
|
|
||||||
|
|
||||||
# Use the FULL Nextcloud stack compose file; only target the 'collabora' service inside it
|
|
||||||
collabora_project: "nextcloud-collabora"
|
|
||||||
collabora_compose_file: "/data/compose/nextcloud/nextcloud-collabora.yml"
|
|
||||||
collabora_service: "collabora"
|
|
||||||
|
|
||||||
# Docker command prefix (consistent behavior and quiet hints)
|
|
||||||
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
|
||||||
|
|
||||||
# Commands to run on the target VM (quiet outputs)
|
|
||||||
collabora_commands:
|
|
||||||
- "{{ docker_prefix }} pull -q collabora/code:latest >/dev/null"
|
|
||||||
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} pull {{ collabora_service }} >/dev/null"
|
|
||||||
- "{{ docker_prefix }} compose -p {{ collabora_project }} -f {{ collabora_compose_file }} up -d --no-deps --force-recreate {{ collabora_service }} >/dev/null"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH) # English comments
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Run Collabora update commands on VM (via SSH) # use SSHPASS env, hide item value
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ collabora_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx # <-- capture loop index here
|
|
||||||
label: "cmd-{{ idx }}" # <-- use idx instead of loop.index
|
|
||||||
register: collab_cmds
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Show outputs for each Collabora command
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ collab_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Fail play if any Collabora command failed # also hide item label
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Collabora update failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Collabora update commands succeeded."
|
|
||||||
loop: "{{ collab_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Readiness checks (controller first, then VM fallback)
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Collabora | Wait for capabilities (controller first)
|
|
||||||
ansible.builtin.uri:
|
|
||||||
url: "{{ collabora_caps_url }}"
|
|
||||||
method: GET
|
|
||||||
return_content: true
|
|
||||||
validate_certs: true
|
|
||||||
status_code: 200
|
|
||||||
register: caps_controller
|
|
||||||
delegate_to: localhost
|
|
||||||
run_once: true
|
|
||||||
retries: "{{ RETRIES }}"
|
|
||||||
delay: 2
|
|
||||||
until: caps_controller.status == 200
|
|
||||||
failed_when: false
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Collabora | VM-side fetch (pure JSON via Python) # use SSHPASS env here too
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- |
|
|
||||||
python3 - <<'PY'
|
|
||||||
import json, urllib.request, sys
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen("{{ collabora_caps_url }}", timeout=15) as r:
|
|
||||||
sys.stdout.write(r.read().decode())
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
PY
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: caps_vm
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
when: caps_controller.status | default(0) != 200 or caps_controller.json is not defined
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Collabora | Choose JSON (controller wins, else VM)
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
collab_caps_json: >-
|
|
||||||
{{
|
|
||||||
(caps_controller.json
|
|
||||||
if (caps_controller.status|default(0))==200 and (caps_controller.json is defined)
|
|
||||||
else (
|
|
||||||
(caps_vm.stdout | default('') | trim | length > 0)
|
|
||||||
| ternary((caps_vm.stdout | trim | from_json), omit)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: Collabora | Print concise summary
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: >-
|
|
||||||
Collabora {{ collab_caps_json.productVersion | default('?') }}
|
|
||||||
({{ collab_caps_json.productName | default('?') }}),
|
|
||||||
convert-to.available={{ collab_caps_json['convert-to']['available'] | default('n/a') }},
|
|
||||||
serverId={{ collab_caps_json.serverId | default('n/a') }}
|
|
||||||
when: collab_caps_json is defined and DEBUG == 1
|
|
||||||
|
|
||||||
- name: Collabora | Capabilities unavailable (after retries)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "Capabilities endpoint není dostupný ani po pokusech."
|
|
||||||
when: collab_caps_json is not defined and DEBUG == 1
|
|
||||||
|
|
||||||
# Optional full JSON (debug)
|
|
||||||
- name: Collabora | Full JSON (debug)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
var: collab_caps_json
|
|
||||||
when: collabora_debug_caps and (collab_caps_json is defined) and DEBUG == 1
|
|
||||||
@@ -1,287 +0,0 @@
|
|||||||
# nextcloud/update_nextcloud.yml
|
|
||||||
|
|
||||||
- name: Update Nextcloud on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# --- Connection to VM (provided by Semaphore env vars) ---
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
# --- Debug / retries ---
|
|
||||||
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
|
||||||
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
|
||||||
|
|
||||||
# --- Nextcloud specifics ---
|
|
||||||
nextcloud_project: "nextcloud-collabora"
|
|
||||||
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
|
|
||||||
nextcloud_service: "nextcloud"
|
|
||||||
|
|
||||||
# Backup directory on the VM (timestamped on controller)
|
|
||||||
backup_dir: "/data/compose/nextcloud/backup-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
|
|
||||||
|
|
||||||
nextcloud_base_url: "https://cloud.martinfencl.eu"
|
|
||||||
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
|
|
||||||
|
|
||||||
# Docker command prefix (consistent behavior and quiet hints)
|
|
||||||
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
|
||||||
|
|
||||||
# --- Backup phase commands (run on VM) ---
|
|
||||||
nextcloud_backup_commands:
|
|
||||||
- >
|
|
||||||
mkdir -p "{{ backup_dir }}"
|
|
||||||
- >
|
|
||||||
docker exec -u www-data nextcloud php occ maintenance:mode --on
|
|
||||||
# Create tarball of config + custom_apps inside the container
|
|
||||||
- >
|
|
||||||
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
|
|
||||||
# Copy that tarball to the host backup directory
|
|
||||||
- >
|
|
||||||
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
|
|
||||||
# Remove temporary file inside the container
|
|
||||||
- >
|
|
||||||
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
|
|
||||||
# Database dump from DB container (unchanged)
|
|
||||||
- >
|
|
||||||
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
|
|
||||||
|
|
||||||
# --- Upgrade phase commands (run on VM) ---
|
|
||||||
nextcloud_upgrade_commands:
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_service }}
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_service }}
|
|
||||||
- >
|
|
||||||
docker exec -u www-data nextcloud php occ upgrade
|
|
||||||
- >
|
|
||||||
docker exec -u www-data nextcloud php occ app:update --all || true
|
|
||||||
- >
|
|
||||||
docker exec -u www-data nextcloud php occ maintenance:repair --include-expensive || true
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH)
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Nextcloud | Show current version before upgrade (DEBUG)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- 'docker exec -u www-data nextcloud php occ -V || true'
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_version_before
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Backup phase
|
|
||||||
# -------------------------
|
|
||||||
- name: Nextcloud | Run backup commands on VM (via SSH) # run plain commands via SSH
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ nextcloud_backup_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "backup-cmd-{{ idx }}"
|
|
||||||
register: nc_backup_cmds
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Show outputs of backup commands (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ nc_backup_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Fail play if any backup command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Nextcloud backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Nextcloud backup commands succeeded."
|
|
||||||
loop: "{{ nc_backup_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "backup-cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Upgrade phase
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Nextcloud | Run upgrade commands on VM (via SSH)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ nextcloud_upgrade_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "upgrade-cmd-{{ idx }}"
|
|
||||||
register: nc_upgrade_cmds
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Show outputs of upgrade commands (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ nc_upgrade_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Fail play if any upgrade command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Nextcloud upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Nextcloud upgrade commands succeeded."
|
|
||||||
loop: "{{ nc_upgrade_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "upgrade-cmd-{{ idx }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Disable maintenance mode (only after successful upgrade)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_maint_off
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Readiness check (status.php)
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Nextcloud | Wait for status.php (controller first)
|
|
||||||
ansible.builtin.uri:
|
|
||||||
url: "{{ nextcloud_status_url }}"
|
|
||||||
method: GET
|
|
||||||
return_content: true
|
|
||||||
validate_certs: true
|
|
||||||
status_code: 200
|
|
||||||
register: nc_status_controller
|
|
||||||
delegate_to: localhost
|
|
||||||
run_once: true
|
|
||||||
retries: "{{ RETRIES }}"
|
|
||||||
delay: 4
|
|
||||||
until: nc_status_controller.status == 200
|
|
||||||
failed_when: false
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- |
|
|
||||||
python3 - <<'PY'
|
|
||||||
import json, urllib.request, sys
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
|
|
||||||
sys.stdout.write(r.read().decode())
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
PY
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_status_vm
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Choose status JSON (controller wins, else VM)
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
nextcloud_status_json: >-
|
|
||||||
{{
|
|
||||||
(nc_status_controller.json
|
|
||||||
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
|
|
||||||
else (
|
|
||||||
(nc_status_vm.stdout | default('') | trim | length > 0)
|
|
||||||
| ternary((nc_status_vm.stdout | trim | from_json), omit)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: Nextcloud | Print concise status summary (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: >-
|
|
||||||
Nextcloud {{ nextcloud_status_json.version | default('?') }}
|
|
||||||
(installed={{ nextcloud_status_json.installed | default('?') }},
|
|
||||||
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
|
|
||||||
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
|
|
||||||
when: nextcloud_status_json is defined and DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Status JSON not available (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "status.php is not reachable or did not return JSON."
|
|
||||||
when: nextcloud_status_json is not defined and DEBUG == 1
|
|
||||||
@@ -1,293 +0,0 @@
|
|||||||
# nextcloud/update_nextcloud_db_redis.yml
|
|
||||||
|
|
||||||
- name: Update Nextcloud DB and Redis on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# --- Connection to VM (provided by Semaphore env vars) ---
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
# --- Debug / retries ---
|
|
||||||
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
|
||||||
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
|
||||||
|
|
||||||
# --- Nextcloud specifics ---
|
|
||||||
nextcloud_project: "nextcloud-collabora"
|
|
||||||
nextcloud_compose_file: "/data/compose/nextcloud/docker-compose-nextcloud.yml"
|
|
||||||
|
|
||||||
# Service names from docker-compose file
|
|
||||||
nextcloud_web_service: "nextcloud"
|
|
||||||
nextcloud_db_service: "nextclouddb"
|
|
||||||
redis_service: "redis"
|
|
||||||
|
|
||||||
# Backup directory on the VM (timestamped on controller)
|
|
||||||
backup_dir: "/data/compose/nextcloud/backup-db-redis-{{ lookup('pipe', 'date +%F-%H%M%S') }}"
|
|
||||||
|
|
||||||
nextcloud_base_url: "https://cloud.martinfencl.eu"
|
|
||||||
nextcloud_status_url: "{{ nextcloud_base_url }}/status.php"
|
|
||||||
|
|
||||||
# Docker command prefix (consistent behavior and quiet hints)
|
|
||||||
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
|
||||||
|
|
||||||
# --- Backup phase commands (run on VM) ---
|
|
||||||
# Same idea as in update_nextcloud.yml: maintenance on + config/custom_apps + DB dump
|
|
||||||
nextcloud_backup_commands:
|
|
||||||
- >
|
|
||||||
mkdir -p "{{ backup_dir }}"
|
|
||||||
- >
|
|
||||||
docker exec -u www-data nextcloud php occ maintenance:mode --on
|
|
||||||
- >
|
|
||||||
docker exec nextcloud sh -c 'tar czf /tmp/nextcloud_conf.tgz -C /var/www/html config custom_apps'
|
|
||||||
- >
|
|
||||||
docker cp nextcloud:/tmp/nextcloud_conf.tgz "{{ backup_dir }}/nextcloud_conf.tgz"
|
|
||||||
- >
|
|
||||||
docker exec nextcloud rm /tmp/nextcloud_conf.tgz || true
|
|
||||||
- >
|
|
||||||
docker exec nextcloud-db sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' > "{{ backup_dir }}/db.sql"
|
|
||||||
|
|
||||||
# --- DB + Redis upgrade commands (run on VM) ---
|
|
||||||
db_redis_upgrade_commands:
|
|
||||||
# Update MariaDB service
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ nextcloud_db_service }}
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ nextcloud_db_service }}
|
|
||||||
# Simple DB health check (non-fatal)
|
|
||||||
- >
|
|
||||||
docker exec nextcloud-db sh -c 'mysqladmin ping -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' || true
|
|
||||||
# Update Redis service
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} pull {{ redis_service }}
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} compose -p {{ nextcloud_project }} -f {{ nextcloud_compose_file }} up -d --no-deps --force-recreate {{ redis_service }}
|
|
||||||
# Simple Redis health check (non-fatal)
|
|
||||||
- >
|
|
||||||
docker exec redis redis-cli PING || true
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH)
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Nextcloud | Show current version before DB/Redis upgrade (DEBUG)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- 'docker exec -u www-data nextcloud php occ -V || true'
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_version_before
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Backup phase
|
|
||||||
# -------------------------
|
|
||||||
- name: Nextcloud | Run backup commands on VM (via SSH)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ nextcloud_backup_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "backup-cmd-{{ idx }}"
|
|
||||||
register: nc_backup_cmds
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Show outputs of backup commands (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ nc_backup_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Fail play if any backup command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Nextcloud DB/Redis backup step failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Nextcloud DB/Redis backup commands succeeded."
|
|
||||||
loop: "{{ nc_backup_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "backup-cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# DB + Redis upgrade phase
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Nextcloud | Run DB/Redis upgrade commands on VM (via SSH)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ db_redis_upgrade_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "db-redis-cmd-{{ idx }}"
|
|
||||||
register: nc_db_redis_cmds
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Show outputs of DB/Redis upgrade commands (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ nc_db_redis_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Fail play if any DB/Redis upgrade command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Nextcloud DB/Redis upgrade step failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Nextcloud DB/Redis upgrade commands succeeded."
|
|
||||||
loop: "{{ nc_db_redis_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "db-redis-cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Disable maintenance + readiness check
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Nextcloud | Disable maintenance mode after DB/Redis upgrade
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') }}docker exec -u www-data nextcloud php occ maintenance:mode --off"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_maint_off
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Wait for status.php (controller first)
|
|
||||||
ansible.builtin.uri:
|
|
||||||
url: "{{ nextcloud_status_url }}"
|
|
||||||
method: GET
|
|
||||||
return_content: true
|
|
||||||
validate_certs: true
|
|
||||||
status_code: 200
|
|
||||||
register: nc_status_controller
|
|
||||||
delegate_to: localhost
|
|
||||||
run_once: true
|
|
||||||
retries: "{{ RETRIES }}"
|
|
||||||
delay: 4
|
|
||||||
failed_when: false
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Nextcloud | VM-side fetch status.php (JSON via Python)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- |
|
|
||||||
python3 - <<'PY'
|
|
||||||
import json, urllib.request, sys
|
|
||||||
try:
|
|
||||||
with urllib.request.urlopen("{{ nextcloud_status_url }}", timeout=15) as r:
|
|
||||||
sys.stdout.write(r.read().decode())
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
PY
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
register: nc_status_vm
|
|
||||||
changed_when: false
|
|
||||||
failed_when: false
|
|
||||||
when: nc_status_controller.status | default(0) != 200 or nc_status_controller.json is not defined
|
|
||||||
no_log: "{{ DEBUG == 0 }}"
|
|
||||||
|
|
||||||
- name: Nextcloud | Choose status JSON (controller wins, else VM)
|
|
||||||
ansible.builtin.set_fact:
|
|
||||||
nextcloud_status_json: >-
|
|
||||||
{{
|
|
||||||
(nc_status_controller.json
|
|
||||||
if (nc_status_controller.status | default(0)) == 200 and (nc_status_controller.json is defined)
|
|
||||||
else (
|
|
||||||
(nc_status_vm.stdout | default('') | trim | length > 0)
|
|
||||||
| ternary((nc_status_vm.stdout | trim | from_json), omit)
|
|
||||||
)
|
|
||||||
)
|
|
||||||
}}
|
|
||||||
failed_when: false
|
|
||||||
|
|
||||||
- name: Nextcloud | Print concise status summary (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: >-
|
|
||||||
Nextcloud {{ nextcloud_status_json.version | default('?') }}
|
|
||||||
(installed={{ nextcloud_status_json.installed | default('?') }},
|
|
||||||
maintenance={{ nextcloud_status_json.maintenance | default('?') }},
|
|
||||||
needsDbUpgrade={{ nextcloud_status_json.needsDbUpgrade | default('?') }})
|
|
||||||
when: nextcloud_status_json is defined and DEBUG == 1
|
|
||||||
|
|
||||||
- name: Nextcloud | Status JSON not available (DEBUG)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "status.php is not reachable or did not return JSON."
|
|
||||||
when: nextcloud_status_json is not defined and DEBUG == 1
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
# update_portainer_agent.yml
|
|
||||||
|
|
||||||
- name: Update Portainer Agent on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# --- Connection to VM (provided by Semaphore env vars) ---
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
# --- Debug mode (controlled via Semaphore variable) ---
|
|
||||||
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
|
||||||
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
|
||||||
|
|
||||||
# --- Portainer Agent specifics ---
|
|
||||||
portainer_agent_image: "portainer/agent:latest"
|
|
||||||
portainer_agent_container: "portainer_agent"
|
|
||||||
portainer_agent_port: 9001
|
|
||||||
|
|
||||||
# Docker command prefix (consistent behavior and quiet hints)
|
|
||||||
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
|
||||||
|
|
||||||
# Commands to run on the target VM (quiet outputs)
|
|
||||||
portainer_commands:
|
|
||||||
- "{{ docker_prefix }} pull -q {{ portainer_agent_image }} >/dev/null"
|
|
||||||
- "{{ docker_prefix }} stop {{ portainer_agent_container }} >/dev/null 2>&1 || true"
|
|
||||||
- "{{ docker_prefix }} rm {{ portainer_agent_container }} >/dev/null 2>&1 || true"
|
|
||||||
- >
|
|
||||||
{{ docker_prefix }} run -d
|
|
||||||
--name {{ portainer_agent_container }}
|
|
||||||
--restart=always
|
|
||||||
-p {{ portainer_agent_port }}:9001
|
|
||||||
-v /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
-v /var/lib/docker/volumes:/var/lib/docker/volumes
|
|
||||||
{{ portainer_agent_image }} >/dev/null
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH) # English comments
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Run Portainer Agent update commands on VM (via SSH) # run all commands via sshpass
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}"
|
|
||||||
loop: "{{ portainer_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx # capture loop index
|
|
||||||
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
|
|
||||||
register: portainer_cmds
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
|
|
||||||
|
|
||||||
- name: Show outputs for each Portainer command
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ portainer_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Fail play if any Portainer command failed
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Portainer Agent update failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Portainer Agent update commands succeeded."
|
|
||||||
loop: "{{ portainer_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Readiness check (TCP port)
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Portainer Agent | Wait for TCP port to be open from controller
|
|
||||||
ansible.builtin.wait_for:
|
|
||||||
host: "{{ vm_ip }}"
|
|
||||||
port: "{{ portainer_agent_port }}"
|
|
||||||
delay: 2 # initial delay before first check
|
|
||||||
timeout: "{{ RETRIES * 2 }}" # total timeout in seconds
|
|
||||||
state: started
|
|
||||||
register: portainer_wait
|
|
||||||
delegate_to: localhost
|
|
||||||
run_once: true
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Portainer Agent | Print concise summary
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: >-
|
|
||||||
Portainer Agent TCP {{ vm_ip }}:{{ portainer_agent_port }}
|
|
||||||
reachable={{ (portainer_wait is defined) and (not portainer_wait.failed | default(false)) }}
|
|
||||||
elapsed={{ portainer_wait.elapsed | default('n/a') }}s
|
|
||||||
when: DEBUG == 1
|
|
||||||
2
requirements.yml
Normal file
2
requirements.yml
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
collections:
|
||||||
|
- name: community.routeros
|
||||||
56
test.yml
56
test.yml
@@ -1,56 +0,0 @@
|
|||||||
- name: Test connectivity from Semaphore container to Homarr VMs
|
|
||||||
hosts: localhost
|
|
||||||
gather_facts: false
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# List of VMs you want to test
|
|
||||||
vm_targets:
|
|
||||||
- { ip: "192.168.69.253" }
|
|
||||||
- { ip: "192.168.69.254" }
|
|
||||||
|
|
||||||
# Credentials (ideálně z env/secret)
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') | default('howard') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (inside container) # install sshpass
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Ping VM IPs from container # simple ICMP ping
|
|
||||||
ansible.builtin.command: "ping -c 2 {{ item.ip }}"
|
|
||||||
loop: "{{ vm_targets }}"
|
|
||||||
register: ping_results
|
|
||||||
ignore_errors: true
|
|
||||||
|
|
||||||
- name: Show ping results
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "Ping to {{ item.item.ip }} -> rc={{ item.rc }}, stdout={{ item.stdout }}"
|
|
||||||
loop: "{{ ping_results.results }}"
|
|
||||||
|
|
||||||
- name: Test SSH to VM with ssh (SSH key)
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- ssh
|
|
||||||
- -i
|
|
||||||
- /path/to/id_rsa # sem dej cestu k privátnímu klíči v kontejneru
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=5
|
|
||||||
- "{{ vm_user }}@{{ item.ip }}"
|
|
||||||
- "echo OK-from-{{ item.ip }}"
|
|
||||||
loop: "{{ vm_targets }}"
|
|
||||||
register: ssh_results
|
|
||||||
ignore_errors: true
|
|
||||||
|
|
||||||
- name: Show SSH results
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
SSH to {{ item.item.ip }}:
|
|
||||||
rc={{ item.rc }}
|
|
||||||
stdout={{ item.stdout }}
|
|
||||||
stderr={{ item.stderr }}
|
|
||||||
loop: "{{ ssh_results.results }}"
|
|
||||||
57
test_sms.yml
Normal file
57
test_sms.yml
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
---
|
||||||
|
- name: Send and verify SMS delivery via internet-master.cz
|
||||||
|
hosts: localhost
|
||||||
|
gather_facts: false
|
||||||
|
vars:
|
||||||
|
sms_number: "601358865"
|
||||||
|
sms_username: "mikrotik"
|
||||||
|
sms_password_send: "jdkotzHJIOPWhjtr32D"
|
||||||
|
sms_password_recv: "jdkotzHJIOPWhjtr32D"
|
||||||
|
sms_wait_seconds: 120 # Wait 2 minutes for delivery
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Generate random test string
|
||||||
|
set_fact:
|
||||||
|
random_string: "mikrotik_{{ lookup('password', '/dev/null length=8 chars=ascii_letters') }}"
|
||||||
|
|
||||||
|
- name: Send SMS message
|
||||||
|
uri:
|
||||||
|
url: "https://sms.internet-master.cz/send/?number={{ sms_number }}&message=@mikrotik@{{ random_string | urlencode }}&type=class-1&username={{ sms_username }}&password={{ sms_password_send }}"
|
||||||
|
method: GET
|
||||||
|
return_content: true
|
||||||
|
register: send_result
|
||||||
|
|
||||||
|
- name: Show send API response
|
||||||
|
debug:
|
||||||
|
var: send_result.content
|
||||||
|
|
||||||
|
- name: Wait for SMS to be delivered
|
||||||
|
pause:
|
||||||
|
seconds: "{{ sms_wait_seconds }}"
|
||||||
|
|
||||||
|
- name: Fetch received messages
|
||||||
|
uri:
|
||||||
|
url: "https://sms.internet-master.cz/receive/?username={{ sms_username }}&password={{ sms_password_recv }}"
|
||||||
|
method: GET
|
||||||
|
return_content: true
|
||||||
|
register: recv_result
|
||||||
|
|
||||||
|
- name: Parse received JSON
|
||||||
|
set_fact:
|
||||||
|
inbox: "{{ recv_result.json.inbox | default([]) }}"
|
||||||
|
|
||||||
|
- name: Check if random string message was received
|
||||||
|
set_fact:
|
||||||
|
message_found: "{{ inbox | selectattr('message', 'equalto', random_string) | list | length > 0 }}"
|
||||||
|
|
||||||
|
- name: Report result
|
||||||
|
debug:
|
||||||
|
msg: >
|
||||||
|
SMS with message '{{ random_string }}' was {{
|
||||||
|
'delivered ✅' if message_found else 'NOT delivered ❌'
|
||||||
|
}}.
|
||||||
|
|
||||||
|
- name: Fail if not delivered
|
||||||
|
fail:
|
||||||
|
msg: "Message '{{ random_string }}' not found in received inbox!"
|
||||||
|
when: not message_found
|
||||||
91
update.yml
Normal file
91
update.yml
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
---
|
||||||
|
- name: Update system (APT + Flatpak)
|
||||||
|
hosts: all
|
||||||
|
become: yes
|
||||||
|
gather_facts: yes
|
||||||
|
serial: 5
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
|
||||||
|
- name: Ensure SSH is reachable (skip host if not)
|
||||||
|
delegate_to: localhost
|
||||||
|
wait_for:
|
||||||
|
host: "{{ ansible_host | default(inventory_hostname) }}"
|
||||||
|
port: 22
|
||||||
|
timeout: 5
|
||||||
|
register: ssh_check
|
||||||
|
ignore_errors: yes
|
||||||
|
|
||||||
|
- meta: end_host
|
||||||
|
when: ssh_check is failed
|
||||||
|
|
||||||
|
- name: Ping with retries (handle intermittent flaps)
|
||||||
|
ping:
|
||||||
|
register: ping_result
|
||||||
|
retries: 5
|
||||||
|
delay: 5
|
||||||
|
until: ping_result is success
|
||||||
|
|
||||||
|
- name: Wait for apt lock to be released
|
||||||
|
shell: |
|
||||||
|
while fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do
|
||||||
|
echo "Waiting for apt lock..."
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
changed_when: false
|
||||||
|
|
||||||
|
- name: Update apt cache
|
||||||
|
apt:
|
||||||
|
update_cache: yes
|
||||||
|
|
||||||
|
- name: Perform full upgrade
|
||||||
|
apt:
|
||||||
|
upgrade: full
|
||||||
|
autoremove: yes
|
||||||
|
autoclean: yes
|
||||||
|
register: apt_upgrade
|
||||||
|
retries: 3
|
||||||
|
delay: 10
|
||||||
|
until: apt_upgrade is succeeded
|
||||||
|
|
||||||
|
- name: Fix broken packages
|
||||||
|
command: apt-get -f install -y
|
||||||
|
register: fix_result
|
||||||
|
failed_when: false
|
||||||
|
changed_when: "'Setting up' in fix_result.stdout"
|
||||||
|
|
||||||
|
- name: Check if Flatpak is installed
|
||||||
|
command: which flatpak
|
||||||
|
register: flatpak_check
|
||||||
|
changed_when: false
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
|
- name: Update system Flatpaks
|
||||||
|
command: flatpak update -y --noninteractive --system
|
||||||
|
when: flatpak_check.rc == 0
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
|
- name: Update user Flatpaks
|
||||||
|
command: flatpak update -y --noninteractive --user
|
||||||
|
become: false
|
||||||
|
when: flatpak_check.rc == 0
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
|
- name: Remove unused Flatpaks
|
||||||
|
command: flatpak uninstall -y --noninteractive --unused
|
||||||
|
when: flatpak_check.rc == 0
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
|
- name: Update snap packages
|
||||||
|
command: snap refresh
|
||||||
|
failed_when: false
|
||||||
|
|
||||||
|
- name: Check if reboot is required
|
||||||
|
stat:
|
||||||
|
path: /var/run/reboot-required
|
||||||
|
register: reboot_required
|
||||||
|
|
||||||
|
- name: Notify if reboot required
|
||||||
|
debug:
|
||||||
|
msg: "Reboot required on {{ inventory_hostname }}"
|
||||||
|
when: reboot_required.stat.exists
|
||||||
@@ -1,155 +0,0 @@
|
|||||||
# update_broker_kafka-ui.yml
|
|
||||||
|
|
||||||
- name: Update Kafka broker3 and Redpanda Console on VM via Proxmox
|
|
||||||
hosts: proxmox
|
|
||||||
gather_facts: false
|
|
||||||
become: true
|
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
|
||||||
# --- Connection to VM (provided by Semaphore env vars) ---
|
|
||||||
vm_ip: "{{ lookup('env', 'VM_IP') }}"
|
|
||||||
vm_user: "{{ lookup('env', 'VM_USER') }}"
|
|
||||||
vm_pass: "{{ lookup('env', 'VM_PASS') }}"
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
# --- Debug mode (controlled via Semaphore variable) ---
|
|
||||||
DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
|
|
||||||
RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"
|
|
||||||
|
|
||||||
# --- Kafka / Redpanda Console specifics ---
|
|
||||||
kafka_project: "kafka"
|
|
||||||
# Adjusted to match your actual compose file location
|
|
||||||
kafka_compose_file: "/data/compose/docker-compose.yml"
|
|
||||||
|
|
||||||
kafka_services:
|
|
||||||
- broker3
|
|
||||||
- kafka-ui
|
|
||||||
|
|
||||||
redpanda_console_port: 8084
|
|
||||||
|
|
||||||
# Controller-side URL (default to direct VM IP/port or external URL)
|
|
||||||
redpanda_console_url: "{{ lookup('env', 'REDPANDA_CONSOLE_URL') | default('http://192.168.69.254:8084/overview', true) }}"
|
|
||||||
|
|
||||||
redpanda_retries: "{{ RETRIES }}"
|
|
||||||
redpanda_delay: 2
|
|
||||||
|
|
||||||
# Docker command prefix (consistent behavior and quiet hints)
|
|
||||||
docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"
|
|
||||||
|
|
||||||
# Commands to run on the target VM (quiet outputs)
|
|
||||||
# 1) Pull latest images for broker3 + kafka-ui
|
|
||||||
# 2) Stop any running containers with these names (legacy or compose-managed)
|
|
||||||
# 3) Remove any containers with these names to avoid name conflicts
|
|
||||||
# 4) Recreate services via docker compose
|
|
||||||
kafka_commands:
|
|
||||||
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} pull {{ kafka_services | join(' ') }} >/dev/null"
|
|
||||||
- "{{ docker_prefix }} stop {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
|
|
||||||
- "{{ docker_prefix }} rm -f {{ kafka_services | join(' ') }} >/dev/null 2>&1 || true"
|
|
||||||
- "{{ docker_prefix }} compose -p {{ kafka_project }} -f {{ kafka_compose_file }} up -d --no-deps --force-recreate {{ kafka_services | join(' ') }} >/dev/null"
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Ensure sshpass is installed (for password-based SSH) # English comments
|
|
||||||
ansible.builtin.apt:
|
|
||||||
name: sshpass
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
|
|
||||||
- name: Run Kafka update commands on VM (via SSH) # use SSHPASS env, hide item value
|
|
||||||
ansible.builtin.command:
|
|
||||||
argv:
|
|
||||||
- sshpass
|
|
||||||
- -e # read password from SSHPASS environment
|
|
||||||
- ssh
|
|
||||||
- -o
|
|
||||||
- StrictHostKeyChecking=no
|
|
||||||
- -o
|
|
||||||
- ConnectTimeout=15
|
|
||||||
- "{{ vm_user }}@{{ vm_ip }}"
|
|
||||||
- bash
|
|
||||||
- -lc
|
|
||||||
- "{{ ('sudo ' if use_sudo else '') + item }}"
|
|
||||||
environment:
|
|
||||||
SSHPASS: "{{ vm_pass }}" # supply password via environment
|
|
||||||
loop: "{{ kafka_commands }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx # capture loop index
|
|
||||||
label: "cmd-{{ idx }}" # avoid printing full command in (item=...) line
|
|
||||||
register: kafka_cmds
|
|
||||||
changed_when: false
|
|
||||||
no_log: "{{ DEBUG == 0 }}" # hide outputs and env when not debugging
|
|
||||||
|
|
||||||
- name: Show outputs for each Kafka command
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: |
|
|
||||||
CMD: {{ item.item }}
|
|
||||||
RC: {{ item.rc }}
|
|
||||||
STDOUT:
|
|
||||||
{{ (item.stdout | default('')).strip() }}
|
|
||||||
STDERR:
|
|
||||||
{{ (item.stderr | default('')).strip() }}
|
|
||||||
loop: "{{ kafka_cmds.results }}"
|
|
||||||
when: DEBUG == 1
|
|
||||||
|
|
||||||
- name: Fail play if any Kafka command failed # also hide item label
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that: "item.rc == 0"
|
|
||||||
fail_msg: "Kafka/Redpanda Console update failed on VM: {{ item.item }} (rc={{ item.rc }})"
|
|
||||||
success_msg: "All Kafka/Redpanda Console update commands succeeded."
|
|
||||||
loop: "{{ kafka_cmds.results }}"
|
|
||||||
loop_control:
|
|
||||||
index_var: idx
|
|
||||||
label: "cmd-{{ idx }}"
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Readiness check – Redpanda Console UI
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Redpanda Console | Wait for overview page (controller, with retries)
|
|
||||||
ansible.builtin.uri:
|
|
||||||
url: "{{ redpanda_console_url }}"
|
|
||||||
method: GET
|
|
||||||
return_content: true
|
|
||||||
validate_certs: false # plain HTTP on 192.168.69.254 (or as configured)
|
|
||||||
status_code: 200
|
|
||||||
register: redpanda_controller
|
|
||||||
delegate_to: localhost
|
|
||||||
run_once: true
|
|
||||||
when: redpanda_console_url is defined and (redpanda_console_url | length) > 0
|
|
||||||
retries: "{{ redpanda_retries }}"
|
|
||||||
delay: "{{ redpanda_delay }}"
|
|
||||||
until: redpanda_controller.status == 200
|
|
||||||
failed_when: false
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
- name: Redpanda Console | Print concise summary
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: >-
|
|
||||||
Redpanda Console overview {{ 'reachable' if (redpanda_controller is defined and (redpanda_controller.status|default(0))==200) else 'NOT reachable' }}.
|
|
||||||
status={{ redpanda_controller.status | default('n/a') }};
|
|
||||||
length={{ (redpanda_controller.content | default('')) | length }};
|
|
||||||
when: DEBUG == 1 and (redpanda_controller is defined)
|
|
||||||
|
|
||||||
# Optional detailed dump (short excerpt only)
|
|
||||||
- name: Redpanda Console | HTML excerpt (debug)
|
|
||||||
ansible.builtin.debug:
|
|
||||||
msg: "{{ (redpanda_controller.content | default(''))[:500] }}"
|
|
||||||
when: DEBUG == 1 and (redpanda_controller is defined) and (redpanda_controller.content is defined)
|
|
||||||
|
|
||||||
# -------------------------
|
|
||||||
# Final assertion: Console URL must be reachable
|
|
||||||
# -------------------------
|
|
||||||
|
|
||||||
- name: Redpanda Console | Assert overview reachable (if URL configured)
|
|
||||||
ansible.builtin.assert:
|
|
||||||
that:
|
|
||||||
- >
|
|
||||||
not (redpanda_console_url is defined and (redpanda_console_url | length) > 0)
|
|
||||||
or
|
|
||||||
(
|
|
||||||
redpanda_controller is defined
|
|
||||||
and (redpanda_controller.status | default(0)) == 200
|
|
||||||
)
|
|
||||||
fail_msg: "Redpanda Console URL {{ redpanda_console_url }} is NOT reachable with HTTP 200 after retries."
|
|
||||||
success_msg: "Redpanda Console URL {{ redpanda_console_url }} is reachable with HTTP 200."
|
|
||||||
---
# update_homarr.yml
# Pulls the latest Homarr image on a remote VM (reached over password SSH via
# sshpass), force-recreates the compose service, then verifies the homepage is
# reachable — first from the controller, then from inside the VM as a fallback.

- name: Update Homarr on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # Homarr specifics
    homarr_project: "homarr"
    homarr_compose_file: "/data/compose/homarr/docker-compose-homarr.yml"
    homarr_service: "homarr"
    homarr_image: "ghcr.io/homarr-labs/homarr:latest"
    homarr_port: 7575

    # Optional external URL for controller-side readiness check (e.g., https://homarr.example.com)
    # If empty/undefined, controller check is skipped and we only probe from the VM.
    homarr_url: "{{ lookup('env', 'HOMARR_URL') | default('', true) }}"

    # Fixed container name used in your compose (avoid conflicts with any leftover container)
    homarr_container_name: "homarr"

    # Retry policy (same pattern as Kuma): RETRIES attempts with 2s delay
    homarr_retries: "{{ RETRIES }}"
    homarr_delay: 2

    # Docker command prefix (consistent behavior)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs)
    homarr_commands:
      - "{{ docker_prefix }} pull -q {{ homarr_image }} >/dev/null"
      - "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} pull {{ homarr_service }} >/dev/null"
      # remove conflicting container name before compose up (silently)
      - "{{ docker_prefix }} rm -f {{ homarr_container_name }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} compose -p {{ homarr_project }} -f {{ homarr_compose_file }} up -d --no-deps --force-recreate {{ homarr_service }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: true

    - name: Run Homarr update commands on VM (via SSH)  # use SSHPASS env, hide item label
      ansible.builtin.command:
        argv:
          - sshpass
          - -e  # read password from SSHPASS environment
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"  # supply password via environment
      loop: "{{ homarr_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: homarr_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Homarr command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ homarr_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Homarr command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Homarr update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Homarr update commands succeeded."
      loop: "{{ homarr_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Homarr | Wait for homepage (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (homarr_url | regex_replace('/$','')) + '/' }}"
        method: GET
        return_content: true
        # Validate TLS only when using https://
        validate_certs: "{{ (homarr_url | default('')) is match('^https://') }}"
        status_code: 200
      register: homarr_controller
      delegate_to: localhost
      run_once: true
      when: homarr_url is defined and (homarr_url | length) > 0
      retries: "{{ homarr_retries }}"
      delay: "{{ homarr_delay }}"
      until: homarr_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Homarr | VM-side fetch (HTML via Python, with retries)  # use SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Fetch Homarr homepage from localhost and print HTML to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ homarr_port }}/", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: homarr_vm
      changed_when: false
      failed_when: false
      when: homarr_controller.status | default(0) != 200 or homarr_controller.content is not defined
      retries: "{{ homarr_retries }}"
      delay: "{{ homarr_delay }}"
      until: (homarr_vm.stdout | default('') | trim | length) > 0 and ('Homarr' in (homarr_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"

    - name: Homarr | Choose homepage HTML (controller wins, else VM)  # safe guard against empty result
      ansible.builtin.set_fact:
        homarr_home_html: >-
          {{
            (
              homarr_controller.content
              if (homarr_controller is defined)
                 and ((homarr_controller.status|default(0))==200)
                 and (homarr_controller.content is defined)
              else
              (homarr_vm.stdout | default('') | trim)
            )
          }}
      when:
        - (homarr_controller is defined and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined))
          or ((homarr_vm.stdout | default('') | trim | length) > 0)

    - name: Homarr | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Homarr homepage {{ 'reachable' if (homarr_home_html is defined) else 'NOT reachable' }}.
          Source={{ 'controller' if ((homarr_controller is defined) and (homarr_controller.status|default(0))==200 and (homarr_controller.content is defined)) else 'vm' if (homarr_vm.stdout|default('')|trim|length>0) else 'n/a' }};
          length={{ (homarr_home_html | default('')) | length }};
          contains('Homarr')={{ (homarr_home_html is defined) and ('Homarr' in homarr_home_html) }}
      when: DEBUG == 1

    - name: Homarr | Homepage unavailable (after retries)
      ansible.builtin.debug:
        msg: "Homarr web není dostupný ani po pokusech."
      when: homarr_home_html is not defined and DEBUG == 1

    # Optional detailed dump (short excerpt only)
    - name: Homarr | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (homarr_home_html | default(''))[:500] }}"
      when: homarr_home_html is defined and DEBUG == 1
---
# update_immich.yml
# Downloads the latest official Immich docker-compose.yml (with backup of the
# existing file), pulls images, brings the stack up, and verifies the web UI is
# reachable — controller first, then from inside the VM as a fallback.

- name: Update Immich on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # --- Immich specifics ---
    immich_project: "immich"

    # Where compose file lives on the VM
    immich_compose_dir: "/opt/immich"
    immich_compose_file: "{{ immich_compose_dir }}/docker-compose.yml"

    # Official Immich compose URL (latest release)
    immich_compose_url: "https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml"

    immich_port: 2283

    # Optional external URL for controller-side readiness check
    # Default to https://photos.martinfencl.eu/photos if IMMICH_URL is not set
    immich_url: "{{ lookup('env', 'IMMICH_URL') | default('https://photos.martinfencl.eu/photos', true) }}"

    immich_retries: "{{ RETRIES }}"
    immich_delay: 2

    # Docker command prefix (consistent behavior and quiet hints)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs)
    # 1) Download latest docker-compose.yml from GitHub (with backup)
    # 2) Pull images according to compose
    # 3) Start / update stack
    immich_commands:
      - "cd {{ immich_compose_dir }} && wget -qO docker-compose.yml.new {{ immich_compose_url }} || true; if [ -s docker-compose.yml.new ]; then echo 'Downloaded new docker-compose.yml from GitHub (Immich latest).'; if [ -f docker-compose.yml ]; then cp docker-compose.yml docker-compose.yml.bak-$(date +%F_%H-%M-%S); echo 'Existing docker-compose.yml backed up.'; fi; mv docker-compose.yml.new docker-compose.yml; else echo 'WARNING: Failed to download a valid docker-compose.yml, keeping existing one.' >&2; rm -f docker-compose.yml.new 2>/dev/null || true; fi"
      - "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} pull >/dev/null"
      - "{{ docker_prefix }} compose -p {{ immich_project }} -f {{ immich_compose_file }} up -d --remove-orphans >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: true

    - name: Immich | Check compose directory exists on VM
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "test -d {{ immich_compose_dir }}"
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: immich_dir
      changed_when: false
      failed_when: immich_dir.rc != 0

    - name: Run Immich update commands on VM (via SSH)  # use SSHPASS env, hide item value
      ansible.builtin.command:
        argv:
          - sshpass
          - -e  # read password from SSHPASS environment
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"  # supply password via environment
      loop: "{{ immich_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: immich_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Immich command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ immich_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Immich command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Immich update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Immich update commands succeeded."
      loop: "{{ immich_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Immich | Wait for web UI (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (immich_url | regex_replace('/$','')) + '/' }}"
        method: GET
        return_content: true
        # Validate TLS only when using https://
        validate_certs: "{{ (immich_url | default('')) is match('^https://') }}"
        status_code: 200
      register: immich_controller
      delegate_to: localhost
      run_once: true
      when: immich_url is defined and (immich_url | length) > 0
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
      until: immich_controller.status == 200
      failed_when: false  # allow task to finish without failing the play
      changed_when: false

    - name: Immich | VM-side fetch (HTML via Python, with retries)  # use SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Fetch Immich web UI from localhost and print HTML to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ immich_port }}/", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: immich_vm
      changed_when: false
      failed_when: false
      when: immich_controller.status | default(0) != 200 or immich_controller.content is not defined
      retries: "{{ immich_retries }}"
      delay: "{{ immich_delay }}"
      until: (immich_vm.stdout | default('') | trim | length) > 0 and ('Immich' in (immich_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"

    - name: Immich | Choose homepage HTML (controller wins, else VM)  # safe guard against empty result
      ansible.builtin.set_fact:
        immich_home_html: >-
          {{
            (
              immich_controller.content
              if (immich_controller is defined)
                 and ((immich_controller.status|default(0))==200)
                 and (immich_controller.content is defined)
              else
              (immich_vm.stdout | default('') | trim)
            )
          }}
      when:
        - (immich_controller is defined and (immich_controller.status|default(0))==200 and (immich_controller.content is defined))
          or ((immich_vm.stdout | default('') | trim | length) > 0)

    - name: Immich | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Immich web UI {{ 'reachable' if (immich_home_html is defined) else 'NOT reachable' }}.
          Source={{ 'controller' if ((immich_controller is defined) and (immich_controller.status|default(0))==200 and (immich_controller.content is defined)) else 'vm' if (immich_vm.stdout|default('')|trim|length>0) else 'n/a' }};
          length={{ (immich_home_html | default('')) | length }};
          contains('Immich')={{ (immich_home_html is defined) and ('Immich' in immich_home_html) }}
      when: DEBUG == 1

    - name: Immich | Web UI unavailable (after retries)
      ansible.builtin.debug:
        msg: "Immich web není dostupný ani po pokusech."
      when: immich_home_html is not defined and DEBUG == 1

    # Optional detailed dump (short excerpt only)
    - name: Immich | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (immich_home_html | default(''))[:500] }}"
      when: immich_home_html is defined and DEBUG == 1

    # -------------------------
    # Final assertion: controller URL must be reachable
    # -------------------------

    - name: Immich | Assert controller URL reachable (if configured)
      ansible.builtin.assert:
        that:
          - >
            not (immich_url is defined and (immich_url | length) > 0)
            or
            (
              immich_controller is defined
              and (immich_controller.status | default(0)) == 200
            )
        fail_msg: "Immich controller URL {{ immich_url }} is NOT reachable with HTTP 200 after retries."
        success_msg: "Immich controller URL {{ immich_url }} is reachable with HTTP 200."
---
# update_semaphore.yml
# Self-updates the Semaphore container on a remote VM. The compose command is
# launched with nohup in the background because Semaphore cannot wait for its
# own container to be recreated.

- name: Update Semaphore on VM via Proxmox
  hosts: linux_servers
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # --- Connection to VM (provided by Semaphore env vars) ---
    vm_ip: "{{ lookup('env', 'VM_IP') }}"  # IP vm-portainer
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"

    # --- Semaphore specifics ---
    semaphore_project: "semaphore"
    semaphore_compose_file: "/data/compose/semaphore/docker-compose.yml"
    semaphore_service: "semaphore"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: true

    - name: Run Semaphore self-update on VM in background (nohup)
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            nohup bash -c '
              unalias docker 2>/dev/null || true
              DOCKER_CLI_HINTS=0 docker compose \
                -p {{ semaphore_project }} \
                -f {{ semaphore_compose_file }} \
                up -d --no-deps --force-recreate --pull always {{ semaphore_service }}
            ' >/dev/null 2>&1 &
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: semaphore_update
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"

    - name: Show result of Semaphore self-update (debug)
      ansible.builtin.debug:
        msg: |
          RC: {{ semaphore_update.rc }}
          STDOUT: {{ (semaphore_update.stdout | default('')).strip() }}
          STDERR: {{ (semaphore_update.stderr | default('')).strip() }}
      when: DEBUG == 1
---
# Updates all APT packages and, when flatpak is installed, system-wide and
# per-user Flatpak applications. Flatpak runs are capped at 300 s; a timeout
# exit (rc 124) is tolerated so a slow mirror does not fail the play.

- name: Update system (APT + Flatpak)
  hosts: all
  become: true
  become_user: root
  become_method: sudo

  tasks:
    - name: Update APT cache
      ansible.builtin.apt:
        update_cache: true

    - name: Upgrade all APT packages
      ansible.builtin.apt:
        upgrade: dist

    - name: Check if flatpak binary exists
      ansible.builtin.stat:
        path: /usr/bin/flatpak
      register: flatpak_bin

    - name: Update system Flatpaks
      ansible.builtin.shell: timeout 300 flatpak update -y
      register: flatpak_sys
      # rc 124 means `timeout` expired — treat as best-effort, not failure
      failed_when: flatpak_sys.rc != 0 and flatpak_sys.rc != 124
      when: flatpak_bin.stat.exists

    - name: Update user Flatpaks
      become_user: jakub
      environment:
        XDG_RUNTIME_DIR: /run/user/1000
      ansible.builtin.shell: timeout 300 flatpak update -y
      register: flatpak_user
      failed_when: flatpak_user.rc != 0 and flatpak_user.rc != 124
      when: flatpak_bin.stat.exists
---
# nextcloud/update_uptime_kuma.yml
# Pulls the latest Uptime Kuma image on a remote VM (reached over password SSH
# via sshpass), force-recreates the compose service, then verifies the homepage
# is reachable — controller first, then from inside the VM as a fallback.

- name: Update Uptime Kuma on VM via Proxmox
  hosts: proxmox
  gather_facts: false
  become: true
  become_user: root
  become_method: sudo

  vars:
    # VM connection (provided by Semaphore env vars)
    vm_ip: "{{ lookup('env', 'VM_IP') }}"
    vm_user: "{{ lookup('env', 'VM_USER') }}"
    vm_pass: "{{ lookup('env', 'VM_PASS') }}"
    use_sudo: false

    # --- Debug mode (controlled via Semaphore variable) ---
    DEBUG: "{{ lookup('env', 'DEBUG') | default(0) | int }}"
    RETRIES: "{{ lookup('env', 'RETRIES') | default(25) | int }}"

    # Uptime Kuma specifics
    kuma_project: "uptime-kuma"
    kuma_compose_file: "/data/compose/uptime-kuma/docker-compose-uptime-kuma.yml"
    kuma_service: "uptime-kuma"
    kuma_image: "louislam/uptime-kuma:latest"
    kuma_port: 3001

    # Optional external URL for controller-side readiness check (e.g., https://kuma.example.com)
    # If empty/undefined, controller check is skipped and we only probe from the VM.
    kuma_url: "{{ lookup('env', 'KUMA_URL') | default('', true) }}"

    # Fixed container name used in your compose (conflicts with previous/Portainer-run container)
    kuma_container_name: "uptime-kuma-dev"

    # Retry policy
    kuma_retries: "{{ RETRIES }}"
    kuma_delay: 2

    # Docker command prefix (consistent behavior)
    docker_prefix: "unalias docker 2>/dev/null || true; DOCKER_CLI_HINTS=0; command docker"

    # Commands to run on the target VM (quiet outputs like in Collabora play)
    kuma_commands:
      - "{{ docker_prefix }} pull -q {{ kuma_image }} >/dev/null"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} pull {{ kuma_service }} >/dev/null"
      # remove conflicting container name before compose up (silently)
      - "{{ docker_prefix }} rm -f {{ kuma_container_name }} >/dev/null 2>&1 || true"
      - "{{ docker_prefix }} compose -p {{ kuma_project }} -f {{ kuma_compose_file }} up -d --no-deps --force-recreate {{ kuma_service }} >/dev/null"

  tasks:
    - name: Ensure sshpass is installed (for password-based SSH)
      ansible.builtin.apt:
        name: sshpass
        state: present
        update_cache: true

    - name: Run Uptime Kuma update commands on VM (via SSH)  # use SSHPASS env, hide item label
      ansible.builtin.command:
        argv:
          - sshpass
          - -e  # read password from SSHPASS environment
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - "{{ ('sudo ' if use_sudo else '') + item }}"
      environment:
        SSHPASS: "{{ vm_pass }}"  # supply password via environment
      loop: "{{ kuma_commands }}"
      loop_control:
        index_var: idx  # capture loop index
        label: "cmd-{{ idx }}"  # avoid printing full command in (item=...) line
      register: kuma_cmds
      changed_when: false
      no_log: "{{ DEBUG == 0 }}"  # hide outputs and env when not debugging

    - name: Show outputs for each Uptime Kuma command
      ansible.builtin.debug:
        msg: |
          CMD: {{ item.item }}
          RC: {{ item.rc }}
          STDOUT:
          {{ (item.stdout | default('')).strip() }}
          STDERR:
          {{ (item.stderr | default('')).strip() }}
      loop: "{{ kuma_cmds.results }}"
      when: DEBUG == 1

    - name: Fail play if any Uptime Kuma command failed  # also hide item label
      ansible.builtin.assert:
        that: "item.rc == 0"
        fail_msg: "Uptime Kuma update failed on VM: {{ item.item }} (rc={{ item.rc }})"
        success_msg: "All Uptime Kuma update commands succeeded."
      loop: "{{ kuma_cmds.results }}"
      loop_control:
        index_var: idx
        label: "cmd-{{ idx }}"

    # -------------------------
    # Readiness checks (controller first, then VM fallback)
    # -------------------------

    - name: Kuma | Wait for homepage (controller first, with retries)
      ansible.builtin.uri:
        url: "{{ (kuma_url | regex_replace('/$','')) + '/' }}"
        method: GET
        return_content: true
        # Validate TLS only when using https://
        validate_certs: "{{ (kuma_url | default('')) is match('^https://') }}"
        status_code: 200
      register: kuma_controller
      delegate_to: localhost
      run_once: true
      when: kuma_url is defined and (kuma_url | length) > 0
      retries: "{{ kuma_retries }}"
      delay: "{{ kuma_delay }}"
      until: kuma_controller.status == 200
      failed_when: false
      changed_when: false

    - name: Kuma | VM-side fetch (HTML via Python, with retries)  # use SSHPASS env here too
      ansible.builtin.command:
        argv:
          - sshpass
          - -e
          - ssh
          - -o
          - StrictHostKeyChecking=no
          - -o
          - ConnectTimeout=15
          - "{{ vm_user }}@{{ vm_ip }}"
          - bash
          - -lc
          - |
            python3 - <<'PY'
            # Fetch Kuma homepage from localhost and print HTML to stdout
            import urllib.request, sys
            try:
                with urllib.request.urlopen("http://127.0.0.1:{{ kuma_port }}/", timeout=15) as r:
                    sys.stdout.write(r.read().decode(errors='ignore'))
            except Exception:
                pass
            PY
      environment:
        SSHPASS: "{{ vm_pass }}"
      register: kuma_vm
      changed_when: false
      failed_when: false
      when: kuma_controller.status | default(0) != 200 or kuma_controller.content is not defined
      retries: "{{ kuma_retries }}"
      delay: "{{ kuma_delay }}"
      until: (kuma_vm.stdout | default('') | trim | length) > 0 and ('Uptime Kuma' in (kuma_vm.stdout | default('')))
      no_log: "{{ DEBUG == 0 }}"  # hide command and output when not debugging

    - name: Kuma | Choose homepage HTML (controller wins, else VM)  # safe guard against empty result
      ansible.builtin.set_fact:
        kuma_home_html: >-
          {{
            (
              kuma_controller.content
              if (kuma_controller is defined)
                 and ((kuma_controller.status|default(0))==200)
                 and (kuma_controller.content is defined)
              else
              (kuma_vm.stdout | default('') | trim)
            )
          }}
      when:
        - (kuma_controller is defined and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined))
          or ((kuma_vm.stdout | default('') | trim | length) > 0)

    - name: Kuma | Print concise summary
      ansible.builtin.debug:
        msg: >-
          Uptime Kuma homepage {{ 'reachable' if (kuma_home_html is defined) else 'NOT reachable' }}.
          Source={{ 'controller' if ((kuma_controller is defined) and (kuma_controller.status|default(0))==200 and (kuma_controller.content is defined)) else 'vm' if (kuma_vm.stdout|default('')|trim|length>0) else 'n/a' }};
          length={{ (kuma_home_html | default('')) | length }};
          contains('Uptime Kuma')={{ (kuma_home_html is defined) and ('Uptime Kuma' in kuma_home_html) }}
      when: DEBUG == 1

    - name: Kuma | Homepage unavailable (after retries)
      ansible.builtin.debug:
        msg: "Kuma web není dostupná ani po pokusech."
      when: kuma_home_html is not defined and DEBUG == 1

    # Optional detailed dump (short excerpt only)
    - name: Kuma | HTML excerpt (debug)
      ansible.builtin.debug:
        msg: "{{ (kuma_home_html | default(''))[:500] }}"
      when: kuma_home_html is defined and DEBUG == 1
@@ -1,15 +1,12 @@
|
|||||||
# users-ssh-nopasswd.yml
|
---
|
||||||
- name: Ensure users, SSH keys, and passwordless sudo
|
- name: Ensure users, SSH keys, and passwordless sudo
|
||||||
hosts: all
|
hosts: all
|
||||||
become: true
|
become: true
|
||||||
become_user: root
|
|
||||||
become_method: sudo
|
|
||||||
|
|
||||||
vars:
|
vars:
|
||||||
users:
|
users:
|
||||||
- name: automation
|
- name: automation
|
||||||
shell: /bin/bash
|
shell: /bin/bash
|
||||||
# optional extra groups besides sudo/wheel
|
|
||||||
groups: []
|
groups: []
|
||||||
sudo_nopasswd: true
|
sudo_nopasswd: true
|
||||||
keys:
|
keys:
|
||||||
@@ -30,33 +27,43 @@
|
|||||||
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPFS4fsqMjMMu/Bi/884bw7yJBqvWusDRESvanH6Owco jakub@jimbuntu"
|
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPFS4fsqMjMMu/Bi/884bw7yJBqvWusDRESvanH6Owco jakub@jimbuntu"
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
|
|
||||||
- name: Pick sudo group per distro
|
- name: Pick sudo group per distro
|
||||||
ansible.builtin.set_fact:
|
ansible.builtin.set_fact:
|
||||||
sudo_group: "{{ 'wheel' if ansible_facts.os_family in ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse'] else 'sudo' }}"
|
sudo_group: >-
|
||||||
|
{{ 'wheel'
|
||||||
|
if ansible_facts.os_family in
|
||||||
|
['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
|
||||||
|
else 'sudo' }}
|
||||||
|
|
||||||
- name: Ensure user exists (creates home)
|
- name: Ensure user exists (creates home)
|
||||||
ansible.builtin.user:
|
ansible.builtin.user:
|
||||||
name: "{{ item.name }}"
|
name: "{{ item.name }}"
|
||||||
shell: "{{ item.shell | default('/bin/bash') }}"
|
shell: "{{ item.shell | default(omit) }}"
|
||||||
groups: >-
|
groups: >-
|
||||||
{{ (
|
{{ (
|
||||||
(item.groups | default([]))
|
(item.groups | default([]))
|
||||||
+ ([sudo_group] if item.sudo_nopasswd | default(false) else [])
|
+ ([sudo_group] if item.sudo_nopasswd | default(false) else [])
|
||||||
) | unique | join(',') if
|
) | unique | join(',')
|
||||||
((item.groups | default([])) | length > 0) or (item.sudo_nopasswd | default(false))
|
if (
|
||||||
else omit }}
|
(item.groups | default([]) | length > 0)
|
||||||
|
or item.sudo_nopasswd | default(false)
|
||||||
|
)
|
||||||
|
else omit }}
|
||||||
append: true
|
append: true
|
||||||
create_home: true
|
create_home: true
|
||||||
state: present
|
state: present
|
||||||
loop: "{{ users }}"
|
loop: "{{ users }}"
|
||||||
|
|
||||||
- name: Install authorized SSH keys
|
- name: Enforce authorized SSH keys
|
||||||
ansible.builtin.authorized_key:
|
ansible.builtin.authorized_key:
|
||||||
user: "{{ item.0.name }}"
|
user: "{{ item.name }}"
|
||||||
key: "{{ item.1 }}"
|
key: "{{ item.keys | join('\n') }}"
|
||||||
state: present
|
state: present
|
||||||
manage_dir: true
|
manage_dir: true
|
||||||
loop: "{{ users | subelements('keys', skip_missing=True) }}"
|
exclusive: true
|
||||||
|
loop: "{{ users }}"
|
||||||
|
when: item.keys is defined
|
||||||
|
|
||||||
- name: Grant passwordless sudo via sudoers.d
|
- name: Grant passwordless sudo via sudoers.d
|
||||||
ansible.builtin.copy:
|
ansible.builtin.copy:
|
||||||
@@ -64,7 +71,9 @@
|
|||||||
owner: root
|
owner: root
|
||||||
group: root
|
group: root
|
||||||
mode: '0440'
|
mode: '0440'
|
||||||
content: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
|
content: |
|
||||||
|
# Managed by Ansible
|
||||||
|
{{ item.name }} ALL=(ALL) NOPASSWD:ALL
|
||||||
validate: 'visudo -cf %s'
|
validate: 'visudo -cf %s'
|
||||||
when: item.sudo_nopasswd | default(false)
|
when: item.sudo_nopasswd | default(false)
|
||||||
loop: "{{ users }}"
|
loop: "{{ users }}"
|
||||||
Reference in New Issue
Block a user