Compare commits

5 Commits

Author SHA1 Message Date
595c0624d6 Merge pull request 'edit init 1' (#1) from edit into main
Reviewed-on: IM/ansible_fencl#1
2025-10-03 11:47:51 +00:00
fencl
2b5a2b4a1a edit init 1 2025-10-03 13:36:35 +02:00
b247ea0832 Update mikrotikbackup.yml 2025-09-19 10:58:22 +00:00
c476f04a8e Update inv_mikrotiks 2025-09-19 09:30:23 +00:00
5c185324d5 Update inv_linuxes 2025-09-18 12:31:07 +00:00
15 changed files with 330 additions and 589 deletions

29
check_stack_nextcloud.yml Normal file
View File

@@ -0,0 +1,29 @@
---
# Upload the stack health-check script to the Nextcloud host, run it,
# always show its output, then fail the play if the checks reported rc != 0.
- name: Upload and run stack health checks
  hosts: proxmox
  become: true
  vars:
    # Remote path where the health-check script is installed.
    health_script_path: /data/compose/nextcloud/stack-health.sh
  tasks:
    - name: Upload stack-health.sh
      ansible.builtin.copy:
        src: files/stack-health.sh
        dest: "{{ health_script_path }}"
        mode: '0755'

    - name: Run stack-health.sh
      ansible.builtin.shell: "{{ health_script_path }}"
      register: health
      # BUG FIX: without this, a non-zero rc aborts the play right here and
      # the "Show health output" / "Fail if checks failed" tasks never run.
      failed_when: false
      changed_when: false
      args:
        executable: /bin/bash

    - name: Show health output
      ansible.builtin.debug:
        msg: "{{ health.stdout | default('no stdout') }}"

    - name: Fail if checks failed (rc != 0)
      ansible.builtin.fail:
        msg: "Health checks failed"
      when: health.rc != 0

30
collabora_update.yml Normal file
View File

@@ -0,0 +1,30 @@
---
# Pull the newest collabora/code image and recreate the Collabora service
# inside its existing compose project.
- name: Update Collabora (pull + recreate in same compose project)
  hosts: proxmox
  become: true
  vars:
    collabora_compose_path: /data/compose/nextcloud/collabora-only.yml
    collabora_project_name: nextcloud-collabora  # based on your labels
  tasks:
    - name: Pull collabora/code:latest image
      community.docker.docker_image:
        name: collabora/code
        tag: latest
        source: pull
        # BUG FIX: with source=pull alone the module skips the pull when any
        # image with this tag already exists locally, so ':latest' is never
        # refreshed. force_source makes it pull unconditionally.
        force_source: true

    # Compose file contains only service "collabora", so this acts on that
    # service only. (Somewhat redundant with the docker_image pull above,
    # but kept as a belt-and-braces freshness guarantee.)
    - name: Compose pull (ensure freshest image)
      community.docker.docker_compose_v2:
        project_name: "{{ collabora_project_name }}"
        files: ["{{ collabora_compose_path }}"]
        pull: always
        state: present

    - name: Recreate collabora with new image
      community.docker.docker_compose_v2:
        project_name: "{{ collabora_project_name }}"
        files: ["{{ collabora_compose_path }}"]
        recreate: always
        state: present

View File

@@ -1,39 +0,0 @@
---
# Refresh the Homarr dashboard: pull the newest image, recreate only the
# Homarr service, then verify the web endpoint responds.
- name: Update Homarr
  hosts: linux_servers
  become: true
  gather_facts: false
  vars:
    homarr_project: homarr
    homarr_compose_file: /data/compose/homarr/docker-compose-homarr.yml
    homarr_service: homarr
    homarr_port: 7575
  tasks:
    - name: Pull latest Homarr image
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files: ["{{ homarr_compose_file | basename }}"]
        pull: always

    - name: Recreate Homarr service
      community.docker.docker_compose_v2:
        project_src: "{{ homarr_compose_file | dirname }}"
        files: ["{{ homarr_compose_file | basename }}"]
        services: ["{{ homarr_service }}"]
        state: present
        recreate: always

    # Runs on the target host itself, so 127.0.0.1 is the Homarr box.
    - name: Wait for Homarr port
      ansible.builtin.wait_for:
        host: 127.0.0.1
        port: "{{ homarr_port }}"
        timeout: 60

    - name: Check Homarr HTTP endpoint
      ansible.builtin.uri:
        url: "http://127.0.0.1:{{ homarr_port }}/"
        status_code: 200

View File

@@ -1,111 +0,0 @@
---
# First play: create baseline users with authorized SSH keys and
# passwordless sudo. Needs gathered facts (os_family) — gather_facts
# defaults to true, so it is not set explicitly.
- name: Baseline user setup
  hosts: all
  become: true
  vars:
    users:
      - name: automation
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."
      - name: hellsos
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."
      - name: jim
        shell: /bin/bash
        groups: []
        sudo_nopasswd: true
        ssh_keys:
          - "ssh-ed25519 AAAAC3..."
  tasks:
    # CONSISTENCY FIX: use the FQCN like every other module call in this file.
    - name: Pick sudo group per distro
      ansible.builtin.set_fact:
        sudo_group: >-
          {{ 'wheel'
             if ansible_facts.os_family in
                ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
             else 'sudo' }}

    - name: Ensure user exists
      ansible.builtin.user:
        name: "{{ item.name }}"
        shell: "{{ item.shell }}"
        groups: "{{ sudo_group }}"
        append: true
        create_home: true
      loop: "{{ users }}"

    # exclusive=true removes any key not listed here — keys are enforced,
    # not merely added.
    - name: Enforce authorized SSH keys
      ansible.builtin.authorized_key:
        user: "{{ item.name }}"
        key: "{{ item.ssh_keys | join('\n') }}"
        exclusive: true
      loop: "{{ users }}"

    - name: Grant passwordless sudo
      ansible.builtin.copy:
        dest: "/etc/sudoers.d/{{ item.name }}"
        mode: '0440'
        content: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL\n"
        validate: 'visudo -cf %s'
      loop: "{{ users }}"
      when: item.sudo_nopasswd

# ==============================
# SECOND PLAY: SSH HARDENING
# ==============================
# Tagged 'never' so it only runs when explicitly requested via
# --tags hardening.
- name: SSH Hardening
  hosts: all
  become: true
  tags: never,hardening
  tasks:
    - name: Detect if system is Proxmox
      ansible.builtin.stat:
        path: /usr/bin/pveversion
      register: proxmox_check

    - name: Ensure sshd_config.d directory exists
      ansible.builtin.file:
        path: /etc/ssh/sshd_config.d
        state: directory
        # FIX: set the mode explicitly instead of inheriting the umask.
        mode: '0755'

    - name: Deploy SSH hardening config
      ansible.builtin.copy:
        dest: /etc/ssh/sshd_config.d/99-ansible-hardening.conf
        mode: '0644'
        content: |
          PasswordAuthentication no
          ChallengeResponseAuthentication no
          PubkeyAuthentication yes
          AuthenticationMethods publickey
          UsePAM yes
          {% if not proxmox_check.stat.exists %}
          PermitRootLogin no
          {% else %}
          PermitRootLogin prohibit-password
          {% endif %}
        # NOTE(review): this validates the fragment as a standalone config;
        # sshd -t normally accepts that, but confirm on your oldest distro.
        validate: 'sshd -t -f %s'
      notify: Restart SSH
  handlers:
    - name: Restart SSH
      ansible.builtin.service:
        name: "{{ 'sshd'
                  if ansible_facts.os_family in
                     ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
                  else 'ssh' }}"
        state: restarted

View File

@@ -1,9 +1,2 @@
[linux_servers] [linux_servers]
jimbuntu ansible_host=192.168.19.4 proxmox ansible_host=192.168.69.2
jim_storage ansible_host=192.168.19.7
portainer2_hellsos ansible_host=192.168.52.9
portainernode_hellsos ansible_host=192.168.52.21
portainernode2_jim ansible_host=192.168.19.8
[local]
localhost ansible_connection=local

View File

@@ -1,16 +1,2 @@
[mikrotik_routers] [mikrotiks]
jim_main ansible_host=192.168.19.2 main_mikrotik_fencl ansible_host=192.168.69.1
jim_gw2 ansible_host=192.168.19.3
hellsos ansible_host=192.168.40.1
ewolet ansible_host=192.168.90.1
Poli ansible_host=192.168.2.1
Schmid ansible_host=192.168.177.1
#Volf ansible_host=192.168.88.1
fencl_home ansible_host=192.168.68.1
fencl_tata ansible_host=192.168.69.1
[mikrotik_routers:vars]
ansible_connection=network_cli
ansible_network_os=community.routeros.routeros
ansible_command_timeout=15

View File

@@ -33,7 +33,7 @@
current_date: "{{ date_output.stdout }}" current_date: "{{ date_output.stdout }}"
- name: Export router config - name: Export router config
shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/export show-sensitive" shell: timeout 15 ssh -o StrictHostKeyChecking=no {{ ansible_user }}@{{ ansible_host }} -p {{ ansible_port }} "/export"
register: export_output register: export_output
delegate_to: localhost delegate_to: localhost
when: system_identity.rc == 0 when: system_identity.rc == 0

View File

@@ -1,251 +0,0 @@
---
# Back up each MikroTik's config, check for RouterOS updates, install them
# when available, and report the outcome to a Checkmk passive service.
- name: Backup and/or Upgrade MikroTik
  hosts: mikrotik_routers
  gather_facts: false
  serial: 10
  vars:
    backup_dir: /opt/mikrotik_backups/
    checkmk_url: "http://192.168.19.8:8080/cmk/check_mk/api/1.0/domain-types/host_service_passive_check/actions/set/invoke"
    checkmk_user: "apitoken"
    # SECURITY: API token committed in plaintext — move to Ansible Vault.
    checkmk_token: "YXBpdG9rZW46TkVXX1NFQ1JFVA=="
    checkmk_service_description: "MikroTik Backup/Upgrade"
  tasks:
    # ----------------------------
    # Identity + timestamp
    # ----------------------------
    - name: Get router identity
      community.routeros.command:
        commands: /system identity print
      register: identity_raw

    # stdout[0] looks like "name: <router>"; take the part after ": ".
    - name: Parse router name
      ansible.builtin.set_fact:
        router_name: "{{ identity_raw.stdout[0].split(': ')[1] | trim }}"

    - name: Get timestamp
      ansible.builtin.command: date +%Y-%m-%d_%H-%M-%S
      register: date_out
      delegate_to: localhost
      changed_when: false

    - name: Set timestamp
      ansible.builtin.set_fact:
        ts: "{{ date_out.stdout }}"

    # ----------------------------
    # Backup
    # ----------------------------
    - name: Ensure backup directory exists
      ansible.builtin.file:
        path: "{{ backup_dir }}"
        state: directory
        mode: "0755"
      delegate_to: localhost

    - name: Export router config
      community.routeros.command:
        commands: /export terse show-sensitive
      register: export_cfg

    # 0600 because show-sensitive includes secrets.
    - name: Save export locally
      ansible.builtin.copy:
        content: "{{ export_cfg.stdout[0] }}"
        dest: "{{ backup_dir }}/{{ router_name }}-{{ ts }}.rsc"
        mode: "0600"
      delegate_to: localhost

    - name: Mark backup success
      ansible.builtin.set_fact:
        backup_file: "{{ backup_dir }}/{{ router_name }}-{{ ts }}.rsc"

    # ----------------------------
    # Update check
    # ----------------------------
    - name: Trigger update check
      community.routeros.command:
        commands: /system package update check-for-updates once

    # Give RouterOS time to finish contacting the update servers.
    - name: Wait for MikroTik
      ansible.builtin.pause:
        seconds: 5

    - name: Get update info
      community.routeros.command:
        commands: /system package update print
      register: update_info

    # ----------------------------
    # Parsing
    # ----------------------------
    - name: Extract installed version
      ansible.builtin.set_fact:
        installed_version: >-
          {{
            update_info.stdout[0]
            | regex_search('installed-version:\s*(\S+)', '\1')
            | first
            | default('unknown')
            | trim
          }}

    - name: Extract latest version
      ansible.builtin.set_fact:
        latest_version: >-
          {{
            update_info.stdout[0]
            | regex_search('latest-version:\s*(\S+)', '\1')
            | first
            | default('unknown')
            | trim
          }}

    - name: Extract status
      ansible.builtin.set_fact:
        update_status: >-
          {{
            update_info.stdout[0]
            | regex_search('status:\s*(.+)', '\1')
            | first
            | default('unknown')
            | trim
          }}

    - name: Extract channel
      ansible.builtin.set_fact:
        update_channel: >-
          {{
            update_info.stdout[0]
            | regex_search('channel:\s*(\S+)', '\1')
            | first
            | default('unknown')
            | trim
          }}

    - name: Debug parsed values
      ansible.builtin.debug:
        msg: >
          router={{ router_name }}
          channel={{ update_channel }}
          installed={{ installed_version }}
          latest={{ latest_version }}
          status={{ update_status }}

    # ----------------------------
    # Logic
    # ----------------------------
    - name: Detect update failure
      ansible.builtin.set_fact:
        update_failed: "{{ 'error' in (update_status | lower) or 'failed' in (update_status | lower) }}"

    # Upgrade only when both versions parsed, they differ, and the check
    # itself did not report an error.
    - name: Decide upgrade
      ansible.builtin.set_fact:
        upgrade_needed: >-
          {{
            not update_failed | bool and
            installed_version != 'unknown' and
            latest_version != 'unknown' and
            installed_version != latest_version
          }}

    - name: Show decision
      ansible.builtin.debug:
        msg: "Router={{ router_name }} installed={{ installed_version }} latest={{ latest_version }} upgrade_needed={{ upgrade_needed }}"

    - name: Skip upgrade (already up to date)
      ansible.builtin.debug:
        msg: "Router {{ router_name }} is already up to date ({{ installed_version }})"
      when: not upgrade_needed | bool

    # ----------------------------
    # Upgrade
    # ----------------------------
    - name: Install update
      community.routeros.command:
        commands: /system package update install
      when: upgrade_needed | bool
      register: upgrade_result

    # NOTE(review): wait_for_connection over a persistent network_cli
    # connection may need a connection reset to notice the reboot — confirm
    # on a live upgrade.
    - name: Wait for reboot
      ansible.builtin.wait_for_connection:
        delay: 180
        timeout: 600
        sleep: 10
      when:
        - upgrade_needed | bool
        - upgrade_result is succeeded

    - name: Confirm version after upgrade
      community.routeros.command:
        commands: /system resource print
      register: post_upgrade_info
      when: upgrade_needed | bool

    - name: Parse new version
      ansible.builtin.set_fact:
        post_upgrade_version: >-
          {{
            post_upgrade_info.stdout[0]
            | regex_search('version:\s*(\S+)', '\1')
            | first
            | default('unknown')
            | trim
          }}
      when: upgrade_needed | bool

    # ----------------------------
    # Checkmk
    # ----------------------------
    - name: Result when no upgrade needed
      ansible.builtin.set_fact:
        cmk_state: "{{ 2 if update_failed | bool else 0 }}"
        cmk_output: >-
          {{ 'CRIT' if update_failed | bool else 'OK' }} -
          router={{ router_name }}
          installed={{ installed_version }}
          latest={{ latest_version }}
          status="{{ update_status }}"
          upgrade_needed=no
      when: not upgrade_needed | bool

    - name: Result when upgrade happened
      ansible.builtin.set_fact:
        cmk_state: "{{ 0 if post_upgrade_version == latest_version else 2 }}"
        cmk_output: >-
          {{ 'OK' if post_upgrade_version == latest_version else 'CRIT' }} -
          router={{ router_name }}
          upgraded_from={{ installed_version }}
          upgraded_to={{ post_upgrade_version }}
          latest={{ latest_version }}
      when: upgrade_needed | bool

    # NOTE(review): host_name is the fixed string "mikrotiks", so every
    # router writes to the same Checkmk host/service and the last one wins —
    # confirm whether this should be router_name instead.
    - name: Send result to Checkmk
      delegate_to: localhost
      ansible.builtin.uri:
        url: "{{ checkmk_url }}"
        method: POST
        url_username: "{{ checkmk_user }}"
        url_password: "{{ checkmk_token }}"
        force_basic_auth: true
        headers:
          Content-Type: "application/json"
          Accept: "application/json"
        body_format: json
        body:
          host_name: "mikrotiks"
          service_description: "{{ checkmk_service_description }}"
          state: "{{ cmk_state | int }}"
          output: "{{ cmk_output }}"
      failed_when: false
      register: cmk_result

    - name: Debug Checkmk response
      ansible.builtin.debug:
        msg: "status={{ cmk_result.status }} body={{ cmk_result.json | default(cmk_result.msg) }}"

    - name: Show Checkmk output
      ansible.builtin.debug:
        var: cmk_output

38
nextcloud_backup.yml Normal file
View File

@@ -0,0 +1,38 @@
---
# Back up Nextcloud config, custom_apps, and the MariaDB database into a
# dated directory next to the compose project.
- name: Nextcloud backup (config, custom_apps, DB)
  hosts: proxmox
  become: true
  vars:
    nc_root: /data/compose/nextcloud
    # Requires gathered facts for ansible_date_time (gather_facts defaults
    # to true, so none is disabled here).
    backup_dir: "{{ nc_root }}/backup-{{ ansible_date_time.date }}"
    db_container: nextcloud-db
  tasks:
    - name: Ensure backup directory exists
      ansible.builtin.file:
        path: "{{ backup_dir }}"
        state: directory
        mode: '0755'

    # Use archive module to create tar.gz directly on the remote host
    - name: Archive config directory
      ansible.builtin.archive:
        path: "{{ nc_root }}/config"
        dest: "{{ backup_dir }}/config.tgz"
        format: gz

    - name: Archive custom_apps directory
      ansible.builtin.archive:
        path: "{{ nc_root }}/custom_apps"
        dest: "{{ backup_dir }}/custom_apps.tgz"
        format: gz

    # Dump DB directly to a file on the host (avoid shuttling dump through Ansible)
    - name: Dump MariaDB from container to file
      ansible.builtin.shell: |
        set -euo pipefail
        # BUG FIX: was '$$MYSQL_USER' etc. — '$$' is a docker-compose escape,
        # but in a plain shell it expands to the PID, yielding garbage
        # credentials. A single '$' is correct here: the single quotes keep
        # the remote bash from expanding the variables, so the sh inside the
        # container resolves them from its own environment.
        docker exec {{ db_container }} sh -c 'command -v mariadb-dump >/dev/null && mariadb-dump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE" || mysqldump -u"$MYSQL_USER" -p"$MYSQL_PASSWORD" "$MYSQL_DATABASE"' \
          > "{{ backup_dir }}/db.sql"
      args:
        executable: /bin/bash

112
nextcloud_upgrade.yml Normal file
View File

@@ -0,0 +1,112 @@
---
# Upgrade the Nextcloud container to a new image tag inside its existing
# compose project, then run the standard occ upgrade/maintenance steps.
- name: Upgrade Nextcloud to 31-apache (pull + recreate + occ)
  hosts: proxmox
  become: true
  vars:
    nc_container: nextcloud
    nc_image_tag: "31-apache"  # change to 32-apache when you step to next major
    # Ports/volumes/env exactly as you use:
    nc_root: /data/compose/nextcloud
    nc_http_port: "8080:80"
  tasks:
    - name: Gather nextcloud container info
      community.docker.docker_container_info:
        name: "{{ nc_container }}"
      register: nc_info

    # BUG FIX: the set_fact below was conditional on nc_info.exists while
    # the rendered compose template used nc_net_primary unconditionally —
    # a missing container produced an undefined-variable error midway.
    # Fail fast with a clear message instead.
    - name: Fail fast when the nextcloud container is missing
      ansible.builtin.assert:
        that:
          - nc_info.exists
        fail_msg: "Container '{{ nc_container }}' not found — cannot derive compose project/network"

    - name: Derive compose project & network from existing container
      ansible.builtin.set_fact:
        nc_project: "{{ nc_info.container.Config.Labels['com.docker.compose.project'] | default('nextcloud') }}"
        nc_networks: "{{ (nc_info.container.NetworkSettings.Networks | default({})).keys() | list }}"
        nc_net_primary: "{{ (nc_info.container.NetworkSettings.Networks | default({})).keys() | list | first }}"

    - name: Enable maintenance mode
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ maintenance:mode --on

    # SECURITY: DB and admin credentials are committed in plaintext below —
    # move them to Ansible Vault or an env file.
    - name: Render one-off compose for nextcloud (single-service)
      ansible.builtin.copy:
        dest: /tmp/nc.yml
        mode: '0644'
        content: |
          name: {{ nc_project }}
          services:
            nextcloud:
              image: nextcloud:{{ nc_image_tag }}
              container_name: {{ nc_container }}
              restart: unless-stopped
              networks: [cloud]
              ports: ["{{ nc_http_port }}"]
              volumes:
                - {{ nc_root }}/config:/var/www/html/config
                - {{ nc_root }}/data:/var/www/html/data
                - {{ nc_root }}/custom_apps:/var/www/html/custom_apps
              environment:
                TZ: Europe/Prague
                MYSQL_DATABASE: nextcloud
                MYSQL_USER: nextcloud
                MYSQL_PASSWORD: dbpassword
                MYSQL_HOST: nextclouddb
                REDIS_HOST: redis
                NEXTCLOUD_ADMIN_USER: root
                NEXTCLOUD_ADMIN_PASSWORD: '1234SilneHeslo.-.'
          networks:
            cloud:
              external: true
              name: {{ nc_net_primary }}

    - name: Pull the new Nextcloud image
      community.docker.docker_compose_v2:
        project_name: "{{ nc_project }}"
        files: ["/tmp/nc.yml"]
        pull: always
        state: present

    - name: Recreate Nextcloud with the new image
      community.docker.docker_compose_v2:
        project_name: "{{ nc_project }}"
        files: ["/tmp/nc.yml"]
        recreate: always
        state: present

    - name: Run occ upgrade
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ upgrade

    - name: Recommended DB maintenance (safe to run)
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ db:add-missing-indices
      ignore_errors: true

    - name: Convert filecache bigint (safe)
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ db:convert-filecache-bigint --no-interaction
      ignore_errors: true

    - name: Disable maintenance mode
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ maintenance:mode --off

    - name: Show status
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ status
      register: nc_status

    - name: Print status
      ansible.builtin.debug:
        msg: "{{ nc_status.stdout | default('no output') }}"

75
redis_update.yml Normal file
View File

@@ -0,0 +1,75 @@
---
# Pull a fresh Redis image and recreate it inside the same compose project
# as Nextcloud, bracketed by maintenance mode on the Nextcloud side.
- name: Update Redis (pull + recreate, same stack)
  hosts: proxmox
  become: true
  vars:
    nc_container: nextcloud
    redis_container: redis
    redis_image: "redis:7-alpine"
    nc_root: /data/compose/nextcloud
  tasks:
    - name: Gather nextcloud container info (to learn project + network)
      community.docker.docker_container_info:
        name: "{{ nc_container }}"
      register: nc_info

    # BUG FIX: nc_net_primary was set only when nc_info.exists but used
    # unconditionally in the rendered compose — a missing container caused
    # an undefined-variable failure midway. Fail fast instead.
    - name: Fail fast when the nextcloud container is missing
      ansible.builtin.assert:
        that:
          - nc_info.exists
        fail_msg: "Container '{{ nc_container }}' not found — cannot derive compose project/network"

    - name: Derive compose project & network
      ansible.builtin.set_fact:
        nc_project: "{{ nc_info.container.Config.Labels['com.docker.compose.project'] | default('nextcloud') }}"
        nc_net_primary: "{{ (nc_info.container.NetworkSettings.Networks | default({})).keys() | list | first }}"

    - name: Enable maintenance mode (optional safety)
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ maintenance:mode --on
      ignore_errors: true

    - name: Render one-off compose for Redis
      ansible.builtin.copy:
        dest: /tmp/redis.yml
        mode: '0644'
        content: |
          name: {{ nc_project }}
          services:
            redis:
              image: {{ redis_image }}
              container_name: {{ redis_container }}
              restart: unless-stopped
              networks: [cloud]
              volumes:
                - {{ nc_root }}/redis:/data
          networks:
            cloud:
              external: true
              name: {{ nc_net_primary }}

    - name: Pull redis image
      community.docker.docker_compose_v2:
        project_name: "{{ nc_project }}"
        files: ["/tmp/redis.yml"]
        pull: always
        state: present

    - name: Recreate redis
      community.docker.docker_compose_v2:
        project_name: "{{ nc_project }}"
        files: ["/tmp/redis.yml"]
        recreate: always
        state: present

    - name: Disable maintenance mode (if we turned it on)
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php occ maintenance:mode --off
      ignore_errors: true

    - name: Fire one cron tick (cleanup pending jobs)
      community.docker.docker_container_exec:
        container: "{{ nc_container }}"
        user: "www-data"
        command: php -f /var/www/html/cron.php

View File

@@ -1,2 +1,4 @@
---
collections: collections:
- name: community.routeros - name: community.docker
- name: ansible.posix

View File

@@ -1,57 +0,0 @@
---
# Send an SMS with a random marker through internet-master.cz, wait, then
# poll the receive API and verify the marker arrived back.
- name: Send and verify SMS delivery via internet-master.cz
  hosts: localhost
  gather_facts: false
  vars:
    sms_number: "601358865"
    sms_username: "mikrotik"
    # SECURITY: plaintext credentials committed to the repo — move to
    # Ansible Vault.
    sms_password_send: "jdkotzHJIOPWhjtr32D"
    sms_password_recv: "jdkotzHJIOPWhjtr32D"
    sms_wait_seconds: 120  # Wait 2 minutes for delivery
  tasks:
    - name: Generate random test string
      ansible.builtin.set_fact:
        random_string: "mikrotik_{{ lookup('password', '/dev/null length=8 chars=ascii_letters') }}"

    - name: Send SMS message
      ansible.builtin.uri:
        url: "https://sms.internet-master.cz/send/?number={{ sms_number }}&message=@mikrotik@{{ random_string | urlencode }}&type=class-1&username={{ sms_username }}&password={{ sms_password_send }}"
        method: GET
        return_content: true
      register: send_result

    - name: Show send API response
      ansible.builtin.debug:
        var: send_result.content

    - name: Wait for SMS to be delivered
      ansible.builtin.pause:
        seconds: "{{ sms_wait_seconds }}"

    - name: Fetch received messages
      ansible.builtin.uri:
        url: "https://sms.internet-master.cz/receive/?username={{ sms_username }}&password={{ sms_password_recv }}"
        method: GET
        return_content: true
      register: recv_result

    - name: Parse received JSON
      ansible.builtin.set_fact:
        inbox: "{{ recv_result.json.inbox | default([]) }}"

    # BUG FIX: the sent body is '@mikrotik@<random>', so an exact-equality
    # match against '<random>' alone could never succeed unless the gateway
    # strips the wrapper (NOTE(review): confirm the gateway's inbox format).
    # Substring search matches either way.
    - name: Check if random string message was received
      ansible.builtin.set_fact:
        message_found: "{{ inbox | selectattr('message', 'defined') | selectattr('message', 'search', random_string) | list | length > 0 }}"

    - name: Report result
      ansible.builtin.debug:
        msg: >
          SMS with message '{{ random_string }}' was {{
            'delivered ✅' if message_found else 'NOT delivered ❌'
          }}.

    - name: Fail if not delivered
      ansible.builtin.fail:
        msg: "Message '{{ random_string }}' not found in received inbox!"
      when: not message_found

View File

@@ -1,91 +1,34 @@
---
- name: Update system (APT + Flatpak) - name: Update system (APT + Flatpak)
hosts: all hosts: all
become: yes
gather_facts: yes
serial: 5
become: true
become_user: root
become_method: sudo
tasks: tasks:
- name: Update APT cache
- name: Ensure SSH is reachable (skip host if not)
delegate_to: localhost
wait_for:
host: "{{ ansible_host | default(inventory_hostname) }}"
port: 22
timeout: 5
register: ssh_check
ignore_errors: yes
- meta: end_host
when: ssh_check is failed
- name: Ping with retries (handle intermittent flaps)
ping:
register: ping_result
retries: 5
delay: 5
until: ping_result is success
- name: Wait for apt lock to be released
shell: |
while fuser /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do
echo "Waiting for apt lock..."
sleep 5
done
changed_when: false
- name: Update apt cache
apt: apt:
update_cache: yes update_cache: yes
- name: Perform full upgrade - name: Upgrade all APT packages
apt: apt:
upgrade: full upgrade: dist
autoremove: yes
autoclean: yes
register: apt_upgrade
retries: 3
delay: 10
until: apt_upgrade is succeeded
- name: Fix broken packages - name: Check if flatpak binary exists
command: apt-get -f install -y stat:
register: fix_result path: /usr/bin/flatpak
failed_when: false register: flatpak_bin
changed_when: "'Setting up' in fix_result.stdout"
- name: Check if Flatpak is installed
command: which flatpak
register: flatpak_check
changed_when: false
failed_when: false
- name: Update system Flatpaks - name: Update system Flatpaks
command: flatpak update -y --noninteractive --system shell: timeout 300 flatpak update -y
when: flatpak_check.rc == 0 register: flatpak_sys
failed_when: false failed_when: flatpak_sys.rc != 0 and flatpak_sys.rc != 124
when: flatpak_bin.stat.exists
- name: Update user Flatpaks - name: Update user Flatpaks
command: flatpak update -y --noninteractive --user become_user: jakub
become: false environment:
when: flatpak_check.rc == 0 XDG_RUNTIME_DIR: /run/user/1000
failed_when: false shell: timeout 300 flatpak update -y
register: flatpak_user
- name: Remove unused Flatpaks failed_when: flatpak_user.rc != 0 and flatpak_user.rc != 124
command: flatpak uninstall -y --noninteractive --unused when: flatpak_bin.stat.exists
when: flatpak_check.rc == 0
failed_when: false
- name: Update snap packages
command: snap refresh
failed_when: false
- name: Check if reboot is required
stat:
path: /var/run/reboot-required
register: reboot_required
- name: Notify if reboot required
debug:
msg: "Reboot required on {{ inventory_hostname }}"
when: reboot_required.stat.exists

View File

@@ -1,12 +1,15 @@
--- # users-ssh-nopasswd.yml
- name: Ensure users, SSH keys, and passwordless sudo - name: Ensure users, SSH keys, and passwordless sudo
hosts: all hosts: all
become: true become: true
become_user: root
become_method: sudo
vars: vars:
users: users:
- name: automation - name: automation
shell: /bin/bash shell: /bin/bash
# optional extra groups besides sudo/wheel
groups: [] groups: []
sudo_nopasswd: true sudo_nopasswd: true
keys: keys:
@@ -27,43 +30,33 @@
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPFS4fsqMjMMu/Bi/884bw7yJBqvWusDRESvanH6Owco jakub@jimbuntu" - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPFS4fsqMjMMu/Bi/884bw7yJBqvWusDRESvanH6Owco jakub@jimbuntu"
tasks: tasks:
- name: Pick sudo group per distro - name: Pick sudo group per distro
ansible.builtin.set_fact: ansible.builtin.set_fact:
sudo_group: >- sudo_group: "{{ 'wheel' if ansible_facts.os_family in ['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse'] else 'sudo' }}"
{{ 'wheel'
if ansible_facts.os_family in
['RedHat','Rocky','AlmaLinux','Fedora','OracleLinux','Suse']
else 'sudo' }}
- name: Ensure user exists (creates home) - name: Ensure user exists (creates home)
ansible.builtin.user: ansible.builtin.user:
name: "{{ item.name }}" name: "{{ item.name }}"
shell: "{{ item.shell | default(omit) }}" shell: "{{ item.shell | default('/bin/bash') }}"
groups: >- groups: >-
{{ ( {{ (
(item.groups | default([])) (item.groups | default([]))
+ ([sudo_group] if item.sudo_nopasswd | default(false) else []) + ([sudo_group] if item.sudo_nopasswd | default(false) else [])
) | unique | join(',') ) | unique | join(',') if
if ( ((item.groups | default([])) | length > 0) or (item.sudo_nopasswd | default(false))
(item.groups | default([]) | length > 0)
or item.sudo_nopasswd | default(false)
)
else omit }} else omit }}
append: true append: true
create_home: true create_home: true
state: present state: present
loop: "{{ users }}" loop: "{{ users }}"
- name: Enforce authorized SSH keys - name: Install authorized SSH keys
ansible.builtin.authorized_key: ansible.builtin.authorized_key:
user: "{{ item.name }}" user: "{{ item.0.name }}"
key: "{{ item.keys | join('\n') }}" key: "{{ item.1 }}"
state: present state: present
manage_dir: true manage_dir: true
exclusive: true loop: "{{ users | subelements('keys', skip_missing=True) }}"
loop: "{{ users }}"
when: item.keys is defined
- name: Grant passwordless sudo via sudoers.d - name: Grant passwordless sudo via sudoers.d
ansible.builtin.copy: ansible.builtin.copy:
@@ -71,9 +64,7 @@
owner: root owner: root
group: root group: root
mode: '0440' mode: '0440'
content: | content: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL"
# Managed by Ansible
{{ item.name }} ALL=(ALL) NOPASSWD:ALL
validate: 'visudo -cf %s' validate: 'visudo -cf %s'
when: item.sudo_nopasswd | default(false) when: item.sudo_nopasswd | default(false)
loop: "{{ users }}" loop: "{{ users }}"