added middleware whitelist for .internal services
infrastructure/ansible/cleanup_rook.yml (new file, 73 lines)
@@ -0,0 +1,73 @@
---
- name: Cleanup Rook Ceph Resources (K8s)
  hosts: k3s_masters[0]
  become: yes
  tasks:
    - name: Delete ArgoCD Applications if they exist
      shell: kubectl delete application -n argocd rook-ceph-cluster rook-ceph-operator --ignore-not-found
      ignore_errors: yes

    - name: Delete Rook Ceph Cluster CR
      shell: kubectl -n rook-ceph delete cephcluster rook-ceph --wait=false --ignore-not-found

    - name: Patch CephCluster finalizer (to force deletion if stuck)
      shell: |
        kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"metadata":{"finalizers": []}}'
      ignore_errors: yes

    - name: Patch CephBlockPool finalizers
      shell: |
        kubectl -n rook-ceph get cephblockpool -o name | xargs -I {} kubectl -n rook-ceph patch {} --type merge -p '{"metadata":{"finalizers": []}}'
      ignore_errors: yes

    - name: Patch CephObjectStore finalizers
      shell: |
        kubectl -n rook-ceph get cephobjectstore -o name | xargs -I {} kubectl -n rook-ceph patch {} --type merge -p '{"metadata":{"finalizers": []}}'
      ignore_errors: yes

    - name: Patch CephFilesystem finalizers
      shell: |
        kubectl -n rook-ceph get cephfilesystem -o name | xargs -I {} kubectl -n rook-ceph patch {} --type merge -p '{"metadata":{"finalizers": []}}'
      ignore_errors: yes

    - name: Patch all remaining Rook resources finalizers
      shell: |
        kubectl api-resources --verbs=list --namespaced -o name | grep ceph.rook.io | xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph -o name | xargs -r -n 1 kubectl -n rook-ceph patch --type merge -p '{"metadata":{"finalizers": []}}'
      ignore_errors: yes

    - name: Force delete Namespace rook-ceph (remove finalizers from NS)
      shell: |
        kubectl get namespace rook-ceph -o json | jq '.spec.finalizers=[]' | kubectl replace --raw "/api/v1/namespaces/rook-ceph/finalize" -f -
      ignore_errors: yes

    - name: Delete Rook Ceph Namespace
      shell: kubectl delete namespace rook-ceph --wait=false --ignore-not-found
      ignore_errors: yes

    - name: Delete Rook Ceph CRDs (Global cleanup)
      shell: kubectl delete crd $(kubectl get crd | grep ceph.rook.io | awk '{print $1}')
      ignore_errors: yes

- name: Cleanup Rook Ceph Data on Nodes
  hosts: k3s_masters
  become: yes
  tasks:
    - name: Remove /var/lib/rook directory
      file:
        path: /var/lib/rook
        state: absent
        force: yes

    # WARNING: These commands will WIPE DATA on /dev/sdb
    - name: Zap Disk sdb
      shell: sgdisk --zap-all /dev/sdb || true
      ignore_errors: yes

    - name: WipeFS sdb
      shell: wipefs -a /dev/sdb || true
      ignore_errors: yes

    - name: Mapper clean
      shell: ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %
      ignore_errors: yes
      failed_when: false
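The finalizer patches are what let the teardown proceed once the Rook operator is already gone: a CephCluster (or the rook-ceph namespace itself) otherwise sits in Terminating forever, waiting for a controller that no longer exists. A minimal sketch of driving the playbook, assuming the inventory sits next to it as inventory.ini (the actual inventory path is not part of this diff):

cd infrastructure/ansible
ansible-playbook -i inventory.ini cleanup_rook.yml

# Sanity checks once it finishes: both should come back empty / NotFound.
kubectl get namespace rook-ceph
kubectl get crd | grep ceph.rook.io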
Ansible inventory, [all:vars] section:
@@ -11,4 +11,5 @@ vm-k3s-master-402.stabify.de ansible_host=10.100.40.12
 [all:vars]
 ansible_user=ansible
 ansible_ssh_common_args='-o StrictHostKeyChecking=no'
+#ansible_ssh_private_key_file=~/.ssh/id_ed25519
 ansible_ssh_private_key_file=~/.ssh/id_ed25519_ansible_prod
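With the production key pinned in the inventory, a one-liner confirms every host is still reachable before running the playbooks (inventory.ini again being an assumed file name):

ansible -i inventory.ini all -m ping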
infrastructure/ansible/setup_longhorn_disks.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
---
- name: Setup Storage Nodes for Longhorn
  hosts: k3s_masters
  become: yes
  tasks:
    - name: Install required packages for Longhorn (iSCSI, NFS)
      apt:
        name:
          - open-iscsi
          - nfs-common
          - util-linux
          - cryptsetup
        state: present
        update_cache: yes

    - name: Enable and start iscsid service
      systemd:
        name: iscsid
        enabled: yes
        state: started

    - name: Check if /dev/sdb exists
      stat:
        path: /dev/sdb
      register: disk_sdb

    - name: Fail if /dev/sdb is missing
      fail:
        msg: "/dev/sdb was not found on this host!"
      when: not disk_sdb.stat.exists

    - name: Create ext4 filesystem on /dev/sdb
      filesystem:
        fstype: ext4
        dev: /dev/sdb
        # force: yes  # Be careful with force, but since we wiped it, it should be fine.
        # If a filesystem already exists (e.g. from a previous partial run), this task is idempotent.

    - name: Create mount point /var/lib/longhorn
      file:
        path: /var/lib/longhorn
        state: directory
        mode: '0755'

    - name: Mount /dev/sdb to /var/lib/longhorn
      mount:
        path: /var/lib/longhorn
        src: /dev/sdb
        fstype: ext4
        state: mounted
        opts: defaults,noatime

    - name: Display disk usage for /var/lib/longhorn
      shell: df -h /var/lib/longhorn
      register: df_output
      changed_when: false

    - name: Show disk usage
      debug:
        msg: "{{ df_output.stdout_lines }}"
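The same hedged invocation works for the Longhorn prep, plus a spot check that the disk landed where Longhorn expects it (inventory.ini remains an assumed name; k3s_masters is the group the play targets):

ansible-playbook -i inventory.ini setup_longhorn_disks.yml

# state: mounted also writes the fstab entry, so the mount survives reboots;
# findmnt should show /dev/sdb mounted on every node in the group.
ansible -i inventory.ini k3s_masters -m command -a 'findmnt /var/lib/longhorn'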