full removal of rook

2026-01-15 13:53:45 +01:00
parent 9618d0f4e4
commit 1f43cc3f0d
7 changed files with 0 additions and 257 deletions
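
Note: this commit only deletes the manifests from the GitOps repo; with prune: true on both Applications, Argo CD then removes the corresponding resources from the cluster. For a truly full removal, the Rook documentation recommends confirming the cleanup policy on the CephCluster before it is deleted, so the operator sanitizes its hosts and disks on the way out. A minimal sketch, assuming the CephCluster below is still running at teardown time:

    kubectl -n rook-ceph patch cephcluster rook-ceph --type merge \
      -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}'
    kubectl -n rook-ceph delete cephcluster rook-ceph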


@@ -1,22 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "2"
spec:
  project: default
  source:
    repoURL: https://git.cloud-infra.prod.openmailserver.de/stabify/gitops.git
    targetRevision: HEAD
    path: infrastructure/rook-ceph
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true


@@ -1,34 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-operator
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "1"
spec:
  project: default
  source:
    chart: rook-ceph
    repoURL: https://charts.rook.io/release
    targetRevision: v1.14.8 # pin a fixed version for stability
    helm:
      values: |
        crds:
          enabled: true
        resources:
          limits:
            cpu: 500m
            memory: 256Mi
          requests:
            cpu: 100m
            memory: 128Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true # important for the large CRDs


@@ -1,104 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    image: quay.io/ceph/ceph:v18.2.2
    allowUnsupported: false
  dataDirHostPath: /var/lib/rook
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  waitTimeoutForHealthyOSDInMinutes: 10
  upgradeOSDRequiresHealthyPGs: false
  mon:
    count: 3
    allowMultiplePerNode: false
  mgr:
    count: 2
    allowMultiplePerNode: false
    modules:
      - name: rook
        enabled: true
  dashboard:
    enabled: true
    port: 8443
    ssl: true
  monitoring:
    enabled: false
    metricsDisabled: false
  network:
    connections:
      encryption:
        enabled: false
      compression:
        enabled: false
      requireMsgr2: false
  crashCollector:
    disable: false
  logCollector:
    enabled: true
    periodicity: daily
    maxLogSize: 500M
  cleanupPolicy:
    confirmation: ""
    sanitizeDisks:
      method: quick
      dataSource: zero
      iteration: 1
    allowUninstallWithVolumes: false
  removeOSDsIfOutAndSafeToRemove: false
  priorityClassNames:
    mon: system-node-critical
    osd: system-node-critical
    mgr: system-cluster-critical
  storage:
    useAllNodes: false
    useAllDevices: false # set to true to use all available raw devices on these nodes
    config:
      databaseSizeMB: "1024"
    nodes:
      - name: "vm-k3s-master-400.stabify.de"
        devices:
          - name: "sdb"
          # - name: "sdc"
      - name: "vm-k3s-master-401.stabify.de"
        devices:
          - name: "sdb"
      - name: "vm-k3s-master-402.stabify.de"
        devices:
          - name: "sdb"
    onlyApplyOSDPlacement: false
  disruptionManagement:
    managePodBudgets: true
    osdMaintenanceTimeout: 30
    pgHealthCheckTimeout: 0
  csi:
    readAffinity:
      enabled: false
  healthCheck:
    daemonHealth:
      mon:
        disabled: false
        interval: 45s
      osd:
        disabled: false
        interval: 60s
      status:
        disabled: false
        interval: 60s
    livenessProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
    startupProbe:
      mon:
        disabled: false
      mgr:
        disabled: false
      osd:
        disabled: false
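
Because cleanupPolicy.confirmation is left empty above, deleting this CephCluster does not sanitize the hosts. A full removal therefore also needs manual cleanup on each of the three nodes listed under storage.nodes; a rough sketch, assuming sdb is dedicated to Ceph and holds nothing else:

    # run on vm-k3s-master-400/401/402.stabify.de
    rm -rf /var/lib/rook          # the dataDirHostPath from the manifest above
    sgdisk --zap-all /dev/sdb     # wipe the Ceph partition table on the OSD disk
    wipefs -a /dev/sdb            # clear remaining filesystem/LVM signatures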


@@ -1,25 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rook-ceph-dashboard
  namespace: rook-ceph
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
    traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
  ingressClassName: traefik
  tls:
    - hosts:
        - ceph.apps.internal.k3s.stabify.de
      secretName: ceph-dashboard-tls
  rules:
    - host: ceph.apps.internal.k3s.stabify.de
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: rook-ceph-mgr-dashboard
                port:
                  number: 8443


@@ -1,8 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephObjectStoreUser
metadata:
  name: ceph-objectstore-user
  namespace: rook-ceph
spec:
  store: ceph-objectstore
  displayName: "Ceph Object Store User"


@@ -1,19 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: ceph-objectstore
  namespace: rook-ceph
spec:
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
  dataPool:
    failureDomain: host
    replicated:
      size: 3
  preservePoolsOnDelete: true
  gateway:
    type: s3
    port: 80
    instances: 1


@@ -1,45 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: rook-ceph-tools
          image: quay.io/ceph/ceph:v18.2.2
          command: ["/bin/bash"]
          args: ["-c", "while true; do sleep 10; done"]
          securityContext:
            runAsUser: 0
            runAsNonRoot: false
          volumeMounts:
            - mountPath: /etc/ceph
              name: mon-endpoint-volume
            - mountPath: /var/lib/rook
              name: rook-config
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: rook-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
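
Once Argo CD has pruned everything in this commit, a quick check that nothing from Rook is left behind might look like the following, assuming kubectl access and that the rook-ceph namespace is expected to disappear as well:

    kubectl get ns rook-ceph               # should be gone; stuck in Terminating usually means leftover finalizers
    kubectl get crd | grep ceph.rook.io    # the Rook CRDs are removed together with the operator chart
    kubectl api-resources | grep ceph      # nothing Ceph-related should be served any more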