---
# Argo CD Application deploying the rook-ceph-cluster Helm chart (sync-wave 2,
# i.e. after the Rook operator itself). Single-node-friendly settings:
# mon/mgr count 1 with allowMultiplePerNode, replica size 1 on object-store pools.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "2"
spec:
  project: default
  source:
    chart: rook-ceph-cluster
    repoURL: https://charts.rook.io/release
    targetRevision: v1.13.3
    helm:
      values: |
        operatorNamespace: rook-ceph
        toolbox:
          enabled: true
        # NOTE(review): the chart normally expects dashboard settings under
        # cephClusterSpec.dashboard; verify this top-level key is actually consumed.
        dashboard:
          enabled: true
          ssl: false
        cephClusterSpec:
          dataDirHostPath: /var/lib/rook
          # Single mon/mgr — no HA; fine for a one-node lab cluster only.
          mon:
            count: 1
            allowMultiplePerNode: true
          mgr:
            count: 1
            allowMultiplePerNode: true
          resources:
            mgr:
              limits:
                cpu: "1000m"
                memory: "1Gi"
              requests:
                cpu: "100m"
                memory: "512Mi"
            mon:
              limits:
                cpu: "1000m"
                memory: "2Gi"
              requests:
                cpu: "100m"
                memory: "512Mi"
            osd:
              limits:
                cpu: "1000m"
                memory: "2Gi"
              requests:
                cpu: "100m"
                memory: "1Gi"
          # STORAGE CONFIGURATION (Raw Disk Mode - Enterprise Style)
          storage:
            useAllNodes: true
            useAllDevices: true  # Rook claims every empty disk it finds (sdb, vdb, etc.)
        cephObjectStores:
          - name: ceph-objectstore
            storageClass:
              enabled: true
              name: ceph-bucket
              reclaimPolicy: Delete
            spec:
              # size 1 = no replication; data is lost if the single OSD dies.
              metadataPool:
                failureDomain: host
                replicated:
                  size: 1
              dataPool:
                failureDomain: host
                replicated:
                  size: 1
              preservePoolsOnDelete: true
              gateway:
                port: 80
                instances: 1
              # healthCheck removed because it caused a chart schema error
        cephObjectStoreUser:
          enabled: false
        ingress:
          dashboard:
            ingressClassName: traefik
            annotations:
              cert-manager.io/cluster-issuer: letsencrypt-prod
              traefik.ingress.kubernetes.io/router.entrypoints: websecure
              # Dashboard serves plain HTTP (ssl: false above); skip backend TLS verification.
              traefik.ingress.kubernetes.io/service.serverstransport: rook-ceph-insecure-transport
            host:
              name: ceph.apps.internal.k3s.stabify.de
            tls:
              - hosts:
                  - ceph.apps.internal.k3s.stabify.de
                secretName: ceph-dashboard-tls
  destination:
    server: https://kubernetes.default.svc
    namespace: rook-ceph
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
      - ServerSideApply=true