refactor(storage): replace minio with rook-ceph, remove minio files
infrastructure/rook-ceph-cluster-app.yaml (new file, 91 lines)
@@ -0,0 +1,91 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: rook-ceph-cluster
  namespace: argocd
  annotations:
    argocd.argoproj.io/sync-wave: "2"
spec:
  project: default
  source:
    chart: rook-ceph-cluster
    repoURL: https://charts.rook.io/release
    targetRevision: v1.13.3
    helm:
      values: |
        operatorNamespace: rook-ceph
        toolbox:
          enabled: true # Useful for debugging (ceph CLI)

        # Monitoring dashboard
        dashboard:
          enabled: true
          ssl: false

        # Single-node config (IMPORTANT for your setup)
        cephClusterSpec:
          dataDirHostPath: /var/lib/rook
          mon:
            count: 1
            allowMultiplePerNode: true
          mgr:
            count: 1
            allowMultiplePerNode: true

          # We use PVCs instead of raw disks (simpler in a VM)
          storage:
            useAllNodes: false
            useAllDevices: false
            storageClassDeviceSets:
              - name: set1
                count: 1 # Number of OSDs
                portable: false
                tuneDeviceClass: true
                volumeClaimTemplates:
                  - metadata:
                      name: data
                    spec:
                      resources:
                        requests:
                          storage: 50Gi
                      storageClassName: local-path
                      accessModes:
                        - ReadWriteOnce

        # S3 object store (RadosGW)
        cephObjectStores:
          - name: ceph-objectstore
            spec:
              metadataPool:
                failureDomain: host
                replicated:
                  size: 1 # No replication (single node)
              dataPool:
                failureDomain: host
                replicated:
                  size: 1
              preservePoolsOnDelete: true
              gateway:
                port: 80
                instances: 1
              healthCheck:
                bucket:
                  enabled: true
                  interval: 60s

        # StorageClass for S3 buckets (so we can create buckets via K8s YAML)
        cephObjectStoreUser:
          enabled: false # We create users manually or via CRD

        ingress:
          dashboard:
            ingressClassName: traefik
            annotations:
              cert-manager.io/cluster-issuer: letsencrypt-prod
              traefik.ingress.kubernetes.io/router.entrypoints: websecure
            host:
              name: ceph.apps.internal.k3s.stabify.de
            tls:
              - hosts:
                  - ceph.apps.internal.k3s.stabify.de
                secretName: ceph-dashboard-tls
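Creating buckets via K8s YAML, as the values comment mentions, works through Rook's bucket provisioner: a StorageClass pointing at the object store, plus an ObjectBucketClaim per bucket. A minimal sketch against the ceph-objectstore defined above (the names ceph-bucket and my-bucket are illustrative):

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-bucket                       # hypothetical name
provisioner: rook-ceph.ceph.rook.io/bucket # prefix is the operator namespace
reclaimPolicy: Delete
parameters:
  objectStoreName: ceph-objectstore       # the store defined in the values above
  objectStoreNamespace: rook-ceph
---
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: my-bucket                         # hypothetical name
spec:
  generateBucketName: my-bucket
  storageClassName: ceph-bucket

On success, Rook creates a ConfigMap and a Secret named after the claim, carrying the S3 endpoint and credentials for workloads to consume.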
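Likewise, creating users "via CRD" refers to Rook's CephObjectStoreUser resource; a minimal sketch (the user name is illustrative):

apiVersion: ceph.rook.io/v1
kind: CephObjectStoreUser
metadata:
  name: s3-user                           # hypothetical name
  namespace: rook-ceph
spec:
  store: ceph-objectstore                 # the store defined in the values above
  displayName: "S3 user"

Rook then writes the generated access and secret keys into a Secret named rook-ceph-object-user-<store>-<user> in the operator namespace.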