Complete cluster reset. CEPH with arbiter

2025-08-29 17:36:52 +02:00
parent 4389c53a2c
commit dad001ae56
13 changed files with 693 additions and 263 deletions


@@ -8,25 +8,50 @@ spec:
image: quay.io/ceph/ceph:v19.2.3
dataDirHostPath: /var/lib/rook
# Networks: public on VLAN 40 (4.0), cluster on VLAN 30 (3.0)
network:
provider: host
addressRanges:
public:
- "192.168.4.0/24"
cluster:
- "192.168.4.0/24"
mgr:
count: 2
- "192.168.3.0/24"
mon:
count: 3
count: 5
allowMultiplePerNode: false
mgr:
count: 2
dashboard:
enabled: true
# Avoid OSDs on the future arbiter node (when you add it)
placement:
# Allow scheduling on nodes with the control-plane taint
all:
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
effect: "NoSchedule"
# MON: pin one per host (srvfkvm01-05) and avoid co-locating them
mon:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["srvfkvm01","srvfkvm02","srvfkvm03","srvfkvm04","srvfkvm05"]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: rook-ceph-mon
topologyKey: kubernetes.io/hostname
# OSD: only in the data zones (avoids the arbiter)
osd:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
@@ -36,6 +61,7 @@ spec:
operator: In
values: ["site-a","site-b"]
# MGR preference (optional)
mgr:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
@@ -46,6 +72,9 @@ spec:
operator: In
values: ["srvfkvm01","srvfkvm04"]
cleanupPolicy:
wipeDevicesFromOtherClusters: true
storage:
useAllNodes: false
useAllDevices: false
@@ -85,3 +114,4 @@ spec:
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f
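For context, the OSD affinity in this manifest only matches nodes whose zone label is "site-a" or "site-b", so the data hosts need that label while the arbiter host does not get one and therefore receives a mon but no OSDs. A minimal sketch of the node labeling this assumes, taking the standard topology.kubernetes.io/zone key and an example host-to-site mapping (both are assumptions, not taken from this commit):

```yaml
# Hypothetical node labels that would satisfy the OSD placement above.
# The topology.kubernetes.io/zone key and the host-to-site mapping are
# illustrative assumptions, not part of this commit.
apiVersion: v1
kind: Node
metadata:
  name: srvfkvm01
  labels:
    topology.kubernetes.io/zone: site-a   # data host, eligible for OSDs
---
apiVersion: v1
kind: Node
metadata:
  name: srvfkvm03
  labels:
    topology.kubernetes.io/zone: site-b   # data host in the second site
# The arbiter host would carry neither value, so the OSD nodeAffinity
# excludes it while the mon affinity (matching on hostname) still allows a mon.
```

With `count: 5` and `allowMultiplePerNode: false`, a 2+2+1 spread (two mons per data site plus one on the arbiter) would keep quorum (3 of 5) even if an entire site goes down.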