Compare commits

...

39 Commits

Author SHA1 Message Date
aa976406e4 let's see 2025-09-04 23:20:28 +02:00
11da827e52 configured external, dynu-updater, repo and coredns with care 2025-09-04 23:19:05 +02:00
23c556938b added clients script 2025-09-04 00:39:48 +02:00
3bfbd99958 Update readme.md 2025-09-03 22:23:46 +00:00
1d06b6bb37 Update readme.md 2025-09-03 22:20:29 +00:00
612b808259 Update readme.md 2025-09-03 22:19:32 +00:00
9991f318fc Update readme.md 2025-09-03 22:19:00 +00:00
acc0b0bb79 Update readme.md 2025-09-03 22:18:06 +00:00
0bbc20ca14 add repo 2025-09-04 00:13:39 +02:00
bb76fc67dc added dynu-updater 2025-09-01 17:18:13 +02:00
549fd4ca37 fixing dynu 2025-09-01 17:15:49 +02:00
4c5b451308 added dynu updater 2025-09-01 14:06:16 +02:00
bf2e26a8bf adding virtual machines 2025-08-31 13:44:21 +02:00
01cbdf2a1d fixed apolo kubevirt and assorted tweaks 2025-08-31 10:35:16 +02:00
0fc9bbb2c0 polish of argos, coredns and guacamole 2025-08-31 08:42:35 +02:00
xguefer a3500d6250 Update readme.md 2025-08-30 23:08:49 +00:00
xguefer f2f8a5cf5a Update readme.md 2025-08-30 22:56:25 +00:00
xguefer 91737b6592 Update readme.md 2025-08-30 22:28:53 +00:00
xguefer b836c7afce Update readme.md 2025-08-30 22:28:25 +00:00
9291e46fe9 renaming folder 2025-08-31 00:27:06 +02:00
2331da8cf8 velero 2025-08-31 00:24:21 +02:00
2eff32d251 dell driver 2025-08-30 13:35:31 +02:00
55c23dda1d added harbor manual 2025-08-30 00:06:05 +02:00
xguefer 8f7eee7cc1 Update readme.md 2025-08-29 19:59:48 +00:00
xguefer b6da6350ac Update cluster_init.md 2025-08-29 19:58:34 +00:00
xguefer d173a90b2e Update readme.md 2025-08-29 15:44:31 +00:00
452b04aa5d ceph manifest cleanup; gitea and wireguard working 2025-08-29 17:43:07 +02:00
dad001ae56 full cluster reset; CEPH with arbiter 2025-08-29 17:36:52 +02:00
4389c53a2c disk cleanup script 2025-08-28 13:31:43 +02:00
xguefer bda9a3be17 Update readme.md 2025-08-27 01:03:49 +00:00
xguefer ac5f10b281 Update readme.md 2025-08-27 00:55:54 +00:00
3d41b2cda4 fixed velero 2025-08-27 02:44:33 +02:00
xguefer 6bdc7e0e30 Update minio/readme.md 2025-08-26 23:48:51 +00:00
7e429dd17a minio, velero and the like 2025-08-27 01:46:41 +02:00
xguefer 4265121e6e Update readme.md 2025-08-26 20:16:12 +00:00
33aced03a0 added seagate driver 2025-08-26 22:00:11 +02:00
fc3640f5e6 apolo streamer fixes 2025-08-25 23:03:41 +02:00
xguefer a321cc7928 Update readme.md 2025-08-24 21:09:25 +00:00
xguefer 4c39e29748 Update rook/readme.md 2025-08-24 21:08:20 +00:00
128 changed files with 3376 additions and 1056 deletions

View File

@@ -1,10 +0,0 @@
kubeadm join 192.168.0.20:6443 --token rvz86n.c8rdb9ygtikrwnub --discovery-token-ca-cert-hash sha256:f925653dfb3d2b4697395a272e0b07cf4eb16b7ae5a2cc3b33aeab1f36fe7d13 --control-plane --certificate-key eeab98651b2f07f6ce53649b2cca1bf3c449d4fe6270ec0645219cd8c6795ca7 --apiserver-advertise-address=192.168.4.2
sudo rm -rf /var/lib/kubelet/
sudo rm -rf /etc/kubernetes
sudo rm -rf /etc/cni/
sudo rm -rf /var/lib/etcd
sudo systemctl stop kubelet
sudo systemctl status kubelet
sudo kubeadm join 192.168.0.20:6443 --token rvz86n.c8rdb9ygtikrwnub --discovery-token-ca-cert-hash sha256:f925653dfb3d2b4697395a272e0b07cf4eb16b7ae5a2cc3b33aeab1f36fe7d13 --control-plane --certificate-key eeab98651b2f07f6ce53649b2cca1bf3c449d4fe6270ec0645219cd8c6795ca7 --apiserver-advertise-address=192.168.4.2

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: app6
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: app6
image: harbor.c2et.com/xrf-ssl/xrf-app6:6.0
image: harbor.c2et.net/apolo/xrf-app6:6.0
imagePullPolicy: IfNotPresent
ports:
- name: tcp-app6

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: colossus
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: colossus
image: harbor.c2et.com/xrf-ssl/xrf-webcolossus:6.0
image: harbor.c2et.net/apolo/xrf-webcolossus:6.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: consumer
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: consumer
image: harbor.c2et.com/xrf-ssl/xrf-consumer:6.0
image: harbor.c2et.net/apolo/xrf-consumer:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: drone
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: drone
image: harbor.c2et.com/xrf-ssl/xrf-drone:6.0
image: harbor.c2et.net/apolo/xrf-drone:6.0
imagePullPolicy: IfNotPresent
# Si Drone necesita otras vars del backend, puedes añadir:
# envFrom:

View File

@@ -20,9 +20,7 @@ spec:
app.kubernetes.io/component: ejabberd
spec:
imagePullSecrets:
- name: harbor-cred
# >>> Asegura permisos/ownership en volúmenes
- name: harbor-cred-apolo
securityContext:
runAsUser: 9000
runAsGroup: 9000
@@ -62,7 +60,7 @@ spec:
containers:
- name: ejabberd
image: harbor.c2et.com/xrf-ssl/xrf-ejabberd:6.0
image: harbor.c2et.net/apolo/xrf-ejabberd:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: kurento
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: kurento-media-server
image: harbor.c2et.com/xrf-ssl/xrf-kurento-media-server:6.0
image: harbor.c2et.net/apolo/xrf-kurento-media-server:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: kurento-api
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: kurento-api
image: harbor.c2et.com/xrf-ssl/xrf-kurento-api:6.0
image: harbor.c2et.net/apolo/xrf-kurento-api:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: media
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: mediamtx
image: harbor.c2et.com/xrf-ssl/xrf-media-server:6.0
image: harbor.c2et.net/apolo/xrf-media-server:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: nakama
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: nakama
image: harbor.c2et.com/xrf-ssl/xrf-nakama:6.0
image: harbor.c2et.net/apolo/xrf-nakama:6.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: php
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: php-fpm
image: harbor.c2et.com/xrf-ssl/xrf-php:6.0
image: harbor.c2et.net/apolo/xrf-php:6.0
imagePullPolicy: IfNotPresent
ports:
- name: php-fpm

View File

@@ -20,11 +20,11 @@ spec:
app.kubernetes.io/component: portal
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
# Contenedor de la app (como venías)
- name: portal
image: harbor.c2et.com/xrf-ssl/xrf-portal-https:6.0
image: harbor.c2et.net/apolo/xrf-portal-https:6.0
imagePullPolicy: IfNotPresent
ports:
- name: app

View File

@@ -20,7 +20,7 @@ spec:
app.kubernetes.io/component: postgres
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
securityContext:
fsGroup: 999
initContainers:
@@ -40,7 +40,7 @@ spec:
runAsUser: 0
containers:
- name: postgres
image: harbor.c2et.com/xrf-ssl/xrf-db:6.0
image: harbor.c2et.net/apolo/xrf-db:6.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 999

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: rabbitmq
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: rabbitmq
image: harbor.c2et.com/xrf-ssl/xrf-rabbitmq:6.0
image: harbor.c2et.net/apolo/xrf-rabbitmq:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: streamer
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: streamer
image: harbor.c2et.com/xrf-ssl/xrf-streamer-server:6.0
image: harbor.c2et.net/apolo/xrf-streamer-server:6.0
imagePullPolicy: IfNotPresent
command: ["npm","start"]
envFrom:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: web
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: nginx
image: harbor.c2et.com/xrf-ssl/xrf-web:6.0
image: harbor.c2et.net/apolo/xrf-web:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:

View File

@@ -20,10 +20,10 @@ spec:
app.kubernetes.io/component: websocket
spec:
imagePullSecrets:
- name: harbor-cred
- name: harbor-cred-apolo
containers:
- name: websocket
image: harbor.c2et.com/xrf-ssl/xrf-websocket:6.0
image: harbor.c2et.net/apolo/xrf-websocket:6.0
imagePullPolicy: IfNotPresent
ports:
- name: ws

View File

@@ -18,7 +18,6 @@ resources:
- certs/certificate-meeting.yaml
# ConfigMaps
- configmaps/configmap-coredns.yaml
- configmaps/configmap-ejabberd-inetrc.yaml
- configmaps/configmap-ejabberd.yaml
- configmaps/configmap-kms-api.yaml
@@ -49,7 +48,6 @@ resources:
- deployments/deploy-app6.yaml
- deployments/deploy-colossus.yaml
- deployments/deploy-consumer.yaml
- deployments/deploy-coredns.yaml
- deployments/deploy-drone.yaml
- deployments/deploy-ejabberd.yaml
- deployments/deploy-kms.yaml
@@ -68,7 +66,6 @@ resources:
- services/svc-aliases-compose.yaml
- services/svc-app6.yaml
- services/svc-colossus.yaml
- services/svc-coredns.yaml
- services/svc-ejabberd.yaml
- services/svc-kms.yaml
- services/svc-kurento-api.yaml

View File

@@ -1,9 +1,9 @@
apiVersion: v1
data:
.dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuYzJldC5jb20iOnsidXNlcm5hbWUiOiJ4YXZvciIsInBhc3N3b3JkIjoiTUBuYWJvMjAyNSIsImVtYWlsIjoibm8tcmVwbHlAYzJldC5jb20iLCJhdXRoIjoiZUdGMmIzSTZUVUJ1WVdKdk1qQXlOUT09In19fQ==
.dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuYzJldC5uZXQiOnsidXNlcm5hbWUiOiJ4YXZvciIsInBhc3N3b3JkIjoiTUBuYWJvMjAyNSIsImVtYWlsIjoieGF2b3JAaG90bWFpbC5lcyIsImF1dGgiOiJlR0YyYjNJNlRVQnVZV0p2TWpBeU5RPT0ifX19
kind: Secret
metadata:
creationTimestamp: null
name: harbor-cred
namespace: apolo
namespace: guacamole
type: kubernetes.io/dockerconfigjson

View File

@@ -0,0 +1,9 @@
apiVersion: v1
data:
.dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuYzJldC5jb20iOnsidXNlcm5hbWUiOiJ4YXZvciIsInBhc3N3b3JkIjoiTUBuYWJvMjAyNSIsImVtYWlsIjoibm8tcmVwbHlAYzJldC5jb20iLCJhdXRoIjoiZUdGMmIzSTZUVUJ1WVdKdk1qQXlOUT09In19fQ==
kind: Secret
metadata:
creationTimestamp: null
name: harbor-cred
namespace: apolo
type: kubernetes.io/dockerconfigjson

View File

@@ -7,10 +7,11 @@ metadata:
app.kubernetes.io/name: apolo-mediamtx
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: media
annotations:
metallb.universe.tf/allow-shared-ip: streaming
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.12
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: apolo-mediamtx
ports:

View File

@@ -7,6 +7,8 @@ metadata:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
annotations:
metallb.universe.tf/allow-shared-ip: streaming
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.12

View File

@@ -1,87 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: argos-panel-config
namespace: argos-core
data:
app.py: |
import os, sqlite3, time
from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from minio import Minio
from urllib.parse import urlparse
DB="/data/argos.db"
mc=Minio(os.getenv("MINIO_ENDPOINT","s3.argos.interna"),
access_key=os.getenv("MINIO_ACCESS_KEY"),
secret_key=os.getenv("MINIO_SECRET_KEY"),
secure=os.getenv("MINIO_SECURE","false").lower()=="true")
app=FastAPI()
def rows(limit=100, camera=None, since=None):
q="SELECT id, ts, edge, camera, label, s3url, thumb_s3 FROM events"
cond=[]; args=[]
if camera: cond.append("camera=?"); args.append(camera)
if since: cond.append("ts>=?"); args.append(int(since))
if cond: q+=" WHERE "+ " AND ".join(cond)
q+=" ORDER BY ts DESC LIMIT ?"; args.append(limit)
con=sqlite3.connect(DB); cur=con.cursor()
cur.execute(q, tuple(args)); r=cur.fetchall(); con.close()
return r
@app.get("/api/events")
def api_events(limit:int=100, camera:str=None, since:int=None):
return [dict(id=i, ts=t, edge=e, camera=c, label=l or "", s3url=s, thumb=th or "")
for (i,t,e,c,l,s,th) in rows(limit,camera,since)]
@app.get("/api/url/{event_id}")
def presign(event_id: str, expires: int = 600):
con=sqlite3.connect(DB); cur=con.cursor()
cur.execute("SELECT s3url FROM events WHERE id=?", (event_id,))
row=cur.fetchone(); con.close()
if not row: raise HTTPException(404, "Not found")
s3url=row[0]; p=urlparse(s3url); b=p.netloc; k=p.path.lstrip("/")
return {"url": mc.presigned_get_object(b, k, expires=expires)}
@app.get("/", response_class=HTMLResponse)
def index():
return """
<!doctype html><meta charset="utf-8"><title>ARGOS Panel</title>
<style>body{font-family:system-ui;margin:1.5rem} .grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(280px,1fr));gap:12px}
.card{border:1px solid #ddd;border-radius:10px;padding:10px} img{width:100%;height:160px;object-fit:cover;border-radius:8px}
button{padding:.4rem .6rem;margin-right:.3rem}</style>
<h1>ARGOS Alarmas</h1>
<div class="grid" id="grid"></div>
<div style="margin-top:1rem"><video id="player" width="960" controls></video></div>
<script>
const fmt=t=>new Date(t*1000).toLocaleString();
async function load(){
const r=await fetch('/api/events?limit=100'); const data=await r.json();
const g=document.getElementById('grid'); g.innerHTML='';
for(const ev of data){
const d=document.createElement('div'); d.className='card';
const img = ev.thumb ? `<img src="${ev.thumb.replace('s3://','/api/url/THUMB?key=')}" alt="thumb">` : '';
d.innerHTML = `${img}<div><b>${ev.camera}</b> — ${fmt(ev.ts)}<br>${ev.label||''}</div>
<div style="margin-top:.4rem">
<button data-id="${ev.id}" data-action="clip">Ver clip</button>
<button data-path="${ev.camera}" data-action="live">En directo</button>
</div>`;
g.appendChild(d);
}
g.onclick=async (e)=>{
if(e.target.tagName!=='BUTTON') return;
const v=document.getElementById('player');
if(e.target.dataset.action==='clip'){
const id=e.target.dataset.id;
const j=await (await fetch('/api/url/'+id)).json();
v.src=j.url; v.play();
}else if(e.target.dataset.action==='live'){
const path=e.target.dataset.path;
// usa MediaMTX web player
window.open('http://mediamtx.argos.interna/?path='+encodeURIComponent(path),'_blank');
}
}
}
load(); setInterval(load,10000);
</script>
"""

View File

@@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: mosquitto
image: eclipse-mosquitto:2
image: eclipse-mosquitto:latest
ports:
- containerPort: 1883
volumeMounts:

View File

@@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: panel
image: python:3.13.7-slim-bookworm
image: harbor.c2et.net/library/python:3.13.7-slim-bookworm
command: ["/bin/sh","-c"]
args:
- |

View File

@@ -86,7 +86,7 @@ Este paso parte de una instalación limpia de openSUSE/SLES actualizada y con pe
```bash
sudo zypper refresh
sudo zypper update
sudo zypper install -y curl ca-certificates keepalived chrony
sudo zypper install -y curl ca-certificates keepalived chrony yq jq open-iscsi yast2-iscsi-client multipath-tools gdisk util-linux helm tree git htop
```
### b) Añade el repositorio oficial de Kubernetes

View File

@@ -33,9 +33,37 @@ data:
192.168.0.100 admin.powervault1.c2et.net
192.168.0.100 admin.powervault2.c2et.net
192.168.0.100 ceph.c2et.net
192.168.0.100 heimdall.c2et.net
# === dotcom ) ===
192.168.0.100 cockpit.c2et.com
192.168.0.100 git.c2et.com
192.168.0.100 harbor.c2et.com
192.168.0.100 wireguard.c2et.com
192.168.0.100 proxy.c2et.com
192.168.0.80 backend.apolo.c2et.com
192.168.0.80 portal.apolo.c2et.com
192.168.0.80 colossus.apolo.c2et.com
192.168.0.80 chat.apolo.c2et.com
192.168.0.80 muc.chat.apolo.c2et.com
192.168.0.81 streaming.apolo.c2et.com
192.168.0.81 meeting.apolo.c2et.com
# === dotnet ) ===
192.168.0.100 repo.c2et.net
192.168.0.100 git.c2et.net
192.168.0.100 wireguard.c2et.net
192.168.0.100 ceph.c2et.net
192.168.0.100 harbor.c2et.net
192.168.0.100 grafana.c2et.net
192.168.0.100 kubevirt.c2et.net
192.168.0.100 heimdall.c2et.net
192.168.0.100 argos.panel.c2et.net
192.168.0.100 vscode.c2et.net
fallthrough
}
forward . /etc/resolv.conf
forward . 8.8.8.8 1.1.1.1
cache 120
# prometheus 0.0.0.0:9153 # <- activa si quieres métricas
}

View File

@@ -8,3 +8,4 @@ resources:
- configmap-coredns.yaml
- deploy-coredns.yaml
- svc-coredns.yaml
- svc-coredns-admin.yaml

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: coredns-custom-admin
namespace: coredns
spec:
type: LoadBalancer
loadBalancerIP: 192.168.0.110
selector:
app: coredns-custom
ports:
- name: dns-udp
port: 53
targetPort: 53
protocol: UDP
- name: dns-tcp
port: 53
targetPort: 53
protocol: TCP

dynu-updater/cronjob.yaml (Normal file, +18 lines)
View File

@@ -0,0 +1,18 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: dynu-updater
namespace: dynu-updater
spec:
schedule: "*/5 * * * *"
jobTemplate:
spec:
template:
spec:
imagePullSecrets:
- name: harbor-regcred
containers:
- name: dynu-updater
image: harbor.c2et.net/c3s/dynu-updater-c3s:1.0
imagePullPolicy: Always
restartPolicy: OnFailure

View File

@@ -0,0 +1,9 @@
apiVersion: v1
data:
.dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuYzJldC5uZXQiOnsidXNlcm5hbWUiOiJ4YXZvciIsInBhc3N3b3JkIjoiTUBuYWJvMjAyNSIsImVtYWlsIjoiYWRtaW5AYzJldC5uZXQiLCJhdXRoIjoiZUdGMmIzSTZUVUJ1WVdKdk1qQXlOUT09In19fQ==
kind: Secret
metadata:
creationTimestamp: null
name: harbor-regcred
namespace: dynu-updater
type: kubernetes.io/dockerconfigjson

View File

@@ -0,0 +1,4 @@
resources:
- harbor-regcred.yaml
- namespace.yaml
- cronjob.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: dynu-updater

dynu-updater/readme.md (Normal file, +119 lines)
View File

@@ -0,0 +1,119 @@
# Dynu IP Updater en Kubernetes
Este proyecto despliega un cliente de Dynu personalizado como `CronJob` y `Job` en un clúster K3s/Kubernetes. Su función es mantener actualizada la dirección IP pública de un grupo de dominios gestionados en Dynu usando su API.
---
## 🚀 Características
* Imagen Docker ligera basada en Alpine.
* Actualiza la IP pública mediante peticiones HTTP a la API de Dynu.
* Guarda la IP anterior para evitar actualizaciones innecesarias.
* Ejecutado periódicamente mediante `CronJob` (cada 5 minutos).
* Puede ejecutarse manualmente mediante un `Job`.
* Los logs de ejecución se almacenan y pueden consultarse con `kubectl logs`.
---
## 🚜 Estructura de archivos
```
.
k8s-dynu-updater/
├── cronjob.yaml # CronJob de Kubernetes
├── job-manual.yaml # Job manual para pruebas
├── kustomization.yaml # Kustomize para despliegue
├── namespace.yaml # Namespace aislado para el updater
```
La imagen Docker utilizada se crea con el script `update.sh` incrustado, que:
1. Detecta la IP pública actual.
2. Comprueba si ha cambiado desde la última ejecución.
3. Llama a la API de Dynu con usuario, grupo y contraseña hash MD5.
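The embedded script itself is not included in this diff; a minimal sketch of what it can look like, assuming `curl` and BusyBox `md5sum` inside the Alpine image, an arbitrary IP-echo service (`api.ipify.org`), and a state file path (`/tmp/last_ip`) chosen here purely for illustration:
```bash
#!/bin/sh
# Hypothetical sketch of the embedded update.sh; the real script baked into the image may differ.
set -eu

USERNAME="xavor"            # Dynu account user (see the configuration section below)
PASSWORD="changeme"         # IP Update Password; hashed to MD5 before sending
GROUP="Trabajo"             # Dynu group to update
STATE_FILE="/tmp/last_ip"   # where the previously published IP is cached (illustrative path)

# 1) Detect the current public IP (any "what is my IP" service works; api.ipify.org is an assumption).
CURRENT_IP="$(curl -fsS https://api.ipify.org)"

# 2) Skip the API call if the IP has not changed since the last run.
LAST_IP="$(cat "$STATE_FILE" 2>/dev/null || true)"
if [ "$CURRENT_IP" = "$LAST_IP" ]; then
  echo "IP unchanged ($CURRENT_IP), nothing to do."
  exit 0
fi

# 3) Call the Dynu API with the MD5-hashed password, as described in the IP Update Protocol.
MD5_PASS="$(printf '%s' "$PASSWORD" | md5sum | cut -d' ' -f1)"
RESPONSE="$(curl -fsS "https://api.dynu.com/nic/update?username=${USERNAME}&group=${GROUP}&myip=${CURRENT_IP}&myipv6=no&password=${MD5_PASS}")"
echo "Dynu answered: $RESPONSE"

# Remember the IP we just published.
echo "$CURRENT_IP" > "$STATE_FILE"
```
The query parameters mirror the Dynu log line shown further below.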
---
## 📂 Despliegue
1. Aplicar los manifiestos:
```bash
cd k8s-dynu-updater
kubectl apply -k .
```
2. Comprobar el estado:
```bash
kubectl get pods -n dynu-updater
```
---
## ✅ Ejecución manual
Para probar el script sin esperar al cron:
```bash
kubectl apply -f job-manual.yaml
kubectl logs -n dynu-updater job/dynu-updater-manual
```
---
## ⚖️ Configuración del script
El script embebido en la imagen Docker:
```bash
USERNAME="xavor"
PASSWORD="M@nabo2025"
GROUP="Trabajo"
```
> La contraseña se convierte a hash MD5 antes de enviarla.
---
## 🚨 Seguridad
* La contraseña se envía como hash MD5.
* Se recomienda usar un "IP Update Password" diferente del de cuenta.
* Puedes montar `Secret` en Kubernetes para no incluir credenciales directamente en la imagen.
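
A sketch of that variant, assuming a hypothetical Secret named `dynu-credentials` and a script that reads its values from the environment (neither exists in the current manifests):
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: dynu-credentials        # illustrative name, not part of the current manifests
  namespace: dynu-updater
type: Opaque
stringData:
  DYNU_USERNAME: xavor
  DYNU_PASSWORD: "<ip-update-password>"
  DYNU_GROUP: Trabajo
---
# In cronjob.yaml the container could then read the values instead of baking them into the image:
#   containers:
#     - name: dynu-updater
#       image: harbor.c2et.net/c3s/dynu-updater-c3s:1.0
#       envFrom:
#         - secretRef:
#             name: dynu-credentials
```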
---
## 🔍 Logs en Dynu
Dynu registra las actualizaciones entrantes. Puedes ver líneas como:
```
/nic/update?username=xavor&group=manabovalencia&myip=62.15.155.254&myipv6=no&password=***** Good
```
Esto confirma que el pod ha funcionado correctamente.
---
## 🔍 Referencias
* Dynu IP Update Protocol: [https://www.dynu.com/DynamicDNS/IP-Update-Protocol](https://www.dynu.com/DynamicDNS/IP-Update-Protocol)
* API: `https://api.dynu.com/nic/update`
* Cliente basado en `curl` y `cron` en Alpine Linux
---
## 📅 Mantenimiento
* Se puede adaptar a otros grupos (por ejemplo: `ManaboTorrevieja`).
* Si se quiere logs persistentes, se puede montar un volumen.
* Si se quiere gestionar con ArgoCD, agregarlo como `Application`.
---
## 📄 Autor
Xavor (2025)
Este cliente se ha probado en K3s y Dynu, actualizando correctamente el grupo `ManaboValencia`.

View File

@@ -4,6 +4,20 @@ metadata:
name: nginx-router-config
namespace: external
data:
_common.conf: |
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_read_timeout 3600;
proxy_send_timeout 3600;
client_max_body_size 0;
proxy_redirect off;
proxy_ssl_server_name on;
router.conf: |
server {
listen 80 default_server;
@@ -33,3 +47,46 @@ data:
proxy_ssl_verify off;
}
}
gitdotcom.conf: |
server {
listen 80;
server_name git.c2et.com;
location / {
include /etc/nginx/conf.d/_common.conf;
proxy_pass http://192.168.0.40:3001;
}
}
wireguarddotcom.conf: |
server {
listen 80;
server_name wireguard.c2et.com;
location / {
include /etc/nginx/conf.d/_common.conf;
proxy_pass http://192.168.0.40:51821;
}
}
harbordotcom.conf: |
server {
listen 80;
server_name harbor.c2et.com;
location / {
include /etc/nginx/conf.d/_common.conf;
proxy_pass http://192.168.0.40:85;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
}
}
cockpitdotcom.conf: |
server {
listen 80;
server_name cockpit.c2et.com;
location / {
proxy_pass https://192.168.0.40:9090;
proxy_ssl_verify off;
}
}

external/configmaps/configmap.yaml.save (vendored Normal file, +37 lines)
View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: nginx-router-config
namespace: external
data:
router.conf: |
server {
listen 80 default_server;
server_name admin.firewall.c2et.net;
location / {
proxy_pass https://192.168.0.1;
proxy_ssl_verify off;
}
}
powervault1.conf: |
server {
listen 80;
server_name admin.powervault1.c2et.net;
location / {
proxy_pass https://192.168.0.71;
proxy_ssl_verify off;
}
}
powervault2.conf: |
server {
listen 80;
server_name admin.powervault2.c2et.net;
location / {
proxy_pass https://192.168.0.74;
proxy_ssl_verify off;
}
}

external/ingress/cockpitdotcom.yaml (vendored Normal file, +27 lines)
View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: cockpitdotcom-ingress
namespace: external
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- cockpit.c2et.com
secretName: cockpitdotcom-tls
rules:
- host: cockpit.c2et.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: external-router-svc
port:
number: 80

external/ingress/gitdotcom.yaml (vendored Normal file, +27 lines)
View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitdotcom-ingress
namespace: external
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- git.c2et.com
secretName: gitdotcom-tls
rules:
- host: git.c2et.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: external-router-svc
port:
number: 80

external/ingress/harbordotcom.yaml (vendored Normal file, +27 lines)
View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: harbordotcom-ingress
namespace: external
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- harbor.c2et.com
secretName: harbordotcom-tls
rules:
- host: harbor.c2et.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: external-router-svc
port:
number: 80

external/ingress/wireguarddotcom.yaml (vendored Normal file, +28 lines)
View File

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: wireguarddotcom-ingress
namespace: external
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/upstream-vhost: "wireguard.c2et.com"
spec:
ingressClassName: nginx
tls:
- hosts:
- wireguard.c2et.com
secretName: wireguarddotcom-tls
rules:
- host: wireguard.c2et.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: external-router-svc
port:
number: 80

View File

@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: mysql
image: mysql:8
image: mysql:latest
env:
- name: MYSQL_ROOT_PASSWORD
value: gitea123

View File

@@ -32,7 +32,7 @@ spec:
- name: GITEA__database__USER
value: "gitea"
- name: GITEA__database__PASSWD
value: "gitea123"
value: "gitea"
volumeMounts:
- name: gitea-data
mountPath: /data

View File

@@ -1,7 +1,5 @@
resources:
- namespace.yaml
- pvc/gitea-data.yaml
- pvc/gitea-db.yaml
- deployments/gitea.yaml
- deployments/gitea-db.yaml
- services/gitea.yaml

View File

@@ -4,11 +4,9 @@ metadata:
name: gitea
namespace: gitea
spec:
type: NodePort
selector:
app: gitea
ports:
- name: http
port: 3000
targetPort: 3000
nodePort: 30300

grafana/kps-values.yaml (Normal file, +28 lines)
View File

@@ -0,0 +1,28 @@
grafana:
enabled: true
service:
type: ClusterIP
ingress:
enabled: true
ingressClassName: nginx # <- tu IngressClass
hosts:
- grafana.c2et.net
tls:
- secretName: grafana-tls
hosts:
- grafana.c2et.net
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod # o el que uses
adminPassword: "Pozuelo12345" # vacío = autogenera; o pon tu contraseña si quieres
# puedes forzar la password así:
# admin:
# existingSecret: grafana-admin
# userKey: admin-user
# passwordKey: admin-password
prometheus:
ingress:
enabled: false # <- mantenlo interno (recomendado)
alertmanager:
ingress:
enabled: false # <- interno

View File

@@ -29,6 +29,7 @@ spec:
volumeMounts:
- name: mysql-storage
mountPath: /var/lib/mysql
subPath: data
volumes:
- name: mysql-storage
persistentVolumeClaim:

harbor/readme.md (Normal file, +27 lines)
View File

@@ -0,0 +1,27 @@
## Instalacion de Harbor
### Fase 1: Despliegue con Ingress
```bash
helm repo add harbor https://helm.goharbor.io
helm repo update
helm install harbor harbor/harbor \
--namespace harbor --create-namespace \
-f values.yaml
```
> Una vez listo, podrás acceder a:
>
> **[https://harbor.c2et.net](https://harbor.c2et.net)**
>
> Usuario: `admin`
> Contraseña: la definida en `harborAdminPassword` (p.ej. `Harbor12345`)
```bash
docker login harbor.c2et.net
```
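The full `values.yaml` is not reproduced here; a minimal sketch consistent with the fragment shown elsewhere in this compare (nginx ingress class, `letsencrypt-prod` issuer, `harbor.c2et.net`) and the example admin password above could look like this, with exact key names depending on the chart version:
```yaml
expose:
  type: ingress
  tls:
    enabled: true
    certSource: auto
  ingress:
    className: nginx
    annotations:
      cert-manager.io/cluster-issuer: "letsencrypt-prod"
    hosts:
      core: harbor.c2et.net
externalURL: https://harbor.c2et.net
harborAdminPassword: "Harbor12345"
```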

View File

@@ -4,7 +4,7 @@ expose:
enabled: true
certSource: auto
ingress:
ingressClassName: nginx
className: nginx
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
hosts:

View File

@@ -6,4 +6,5 @@ resources:
- configmap/configmap.yaml
- deployments/deployment.yaml
- services/service.yaml
- services/service-srv.yaml
- ingressclass/ingressclass.yaml

View File

@@ -0,0 +1,14 @@
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
name: kubevirt
namespace: kubevirt
spec:
certificateRotateStrategy:
selfSigned:
ca:
duration: "26280h" # 3 años
renewBefore: "720h" # 30 días antes
server:
duration: "8760h" # 1 año
renewBefore: "240h" # 10 días antes

View File

@@ -13,6 +13,17 @@ spec:
labels:
app: iso-server
spec:
initContainers:
- name: init-dirs
image: alpine:latest
command: ["sh","-lc"]
args:
- |
mkdir -p /share/isos
chmod 755 /share/isos
volumeMounts:
- name: iso-storage
mountPath: /share
containers:
- name: httpd
image: httpd:2.4
@@ -21,6 +32,7 @@ spec:
volumeMounts:
- name: iso-storage
mountPath: /usr/local/apache2/htdocs
subPath: isos
readinessProbe:
httpGet:
path: /
@@ -36,7 +48,7 @@ spec:
- name: samba
image: dperson/samba
args: ["-p", "-s", "isos;/share;yes;no"]
args: ["-p", "-s", "isos;/share/isos;yes;no"]
ports:
- containerPort: 445
securityContext:

View File

@@ -32,6 +32,9 @@ Este repositorio contiene los **manifiestos, scripts y documentación** para des
* **Multus** ➝ permite varias interfaces de red por pod (**NAD**).
### 2.3. Almacenamiento
* **CSI contra DELL Powervault**:
* 1 cabina por site.
* Requiere nodeSelector por zonas (para hacerlo bien)
* **Ceph distribuido**:
@@ -104,22 +107,38 @@ Este repositorio contiene los **manifiestos, scripts y documentación** para des
* **Dashboard** para gestionarlas.
* **iso-server** ➝ sirve ISOs por HTTPS (subidas vía Samba).
### 5.3. Copias de seguridad
* **Velero**
* Montado sobre dos **almacenes S3** (Minio), uno por **SITE**
* Cada almacen en una cabina de almacenamiento **(DriverCSI)**
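A sketch of how such backups can be declared as Velero `Schedule` objects; the daily/weekly cadence mirrors the status table further below, while the names, namespace and storage location here are illustrative rather than taken from the repo:
```yaml
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: dynu-daily            # illustrative name
  namespace: velero
spec:
  schedule: "0 3 * * *"       # daily at 03:00
  template:
    includedNamespaces: ["dynu-updater"]
    storageLocation: default   # one of the two Minio-backed S3 locations
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: full-weekly           # illustrative name
  namespace: velero
spec:
  schedule: "0 4 * * 0"       # weekly, Sunday at 04:00
  template:
    includedNamespaces: ["*"]
    storageLocation: default
```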
### 5.4. Repositorio interno
* **Repo**
* De momento, los repos de OpenSUSE 15.6 (ampliable)
* Servidor HTTP/HTTPS para centralizar las descargas
* servidor Samba para replicas a traves de diodo de datos
---
## 6. 📚 Índice de documentos y referencias cruzadas
| Documento | Descripción | Referencia |
| --------------------------- | --------------------------------------------- | ---------------------------------- |
| `estructura_manifiestos.md` | Explicación de nuestra estructura de manifiestos | [Ver](estructura_manifiestos.md) |
| `estructura_manifiestos.md` | Explicación de la estructura de manifiestos | [Ver](estructura_manifiestos.md) |
| `cluster_init.md` | Proceso de inicialización del cluster en SUSE | [Ver](cluster_init.md) |
| `redes_internet.md` | MetalLB, Multus y demás | [Ver](redes_internet.md) |
| `ingress.md` | Capítulo de cert-manager e ingress | [Ver](ingress.md) |
| `cephrook.md` | Instalación e integración de Ceph/Rook | [Ver](./cephrook.md) |
| `rook\readme.md` | Instalación e integración de Ceph/Rook | [Ver](./rook/readme.md) |
| `seagate\readme.md` | Instalación del driver CSI para DELL SAN | [Ver](./seagate/readme.md) |
| `kubevirt\readme.md` | Despliegue de KubeVirt y gestión de VMs | [Ver](./kubevirt/readme.md) |
| `vm-windows-demo\readme.md` | Máquina virtual de ejemplo | [Ver](./vm-windows-demo/readme.md) |
| `comprobaciones.md` | Checklist tras cada paso crítico | [Ver](./comprobaciones.md) |
| `script_limpieza.md` | Script para limpiar un nodo | [Ver](script_limpieza.md) |
| `coredns-demo\readme.md` | Ejemplo de Multus con CoreDNS | [Ver](./coredns-demo/readme.md) |
| `harbor\readme.md` | Manual de instalacion de Harbor | [Ver](./harbor/readme.md) |
| `storage\readme.md` | Ejemplo de StorageClass | [Ver](./storage/readme.md) |
| `dashboard\readme.md` | Ejemplo con ingress dashboard | [Ver](./dashboard/readme.md) |
| `wireguard\readme.md` | Manual de WireGuard | [Ver](./wireguard/readme.md) |
@@ -132,28 +151,35 @@ Este repositorio contiene los **manifiestos, scripts y documentación** para des
| `mapas\readme.md` | Manual de instalación de Tileserver-GL | [Ver](./mapas/readme.md) |
| `argos\readme.md` | Manual de instalación de Argos Core | [Ver](./argos/readme.md) |
| `multusk3s.md` | Notas para Multus en K3s | [Ver](./multusk3s.md) |
| `velero\readme.md` | Manual de instalación de Velero | [Ver](./velero/readme.md) |
| `dynu-updater\readme.md` | Manual de Dynu-updater | [Ver](./dynu-updater/readme.md) |
| `repo\readme.md` | Manual del repo SUSE | [Ver](./repo/readme.md) |
---
## 7. 📊 Estado actual de la instalación
| Componente | Estado | Comentario | Enlace | User/Pass |
| ------------------------ | ------------------ | -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | -------------------- |
| ------------------------ | ---------------------- | -------------------------------------------- | ------------------------------------------------------------------------------------------------------------------- | -------------------- |
| `Arranque Cluster` | ✅ Completado | Instalación básica validada | [https://k8s.c2et.net](https://k8s.c2et.net) | kubeconfig |
| `Networking` | ✅ Completado | probado Multus, flannel y MetalLB y validado | - | - |
| `Ingress` | ✅ Completado Nginx | Nginx funcionando | - | - |
| `Volumenes persistentes` | ✅ Completado | Rook Ceph a 4 nodos, falta ampliar a 5 nodos | [https://ceph.c2et.net/](https://ceph.c2et.net/) | admin / Pozuelo12345 |
| `Maquinas Virtuales` | ✅ Completado | Desplegado kubevirt, dashboard e isoserver | [https://kubevirt.c2et.net/](https://kubevirt.c2et.net/) [https://isoserver.c2et.net/](https://isoserver.c2et.net/) | - |
| `Wireguard` | ✅ Completado | Funcionando | [https://wireguard.c2et.net/](https://wireguard.c2et.net/) | Pozuelo12345 |
| `CoreDNS` | ✅ Completado | Funcionando | | |
| `Apolo` | ✅ Completado | Funcionando | [https://portal.apolo.c2et.net/](https://portal.apolo.c2et.net/) | admin / 123456 |
| `Gitea` | ✅ Completado | Funcionando | [https://git.c2et.net/](https://git.c2et.net/) | |
| `Harbor` | ✅ Completado | Funcionando | [https://harbor.c2et.net/](https://harbor.c2et.net/) | |
| `Guacamole` | ✅ Completado | Funcionando | [https://heimdall.c2et.net/](https://heimdall.c2et.net/) | |
| `VSCode` | ✅ Completado | Funcionando | [https://vscode.c2et.net/](https://vscode.c2et.net/) | Pozuelo12345 |
| `Tileserver-GL` | ✅ Completado | Funcionando | [https://mapas.c2et.net/](https://mapas.c2et.net/) | |
| `External` | ✅ Completado | Funcionando | varias | |
| `Argos Core` | ✅ Completado | Funcionando | [https://argos.panel.c2et.net/](https://argos.panel.c2et.net/) | |
| `Volumenes persistentes` | ✅ Completado | Rook Ceph a 4 nodos, falta ampliar a 5 nodos | [https://ceph.c2et.net](https://ceph.c2et.net/) | admin / Pozuelo12345 |
| `Volumenes persistentes` | ✅ Completado | Driver para las cabinas de almacenamiendo DEEL Powervault | | |
| `Maquinas Virtuales` | ✅ Completado | Desplegado kubevirt, dashboard e isoserver | [https://kubevirt.c2et.net](https://kubevirt.c2et.net/) <br>[https://isoserver.c2et.net](https://isoserver.c2et.net/) | - |
| `Wireguard` | ✅ Completado | version con acceso a 0.0 y a 200.0 | [https://wireguard.c2et.net](https://wireguard.c2et.net/) | Pozuelo12345 |
| `CoreDNS` | ✅ Completado | Split DNS interno configurado en los host fisicos | | |
| `Apolo` | ✅ Completado | Funcionando, falta probar streaming | [https://portal.apolo.c2et.net](https://portal.apolo.c2et.net/) | admin / 123456 |
| `Gitea` | ✅ Completado | Funcionando | [https://git.c2et.net](https://git.c2et.net) | |
| `Harbor` | ✅ Completado | Funcionando pero no esta Ismael (solo estoy yo)| [https://harbor.c2et.net](https://harbor.c2et.net) | |
| `Guacamole` | ✅ Completado | Funcionando, pero esta en blanco (hay que crear los hosts) | [https://heimdall.c2et.net](https://heimdall.c2et.net) | guacadmin / guacadmin |
| `VSCode` | ✅ Completado | Funcionando | [https://vscode.c2et.net](https://vscode.c2et.net) | Pozuelo12345 |
| `Tileserver-GL` | ✅ Completado | Funcionando con mapa de España de prueba | [https://mapas.c2et.net](https://mapas.c2et.net) | |
| `External` | ✅ Completado | Funcionando los servicios de docker | [https://admin.firewall.c2et.net](https://admin.firewall.c2et.net) <br>[https://admin.powervault1.c2et.net](https://admin.powervault1.c2et.net)<br> [https://admin.powervault2.c2et.net](https://admin.powervault2.c2et.net) | |
| `Argos Core` | ✅ Completado | Funcionando sin clientes configurados | [https://argos.panel.c2et.net/](https://argos.panel.c2et.net) | |
| `Velero` | ✅ Completado | Copia de seguridad diaria de dynu y semanal de todo | | |
| `Dynu-updater` | ✅ Completado | Funcionando, actualiza el grupo "Trabajo" | | |
| `Repo` | ✅ Completado | Probando a ver si funciona | [http://repo.c2et.net/](https://repo.c2et.net) | |
---
@@ -164,7 +190,7 @@ Este repositorio contiene los **manifiestos, scripts y documentación** para des
* Dos redes: administración y servicios.
* Seguridad basada en **VPN + DNS + ACLs**.
* Ingress con SSL automático.
* Funcionalidades extra: proxy externo + VMs con KubeVirt.
* Funcionalidades extra: proxy externo + VMs con KubeVirt + backup.
---

View File

@@ -0,0 +1,100 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: repo-sources
namespace: repo
data:
# Lista de orígenes a espejar
# Formato por línea: NAME|URL|SUBDIR
# SUBDIR cuelga de /mirror/repos (asegúrate de que tu Deployment monta el PVC en /usr/local/apache2/htdocs con subPath repos)
sources.txt: |
# openSUSE Leap 15.6 (básicos + updates)
repo-oss|http://download.opensuse.org/distribution/leap/15.6/repo/oss/|opensuse/leap/15.6/oss
repo-non-oss|http://download.opensuse.org/distribution/leap/15.6/repo/non-oss/|opensuse/leap/15.6/non-oss
update-oss|http://download.opensuse.org/update/leap/15.6/oss/|opensuse/leap/15.6/update/oss
update-non-oss|http://download.opensuse.org/update/leap/15.6/non-oss/|opensuse/leap/15.6/update/non-oss
update-sle|http://download.opensuse.org/update/leap/15.6/sle/|opensuse/leap/15.6/update/sle
backports|http://download.opensuse.org/update/leap/15.6/backports/|opensuse/leap/15.6/update/backports
# Codecs openh264
openh264|http://codecs.opensuse.org/openh264/openSUSE_Leap/|opensuse/openh264
# Terceros (opcional)
nvidia|https://download.nvidia.com/opensuse/leap/15.6/|thirdparty/nvidia/leap/15.6
k8s-stable|https://pkgs.k8s.io/core:/stable:/v1.33/rpm/|thirdparty/kubernetes/core/stable/v1.33/rpm
# Claves públicas a publicar en /mirror/keys
# Formato por línea: NAME|URL|FILENAME
# Ajusta las URLs si prefieres otras fuentes/ubicaciones oficiales
keys.txt: |
opensuse|https://download.opensuse.org/repositories/openSUSE:/Leap:/15.6:/Update/standard/repodata/repomd.xml.key|RPM-GPG-KEY-openSUSE
nvidia|https://download.nvidia.com/opensuse/repodata/repomd.xml.key|RPM-GPG-KEY-NVIDIA
k8s|https://pkgs.k8s.io/core:/stable:/v1.33/rpm/repodata/repomd.xml.key|RPM-GPG-KEY-k8s
# Script de sincronización diario (CronJob)
# - Sincroniza repos a /mirror/repos/...
# - Publica claves GPG en /mirror/keys
sync.sh: |
#!/usr/bin/env bash
set -euo pipefail
SRC_LIST="/config/sources.txt"
KEYS_LIST="/config/keys.txt"
DEST_ROOT="/mirror/repos"
DEST_KEYS="/mirror/keys"
mkdir -p "$DEST_ROOT" "$DEST_KEYS"
# Requisitos
command -v wget >/dev/null 2>&1 || { echo "ERROR: wget requerido"; exit 1; }
if ! command -v rsync >/dev/null 2>&1; then
echo "Aviso: rsync no disponible; usaré wget para HTTP/HTTPS"
fi
echo "===== SYNC REPOS ====="
while IFS='|' read -r NAME URL SUBDIR; do
[[ -z "${NAME:-}" || "${NAME:0:1}" == "#" ]] && continue
DEST="${DEST_ROOT}/${SUBDIR}"
mkdir -p "$DEST"
echo "==> Sync ${NAME} (${URL}) -> ${DEST}"
if [[ "$URL" == rsync://* ]]; then
# Sincronización eficiente por rsync (si el mirror lo soporta)
rsync -aH --delete --partial --info=stats1,progress2 "${URL}" "${DEST}/"
else
# Mirror vía HTTP/HTTPS con wget
TMP="${DEST}.tmp"
mkdir -p "$TMP"
# -m (mirror), -np (no subir), -nH (sin host en ruta), robots=off
wget -m -np -nH -e robots=off -P "$TMP" --no-verbose --show-progress "$URL"
# Mover contenido espeljado a DEST (limpiando y dejando estructura limpia)
shopt -s dotglob nullglob
if compgen -G "$TMP/*" >/dev/null; then
rsync -a --delete "$TMP"/ "$DEST"/
fi
rm -rf "$TMP"
fi
# Permisos legibles por httpd y Samba
chmod -R a+rX "$DEST"
done < "$SRC_LIST"
echo "===== SYNC KEYS ====="
if [[ -f "$KEYS_LIST" ]]; then
while IFS='|' read -r KNAME KURL KFILE; do
[[ -z "${KNAME:-}" || "${KNAME:0:1}" == "#" ]] && continue
echo "==> Key ${KNAME} (${KURL}) -> ${DEST_KEYS}/${KFILE}"
wget -q -O "${DEST_KEYS}/${KFILE}.tmp" "$KURL"
mv "${DEST_KEYS}/${KFILE}.tmp" "${DEST_KEYS}/${KFILE}"
chmod a+r "${DEST_KEYS}/${KFILE}"
done < "$KEYS_LIST"
else
echo "No hay KEYS_LIST ($KEYS_LIST), omitido."
fi
echo "===== DONE ====="

View File

@@ -0,0 +1,144 @@
#!/usr/bin/env bash
set -euo pipefail
# ============================
# Configuración (ajusta aquí)
# ============================
BASE_URL="${BASE_URL:-http://repo.c2et.net}" # tu dominio del mirror (sin / al final)
LEAP_VER="${LEAP_VER:-15.6}" # versión de Leap
ENABLE_NVIDIA="${ENABLE_NVIDIA:-true}" # true/false
ENABLE_K8S="${ENABLE_K8S:-true}" # true/false
DISABLE_EXTERNAL="${DISABLE_EXTERNAL:-true}" # true/false (deshabilitar repos externos)
# Rutas base del mirror interno
REPO_BASE="${BASE_URL}/opensuse/leap/${LEAP_VER}"
KEYS_BASE="${BASE_URL}/keys"
# Directorio de repos dnf/zypp
REPOS_DIR="/etc/zypp/repos.d"
# ============================
# Helpers
# ============================
need_root() {
if [[ $EUID -ne 0 ]]; then
echo "Este script debe ejecutarse como root (o con sudo)." >&2
exit 1
fi
}
have_cmd() {
command -v "$1" >/dev/null 2>&1
}
write_repo() {
local alias="$1" name="$2" baseurl="$3" gpgkey="$4"
local path="${REPOS_DIR}/${alias}.repo"
cat >"${path}.tmp" <<EOF
[${alias}]
name=${name}
enabled=1
autorefresh=1
baseurl=${baseurl}
type=rpm-md
gpgcheck=1
gpgkey=${gpgkey}
EOF
# Solo mueve si cambió (idempotente)
if [[ ! -f "${path}" ]] || ! cmp -s "${path}.tmp" "${path}"; then
mv "${path}.tmp" "${path}"
echo " - Escrito ${path}"
else
rm -f "${path}.tmp"
echo " - Sin cambios ${path}"
fi
}
import_key() {
local url="$1" file="key-$(basename "$url")"
# rpm ignora si ya está importada; esto es idempotente
echo " - Importando clave: $url"
rpm --import "$url" || {
echo " * Aviso: no se pudo importar $url. ¿Hay conectividad al mirror?" >&2
return 1
}
}
disable_external_repos() {
echo "Deshabilitando repos externos conocidos..."
# Deshabilita TODO lo que no sea *_local que creemos, de forma segura:
# Busca todos los alias actuales y deshabilita los que no terminen en '-local'
local aliases
aliases=$(zypper --non-interactive lr -u | awk 'NR>2 {print $1,$2}' | tail -n +1 | awk '{print $2}')
for a in $aliases; do
if [[ "$a" != *-local ]]; then
# Algunos sistemas traen alias con espacios; saltamos los complicados
if [[ "$a" =~ ^[A-Za-z0-9._:-]+$ ]]; then
zypper --non-interactive mr -d "$a" || true
fi
fi
done
}
# ============================
# Main
# ============================
need_root
if ! have_cmd zypper; then
echo "No se encontró zypper. ¿Es openSUSE/SLE este sistema?" >&2
exit 1
fi
echo "== Configurando repos locales desde ${BASE_URL} para Leap ${LEAP_VER} =="
mkdir -p "$REPOS_DIR"
# 1) Importa claves GPG desde tu mirror
echo "Importando claves GPG desde ${KEYS_BASE} ..."
import_key "${KEYS_BASE}/RPM-GPG-KEY-openSUSE" || true
$ENABLE_NVIDIA && import_key "${KEYS_BASE}/RPM-GPG-KEY-NVIDIA" || true
$ENABLE_K8S && import_key "${KEYS_BASE}/RPM-GPG-KEY-k8s" || true
# 2) Repos base de openSUSE
echo "Escribiendo archivos .repo para repos locales..."
write_repo "repo-oss-local" "repo-oss-local" "${REPO_BASE}/oss" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "repo-non-oss-local" "repo-non-oss-local" "${REPO_BASE}/non-oss" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "update-oss-local" "update-oss-local" "${REPO_BASE}/update/oss" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "update-non-oss-local" "update-non-oss-local" "${REPO_BASE}/update/non-oss" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "update-sle-local" "update-sle-local" "${REPO_BASE}/update/sle" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "update-backports-local" "update-backports-local" "${REPO_BASE}/update/backports" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
write_repo "openh264-local" "openh264-local" "${BASE_URL}/opensuse/openh264" "${KEYS_BASE}/RPM-GPG-KEY-openSUSE"
# 3) Repos de terceros (opcional)
if $ENABLE_NVIDIA; then
write_repo "nvidia-local" "nvidia-local" \
"${BASE_URL}/thirdparty/nvidia/leap/${LEAP_VER}" \
"${KEYS_BASE}/RPM-GPG-KEY-NVIDIA"
fi
if $ENABLE_K8S; then
write_repo "k8s-stable-local" "k8s-stable-local" \
"${BASE_URL}/thirdparty/kubernetes/core/stable/v1.33/rpm" \
"${KEYS_BASE}/RPM-GPG-KEY-k8s"
fi
# 4) Deshabilitar repos externos si procede
if $DISABLE_EXTERNAL; then
disable_external_repos
fi
# 5) Refrescar repos (no interactivo)
echo "Refrescando repos..."
zypper --non-interactive --gpg-auto-import-keys ref || true
echo
echo "== Listado final de repos =="
zypper lr -d || true
echo
echo "Listo. Si quieres personalizar:"
echo " BASE_URL=... LEAP_VER=... ENABLE_NVIDIA=true/false ENABLE_K8S=true/false DISABLE_EXTERNAL=true/false \\"
echo " sudo -E ./$(basename "$0")"

View File

@@ -0,0 +1,38 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: repo-sync
namespace: repo
spec:
schedule: "15 2 * * *"
concurrencyPolicy: Forbid
successfulJobsHistoryLimit: 2
failedJobsHistoryLimit: 2
jobTemplate:
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: sync
image: alpine:latest
command: ["sh","-lc"]
args:
- |
set -e
apk add --no-cache rsync wget bash coreutils
chmod +x /config/sync.sh
/config/sync.sh
volumeMounts:
- name: repo-storage
mountPath: /mirror
- name: repo-config
mountPath: /config
volumes:
- name: repo-storage
persistentVolumeClaim:
claimName: repo-pvc
- name: repo-config
configMap:
name: repo-sources
defaultMode: 0755

View File

@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: repo-server
namespace: repo
spec:
replicas: 1
selector:
matchLabels:
app: repo-server
template:
metadata:
labels:
app: repo-server
spec:
initContainers:
- name: init-dirs
image: alpine:3.20
command: ["sh","-lc"]
args:
- |
mkdir -p /share/repos
chmod 755 /share /share/repos
volumeMounts:
- name: repo-storage
mountPath: /share
containers:
- name: httpd
image: httpd:2.4
ports:
- containerPort: 80
volumeMounts:
- name: repo-storage
mountPath: /usr/local/apache2/htdocs
subPath: repos
readinessProbe:
httpGet: { path: /, port: 80 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet: { path: /, port: 80 }
initialDelaySeconds: 15
periodSeconds: 20
- name: samba
image: dperson/samba
args: ["-p", "-s", "repos;/share/repos;yes;no"]
ports:
- containerPort: 445
securityContext:
runAsUser: 0
volumeMounts:
- name: repo-storage
mountPath: /share
volumes:
- name: repo-storage
persistentVolumeClaim:
claimName: repo-pvc

View File

@@ -0,0 +1,23 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: repo
namespace: repo
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
spec:
ingressClassName: nginx
tls:
- hosts: [ "repo.c2et.net" ]
secretName: repo-c2et-net-tls
rules:
- host: repo.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: repo-http
port:
number: 80

repo/kustomization.yaml (Normal file, +26 lines)
View File

@@ -0,0 +1,26 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: repo
commonLabels:
app.kubernetes.io/name: repo-mirror
app.kubernetes.io/part-of: suse-repo
resources:
- namespace.yaml
- pvc.yaml
- configmap/repo-sources.yaml
- deployments/repo-server.yaml
- services/service-http.yaml
- services/service-samba.yaml
- ingress/ingress-repo.yaml
- cronjobs/repo-sync.yaml
images:
- name: httpd
newTag: "2.4"
- name: alpine
newTag: "latest"
- name: dperson/samba
newTag: "latest"

repo/namespace.yaml (Normal file, +4 lines)
View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: repo

repo/pvc.yaml (Normal file, +12 lines)
View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: repo-pvc
namespace: repo
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 2Ti
storageClassName: sc-me5-site-a

repo/readme.md (Normal file, +71 lines)
View File

@@ -0,0 +1,71 @@
# Repositorio Privado openSUSE
Este despliegue en Kubernetes crea un **mirror interno de repositorios de openSUSE** (y de terceros opcionales, como NVIDIA o Kubernetes). Sirve para que los servidores de nuestra red se actualicen **desde dentro**, sin depender de internet.
El sistema funciona con:
* **Servidor HTTP/HTTPS** → los clientes SUSE acceden vía `http://repo.c2et.net/...` o `https://repo.c2et.net/...` para descargar paquetes y metadatos.
* **Servidor Samba (SMB)** → expone la misma carpeta por red. Esto nos permite que el **“diodo de datos”** copie los repos de manera unidireccional hacia la red clasificada. Así aseguramos que las máquinas en la red sensible reciben actualizaciones sin conectividad exterior.
La carpeta de repos se actualiza automáticamente cada día mediante un **CronJob**, que sincroniza contra los repos oficiales de openSUSE y de terceros.
---
## Cómo desplegarlo
1. Ajusta **dominio** en el Ingress y (si quieres) IP fija en el Service de Samba.
2. Revisa tamaño de **PVC** (mínimo 300GB recomendado).
3. (Opcional) Cambia o amplía la lista en `sources.txt` (por ejemplo, usando mirrors con `rsync://`).
4. Despliega todo de una vez con **Kustomize**:
```bash
kubectl apply -k repo/
```
*(Si prefieres, aún puedes aplicar los manifiestos uno por uno en el orden indicado en la carpeta `repo/`.)*
5. Para lanzar una sincronización inicial manual (sin esperar al cron):
```bash
kubectl create job --from=cronjob/repo-sync repo-sync-now -n repo
kubectl logs -f job/repo-sync-now -n repo
```
---
## Configuración en los clientes SUSE
En los clientes no hace falta configurar repos manualmente. Basta con ejecutar el **script de cliente** incluido en este repo (`configure-local-repos.sh`). Este script:
* Importa las claves GPG desde `http://repo.c2et.net/keys/`.
* Crea los `.repo` apuntando al mirror interno.
* Deshabilita los repos externos para que solo se usen los `-local`.
### Uso del script en el cliente
```bash
chmod +x configure-local-repos.sh
sudo ./configure-local-repos.sh
```
Esto deja el sistema listo para trabajar solo con los repos locales.
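For reference, each generated file follows the `write_repo` template from `configure-local-repos.sh` (shown earlier in this compare); `/etc/zypp/repos.d/repo-oss-local.repo`, for example, ends up roughly as:
```
[repo-oss-local]
name=repo-oss-local
enabled=1
autorefresh=1
baseurl=http://repo.c2et.net/opensuse/leap/15.6/oss
type=rpm-md
gpgcheck=1
gpgkey=http://repo.c2et.net/keys/RPM-GPG-KEY-openSUSE
```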
---
## Ventajas de esta arquitectura
* **Seguridad**: los clientes nunca salen a internet, solo acceden al repo interno.
* **Control**: el mirror se actualiza de forma programada (p. ej. de madrugada). Siempre sabemos qué versiones están disponibles.
* **Simplicidad**: los clientes usan HTTP/HTTPS estándar; el Ingress se encarga del TLS si hace falta.
* **Integración con el diodo**: gracias a Samba, la carpeta puede replicarse unidireccionalmente hacia la red clasificada.
* **Verificación**: zypper siempre valida las firmas GPG de los paquetes, aunque se distribuyan por HTTP.
---
## Sugerencias y mejoras
* Usar **mirrors oficiales con rsync** para ahorrar ancho de banda y tiempo de sincronización.
* Añadir `--bwlimit` en el `sync.sh` si queremos limitar consumo nocturno de ancho de banda (ejemplo más abajo).
* Sustituir `httpd` por `nginx` si se busca mayor rendimiento en descargas masivas.
* Proteger el Ingress con autenticación si se expone fuera de la red de confianza.
* Mantener el **script de cliente** actualizado para simplificar el alta de repos en todos los servidores SUSE.
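
As an example of the `--bwlimit` suggestion, the rsync branch of `sync.sh` could be throttled like this (the 20000 KB/s cap is an arbitrary illustration):
```bash
# In sync.sh, rsync:// sources only; --bwlimit is in KB/s by default.
rsync -aH --delete --partial --bwlimit=20000 --info=stats1,progress2 "${URL}" "${DEST}/"
```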

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: repo-http
namespace: repo
spec:
type: ClusterIP
selector:
app: repo-server
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: repo-samba
namespace: repo
spec:
type: LoadBalancer
loadBalancerIP: 192.168.0.106
selector:
app: repo-server
ports:
- name: samba
port: 445
targetPort: 445
protocol: TCP

rook/borrar_discos.sh (Executable file, +38 lines)
View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# ⚠️ AJUSTA ESTA LISTA A TUS DISCOS DE CEPH (NUNCA el del sistema)
DISKS=(sdb sdc sdd sde sdf sdg)
echo "Discos objetivo:"
printf ' - /dev/%s\n' "${DISKS[@]}"
echo
read -r -p "¿Seguro que quieres LIMPIAR estos discos? Escribe 'SI' para continuar: " ok
[[ "$ok" == "SI" ]] || { echo "Cancelado."; exit 1; }
for d in "${DISKS[@]}"; do
dev="/dev/$d"
echo ">>> Limpiando $dev"
# 0) Opcional: si vienen de un cluster Ceph viejo, intenta zappearlos con ceph-volume
if command -v ceph-volume >/dev/null 2>&1; then
sudo ceph-volume lvm zap --destroy "$dev" || true
fi
# 1) GPT/MBR
sudo sgdisk --zap-all "$dev" || true
# 2) Firmas de FS/LVM/RAID
sudo wipefs -a "$dev" || true
# 3) TRIM (si soporta). Si falla, hacemos un “zero header” de 10 MiB.
if ! sudo blkdiscard -f "$dev"; then
sudo dd if=/dev/zero of="$dev" bs=1M count=10 oflag=direct conv=fsync || true
fi
# 4) Limpieza de particiones fantasma en el kernel
sudo partprobe "$dev" || true
echo ">>> $dev limpiado."
done
echo "Hecho."

View File

@@ -1,447 +0,0 @@
# Despliegue de RookCeph en clúster **Kubernetes** (SUSE) con discos locales (Bluestore)
> Guía actualizada para un clúster **Kubernetes** (no K3s) en SUSE, con 4 nodos iniciales y **futura ampliación a stretch** con un quinto nodo **árbitro**. Discos locales (RAID/HBA), red de almacenamiento dedicada **VLAN 30 192.168.3.0/24**, y exposición del dashboard **vía Ingress NGINX** con TLS.
---
## 1) Requisitos previos
* 4 nodos Kubernetes operativos: `srvfkvm01`, `srvfkvm02`, `srvfkvm03`, `srvfkvm04` (control-plane o mixtos)
* Cada nodo con **6 discos** dedicados (\~894GB) para Ceph
* Acceso a Internet desde los nodos
* Red de almacenamiento dedicada **VLAN 30 192.168.3.0/24** (Ceph public/cluster)
* `kubectl` configurado y permisos de admin
> **Nota de versiones**: ejemplos probados con Rook 1.17.x y Ceph v19.x (Squid) o v18.x (Reef). En los manifiestos se usa una imagen estable.
---
## 2) Preparar discos en SUSE (solo discos de datos)
Instala utilidades necesarias en **cada nodo**:
```bash
sudo zypper -n install gdisk util-linux
```
Limpieza segura **solo** de `sdb…sdg` (ajusta si difiere):
```bash
set -euo pipefail
DISKS=(sdb sdc sdd sde sdf sdg)
for d in "${DISKS[@]}"; do
echo ">>> /dev/$d"
sudo sgdisk --zap-all /dev/$d || true # limpia GPT/MBR
sudo wipefs -a /dev/$d || true # borra firmas FS/LVM
sudo blkdiscard -f /dev/$d || \ # TRIM (si soporta)
sudo dd if=/dev/zero of=/dev/$d bs=1M count=10 oflag=direct,dsync
done
```
Obtén las rutas **persistentes** *byid* para cada disco (en cada nodo):
```bash
for d in sdb sdc sdd sde sdf sdg; do
echo "=== $HOSTNAME -> $d ==="
ls -l /dev/disk/by-id/ | awk -v d="$d" '$NF ~ ("/" d "$") {print "/dev/disk/by-id/"$9}'
done
```
> **Usa siempre** `/dev/disk/by-id/...` en los manifiestos (campo `fullpath:`) para evitar cambios de letra.
---
## 3) Etiquetado de nodos por **site**
Vamos a distribuir por zonas lógicas desde el inicio (A/B). El árbitro llegará después.
```bash
# SITE A
kubectl label node srvfkvm01 topology.kubernetes.io/zone=site-a --overwrite
kubectl label node srvfkvm02 topology.kubernetes.io/zone=site-a --overwrite
# SITE B
kubectl label node srvfkvm03 topology.kubernetes.io/zone=site-b --overwrite
kubectl label node srvfkvm04 topology.kubernetes.io/zone=site-b --overwrite
```
> Cuando exista el nodo **árbitro**, se etiquetará como `topology.kubernetes.io/zone=arbiter`.
---
## 4) Instalar Rook (CRDs, comunes y operador)
```bash
kubectl create namespace rook-ceph || true
# Clonar repo oficial (opcional para tener toolbox/ejemplos)
git clone https://github.com/rook/rook.git
cd rook/deploy/examples
kubectl apply -f crds.yaml -f common.yaml -f operator.yaml
```
Comprueba el operador:
```bash
kubectl -n rook-ceph get pods | grep operator
```
---
## 5) CephCluster 4 nodos, discos *byid*, red de storage (VLAN 30)
Archivo `cluster/ceph-cluster.yaml`:
```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
cephVersion:
image: quay.io/ceph/ceph:v19.2.3 # estable (puedes usar v18.2.x si prefieres)
dataDirHostPath: /var/lib/rook
# Red: usamos hostNetworking y restringimos a VLAN de storage
network:
provider: host
addressRanges:
public:
- "192.168.3.0/24"
cluster:
- "192.168.3.0/24"
mon:
count: 3
allowMultiplePerNode: false
dashboard:
enabled: true
# No queremos OSDs en el futuro nodo árbitro
placement:
osd:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a", "site-b"]
storage:
useAllNodes: false
useAllDevices: false
nodes:
- name: srvfkvm01
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5bb177a1716
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5dc196bd3a7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5f81b10f7ef
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d6151cca8afd
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d62f1e5e9699
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d64f204b2405
- name: srvfkvm02
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127eef88828273
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127f879197de32
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128081a076ba0c
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128114a93e33b9
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94300301281a7b1fc151a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128235ba79d801
- name: srvfkvm03
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128aef3bb4e0ae
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b0e3d8bc1dc
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b2b3f446dd7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b4440c2d027
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b5e42510c2a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b7d442e592c
- name: srvfkvm04
devices:
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c003012887ebfca6752
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c0030128896e360075f
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288ac038600d4
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f
```
Apply and verify:
```bash
kubectl apply -f cluster/ceph-cluster.yaml
kubectl -n rook-ceph get pods
```
> Install the **toolbox** for diagnostics: `kubectl -n rook-ceph apply -f rook/deploy/examples/toolbox.yaml`
---
## 6) Initial RBD pool (replica **4** across **hosts**) + StorageClass
> With 2 sites (A/B) and **no** arbiter, do **not** use `failureDomain: zone` with `size: 4`, or the PGs will stay *undersized*. We start with **`host`** and, once **stretch** mode is enabled, we will switch to `zone`.
`pools/ceph-blockpool-rbd.yaml`:
```yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: rbd-2x2-sites
namespace: rook-ceph
spec:
failureDomain: host
replicated:
size: 4
```
`storageclasses/rbd.yaml`:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ceph-rbd
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
pool: rbd-2x2-sites
imageFormat: "2"
imageFeatures: layering
csi.storage.k8s.io/fstype: ext4
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: ["discard"]
```
Apply:
```bash
kubectl apply -f pools/ceph-blockpool-rbd.yaml
kubectl apply -f storageclasses/rbd.yaml
kubectl get sc
```
> If you initially created the pool with `failureDomain: zone` and you see `active+undersized`, create a host-based **CRUSH rule** and assign it to the pool:
>
> ```bash
> kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc '
> set -e
> ceph osd crush rule create-replicated rbd-4x-host default host || true
> ceph osd pool set rbd-2x2-sites crush_rule rbd-4x-host
> ceph osd pool get rbd-2x2-sites crush_rule
> '
> ```
---
## 7) Mark OSDs as **SSD** (if Ceph detects them as HDD because of the HBA)
```bash
# From the toolbox
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc '
for id in $(ceph osd ls); do ceph osd crush rm-device-class osd.$id || true; done
for id in $(ceph osd ls); do ceph osd crush set-device-class ssd osd.$id; done
ceph osd tree | egrep "zone|host|osd."
'
```
> If you later create an **SSD-only** pool, add `spec.deviceClass: ssd` to the `CephBlockPool` (see the sketch below).
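For reference, a minimal sketch of such an SSD-only pool; the name `rbd-ssd` is just an example and is not created anywhere else in this guide:
```yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: rbd-ssd              # example name
  namespace: rook-ceph
spec:
  deviceClass: ssd           # place data only on OSDs with the ssd device class
  failureDomain: host
  replicated:
    size: 4
```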
---
## 8) Dashboard via **Ingress** (NGINX) at `ceph.c2et.net`
> The MGR dashboard listens on **HTTP 7000** by default. We terminate **TLS at the Ingress** (cert-manager) and talk plain **HTTP** to the backend.
`ingress/dashboard.yaml`:
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ceph-dashboard
namespace: rook-ceph
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
ingressClassName: nginx
tls:
- hosts: ["ceph.c2et.net"]
secretName: ceph-dashboard-tls
rules:
- host: ceph.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: rook-ceph-mgr-dashboard
port:
number: 7000
```
Credentials:
```bash
# Default user
admin
# Generated password
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{.data.password}" | base64 -d; echo
# Change the password (example)
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc \
'echo -n "MiNuevaPass" | ceph dashboard ac-user-set-password admin -i -'
```
> If you prefer **HTTPS 8443** to the backend as well, enable TLS on the Ceph dashboard and switch the Ingress to `backend-protocol: "HTTPS"` and port `8443` (optionally `proxy-ssl-verify: "off"`); see the sketch below.
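A sketch of what that change would look like, assuming the Ceph dashboard is already serving TLS on 8443 (fragment only, not applied in this guide):
```yaml
# Ingress annotations for an HTTPS backend (dashboard on 8443)
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/proxy-ssl-verify: "off"   # the MGR dashboard certificate is usually self-signed
# ...and point the backend service port at 8443 instead of 7000
```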
---
## 9) Quick PVC test
`tests/pvc-test.yaml`:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-rbd
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
storageClassName: ceph-rbd
```
`tests/pod-test.yaml`:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: rbd-tester
spec:
containers:
- name: app
image: busybox
command: ["sh","-c","sleep 36000"]
volumeMounts:
- mountPath: /data
name: vol
volumes:
- name: vol
persistentVolumeClaim:
claimName: test-rbd
```
Apply and verify:
```bash
kubectl apply -f tests/pvc-test.yaml
kubectl apply -f tests/pod-test.yaml
kubectl exec -it rbd-tester -- sh -c 'df -h /data && dd if=/dev/zero of=/data/test.bin bs=1M count=100 && ls -lh /data'
```
---
## 10) **Future expansion**: **stretch** mode with an **arbiter** (2 sites + arbiter)
Goal: survive the complete loss of one site, with replicas distributed **2+2** between `site-a` and `site-b`.
1. **Add the arbiter node** and label it:
```bash
kubectl label node <NODO_ARBITRO> topology.kubernetes.io/zone=arbiter --overwrite
```
2. **Update the CephCluster** to stretch mode (5 MONs):
```yaml
# CephCluster patch (spec fragment)
mon:
count: 5
allowMultiplePerNode: false
stretchCluster:
failureDomainLabel: topology.kubernetes.io/zone
subFailureDomain: host
zones:
- name: arbiter
arbiter: true
- name: site-a
- name: site-b
```
> Keep `placement.osd` restricted to `site-a`/`site-b` so that no OSDs are created on the arbiter.
3. **(Optional, recommended)** Switch the `CephBlockPool` *failure domain* back to **`zone`** with `size: 4` (2 per zone). If you want to pin the rule explicitly, create a dedicated CRUSH rule and assign it to the pool.
```bash
# Example: per-zone rule
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc '
set -e
# Create the "rbd-4x-zone" rule (chooses leaves of type zone)
ceph osd crush rule create-replicated rbd-4x-zone default zone || true
# Assign the rule to the pool and adjust size
ceph osd pool set rbd-2x2-sites crush_rule rbd-4x-zone
ceph osd pool set rbd-2x2-sites size 4
ceph osd pool get rbd-2x2-sites crush_rule
'
```
> After switching to `zone`, Ceph relocates PGs to satisfy **2+2** across `site-a` and `site-b`. Do it in a maintenance window if there is already a lot of data; the commands below help you follow the progress.
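A few toolbox commands to follow the data movement (a sketch; `ceph -s` alone is usually enough):
```bash
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph -s
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd pool stats rbd-2x2-sites
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph pg stat
```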
---
## 11) Quick troubleshooting
* **PGs `active+undersized` with pool size=4**: happens when the CRUSH rule selects `zone` but there are only 2 zones (no stretch). Fix: use `failureDomain: host` or assign a host-based rule (section 6) until stretch is enabled.
* **Ingress 503** when opening the dashboard: the `rook-ceph-mgr-dashboard` Service uses **port 7000** (HTTP). Set the Ingress to `backend-protocol: "HTTP"` and port `7000`.
* **TLS cert not issued**: check the ClusterIssuer, the public DNS record pointing at the Ingress, and that the HTTP01 solver uses `class: nginx`. Avoid redirects that interfere with `/.well-known/acme-challenge/`. The checks below usually narrow it down.
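A few checks for the certificate case, assuming cert-manager is installed and its CRDs are available:
```bash
kubectl -n rook-ceph describe certificate ceph-dashboard-tls
kubectl get clusterissuer letsencrypt-prod
kubectl get challenges -A        # pending ACME HTTP01 challenges, if any
```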
---
## 12) Appendix: useful commands
General status:
```bash
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph -s
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd tree
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph df
```
Pools and rules:
```bash
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd pool ls detail
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd pool get rbd-2x2-sites crush_rule
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- ceph osd crush rule dump rbd-4x-host
```
Dashboard:
```bash
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{.data.password}" | base64 -d; echo
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc 'echo -n "NuevaPass" | ceph dashboard ac-user-set-password admin -i -'
```
---
> **Summary**: you deploy Rook-Ceph with a dedicated storage network and disks referenced **by-id**, an RBD pool with **size 4** over **host** to avoid undersized PGs while there is no arbiter, and the dashboard behind an **Ingress** (TLS at NGINX, backend HTTP:7000); once the **arbiter** is added, you switch the cluster to **stretch** mode and the pool to **`failureDomain: zone`** with **2+2** per site.

View File

@@ -1,87 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
cephVersion:
image: quay.io/ceph/ceph:v19.2.3
dataDirHostPath: /var/lib/rook
network:
provider: host
addressRanges:
public:
- "192.168.4.0/24"
cluster:
- "192.168.4.0/24"
mgr:
count: 2
mon:
count: 3
allowMultiplePerNode: false
dashboard:
enabled: true
  # Avoid OSDs on the future arbiter node (once you add it)
placement:
osd:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b"]
mgr:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
preference:
matchExpressions:
- key: kubernetes.io/hostname
operator: In
values: ["srvfkvm01","srvfkvm04"]
storage:
useAllNodes: false
useAllDevices: false
nodes:
- name: srvfkvm01
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5bb177a1716
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5dc196bd3a7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5f81b10f7ef
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d6151cca8afd
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d62f1e5e9699
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d64f204b2405
- name: srvfkvm02
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127eef88828273
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127f879197de32
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128081a076ba0c
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128114a93e33b9
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94300301281a7b1fc151a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128235ba79d801
- name: srvfkvm03
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128aef3bb4e0ae
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b0e3d8bc1dc
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b2b3f446dd7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b4440c2d027
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b5e42510c2a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b7d442e592c
- name: srvfkvm04
devices:
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c003012887ebfca6752
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c0030128896e360075f
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288ac038600d4
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f

View File

@@ -1,73 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
cephVersion:
image: quay.io/ceph/ceph:v19.2.3
dataDirHostPath: /var/lib/rook
network:
provider: host
connections:
publicNetwork: "192.168.3.0/24"
clusterNetwork: "192.168.3.0/24"
mon:
count: 3
allowMultiplePerNode: false
dashboard:
enabled: true
  # Avoid OSDs on the future arbiter node (once you add it)
placement:
osd:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b"]
storage:
useAllNodes: false
useAllDevices: false
nodes:
- name: srvfkvm01
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5bb177a1716
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5dc196bd3a7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5f81b10f7ef
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d6151cca8afd
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d62f1e5e9699
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d64f204b2405
- name: srvfkvm02
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127eef88828273
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127f879197de32
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128081a076ba0c
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128114a93e33b9
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94300301281a7b1fc151a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128235ba79d801
- name: srvfkvm03
devices:
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128aef3bb4e0ae
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b0e3d8bc1dc
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b2b3f446dd7
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b4440c2d027
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b5e42510c2a
- fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b7d442e592c
- name: srvfkvm04
devices:
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c003012887ebfca6752
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c0030128896e360075f
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288ac038600d4
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441
- fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f

View File

@@ -0,0 +1,109 @@
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
cephVersion:
image: quay.io/ceph/ceph:v18
dataDirHostPath: /var/lib/rook
dashboard:
enabled: true
mon:
count: 3
allowMultiplePerNode: false
mgr:
count: 2
  # Placement rules: MONs (A/B/arbiter), MGRs (A/B), OSDs only on A/B
placement:
mon:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b","arbiter"]
topologySpreadConstraints:
- labelSelector:
matchLabels: { app: rook-ceph-mon }
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
mgr:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b"]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values: ["rook-ceph-mgr"]
topologyKey: kubernetes.io/hostname
topologySpreadConstraints:
- labelSelector:
matchLabels: { app: rook-ceph-mgr }
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
osd:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b"]
cleanupPolicy:
wipeDevicesFromOtherClusters: true
sanitizeDisks:
method: quick
dataSource: zero
storage:
useAllNodes: false
useAllDevices: false
nodes:
- name: srvfkvm01
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5bb177a1716, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5dc196bd3a7, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5f81b10f7ef, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d6151cca8afd, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d62f1e5e9699, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d64f204b2405, config: { deviceClass: ssd } }
- name: srvfkvm02
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127eef88828273, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127f879197de32, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128081a076ba0c, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128114a93e33b9, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94300301281a7b1fc151a, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128235ba79d801, config: { deviceClass: ssd } }
- name: srvfkvm03
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128aef3bb4e0ae, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b0e3d8bc1dc, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b2b3f446dd7, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b4440c2d027, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b5e42510c2a, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b7d442e592c, config: { deviceClass: ssd } }
- name: srvfkvm04
devices:
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c003012887ebfca6752, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c0030128896e360075f, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288ac038600d4, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441, config: { deviceClass: ssd } }
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f, config: { deviceClass: ssd } }

View File

@@ -5,8 +5,8 @@ metadata:
namespace: rook-ceph
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,192.168.0.0/24,10.244.0.0/16,192.168.4.0/24"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
# nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,192.168.0.0/24,10.244.0.0/16,192.168.4.0/24"
spec:
ingressClassName: nginx
tls:

View File

@@ -1,9 +0,0 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: rbd-2x2-sites
namespace: rook-ceph
spec:
failureDomain: zone
replicated:
size: 4

View File

@@ -0,0 +1,18 @@
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: rbd-2x2-sites
namespace: rook-ceph
spec:
deviceClass: ssd
failureDomain: zone
replicated:
size: 4
replicasPerFailureDomain: 2
subFailureDomain: host
requireSafeReplicaSize: true
parameters:
pg_autoscale_mode: "on"
min_size: "2"
mirroring:
enabled: false

411
rook/readme.md Normal file
View File

@@ -0,0 +1,411 @@
# Deploying **Rook-Ceph** on Kubernetes (SUSE) with 2 zones + an **arbiter**
> Guide based on the **current** state of the cluster (A/B + *arbiter*), with no prior "no arbiter" phase. Local disks (Bluestore), distribution by **zone**, 3 MONs (one per zone) and 2 MGRs (one on site A, one on site B). RBD pool with **size=4** (2+2 per zone) and **min_size=2**.
---
## 1) Topology and requirements
* Nodes and zones:
  * **site-a**: `srvfkvm01`, `srvfkvm02`
  * **site-b**: `srvfkvm03`, `srvfkvm04`
  * **arbiter**: `srvfkvm05` *(no OSDs)*
* Each data node has **6 disks** dedicated to Ceph (use persistent `/dev/disk/by-id/...` paths).
* Internet access from the nodes. `kubectl` with admin permissions.
* Versions used: **Rook v1.18.x**, **Ceph v18 (Reef)**.
> **Resilience goal**: tolerate the complete loss of one site (A **or** B). The arbiter hosts MONs (and optionally MGRs), **not** OSDs.
---
## 2) Label nodes by **zone**
```bash
# SITE A
kubectl label node srvfkvm01 topology.kubernetes.io/zone=site-a --overwrite
kubectl label node srvfkvm02 topology.kubernetes.io/zone=site-a --overwrite
# SITE B
kubectl label node srvfkvm03 topology.kubernetes.io/zone=site-b --overwrite
kubectl label node srvfkvm04 topology.kubernetes.io/zone=site-b --overwrite
# ARBITER
kubectl label node srvfkvm05 topology.kubernetes.io/zone=arbiter --overwrite
```
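A quick sanity check that every node carries the expected zone label:
```bash
kubectl get nodes -L topology.kubernetes.io/zone
```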
---
## 3) Prepare the disks (SUSE)
Install the utilities (on **every data node**):
```bash
sudo zypper -n install gdisk util-linux
```
Wipe them safely (adjust the IDs for each host):
```bash
# Generic example; use each node's real *by-id* paths
for d in \
/dev/disk/by-id/wwn-...a \
/dev/disk/by-id/wwn-...b \
/dev/disk/by-id/wwn-...c \
/dev/disk/by-id/wwn-...d \
/dev/disk/by-id/wwn-...e \
/dev/disk/by-id/wwn-...f; do
echo ">>> $d"
sudo wipefs -a "$d" || true
  # Head: first 100 MiB
sudo dd if=/dev/zero of="$d" bs=1M count=100 oflag=direct,dsync || true
  # Tail: last 100 MiB
real=$(readlink -f "$d"); dev=$(basename "$real")
sz=$(cat /sys/class/block/$dev/size); tail=$((100*1024*1024/512)); seek=$((sz - tail)); ((seek<0)) && seek=0
sudo dd if=/dev/zero of="$real" bs=512 seek="$seek" count="$tail" oflag=direct,dsync || true
sudo partprobe "$real" || true; sudo udevadm settle || true
done
```
> **Tip**: record each node's exact *by-id* paths; they are the ones used in the `CephCluster` manifest.
---
## 4) Install Rook (CRDs + operator)
```bash
kubectl create namespace rook-ceph || true
# CRDs + common + operator (Rook v1.18.x)
kubectl apply -f https://raw.githubusercontent.com/rook/rook/v1.18.0/deploy/examples/crds.yaml \
-f https://raw.githubusercontent.com/rook/rook/v1.18.0/deploy/examples/common.yaml \
-f https://raw.githubusercontent.com/rook/rook/v1.18.0/deploy/examples/operator.yaml
kubectl -n rook-ceph get pods | grep operator
```
> **Toolbox** (useful for diagnostics):
>
> ```bash
> kubectl -n rook-ceph apply -f https://raw.githubusercontent.com/rook/rook/v1.18.0/deploy/examples/toolbox.yaml
> ```
---
## 5) **CephCluster** manifest (A/B + arbiter, OSDs only on A/B)
File `cluster/ceph-cluster.yaml`, **adapted to the current environment**:
```yaml
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
name: rook-ceph
namespace: rook-ceph
spec:
cephVersion:
image: quay.io/ceph/ceph:v18
dataDirHostPath: /var/lib/rook
dashboard:
enabled: true
mgr:
count: 2
mon:
count: 3
allowMultiplePerNode: false
placement:
    # MGRs spread across site-a and site-b
mgr:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b"]
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values: ["rook-ceph-mgr"]
topologyKey: kubernetes.io/hostname
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: rook-ceph-mgr
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
    # MONs: one per zone (site-a, site-b, arbiter)
mon:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: topology.kubernetes.io/zone
operator: In
values: ["site-a","site-b","arbiter"]
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: rook-ceph-mon
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: DoNotSchedule
security:
cephx:
csi: {}
daemon: {}
rbdMirrorPeer: {}
storage:
useAllDevices: false
nodes:
- name: srvfkvm01
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5bb177a1716, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5dc196bd3a7, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d5f81b10f7ef, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d6151cca8afd, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d62f1e5e9699, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94b003012d64f204b2405, config: {deviceClass: ssd}}
- name: srvfkvm02
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127eef88828273, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030127f879197de32, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128081a076ba0c, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128114a93e33b9, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d94300301281a7b1fc151a, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9430030128235ba79d801, config: {deviceClass: ssd}}
- name: srvfkvm03
devices:
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128aef3bb4e0ae, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b0e3d8bc1dc, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b2b3f446dd7, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b4440c2d027, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b5e42510c2a, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x64cd98f036d9510030128b7d442e592c, config: {deviceClass: ssd}}
- name: srvfkvm04
devices:
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c003012887ebfca6752, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c0030128896e360075f, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288ac038600d4, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288c62acb6efc, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288e456c6d441, config: {deviceClass: ssd}}
- { fullpath: /dev/disk/by-id/wwn-0x6ec2a72037894c00301288f976534b4f, config: {deviceClass: ssd}}
```
Apply and verify:
```bash
kubectl apply -f cluster/ceph-cluster.yaml
kubectl -n rook-ceph get pods
```
> **Note**: the MONs should end up one on `site-a`, one on `site-b`, and one on `arbiter`; the MGRs on `site-a` and `site-b`; the OSDs only on A/B.
---
## 6) Enable the **Orchestrator** (Rook backend)
```bash
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph orch set backend rook
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph orch status
```
---
## 7) **RBD** pool, 2×2 per **zone** + StorageClass
`pools/ceph-blockpool-rbd.yaml`:
```yaml
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
name: rbd-2x2-sites
namespace: rook-ceph
spec:
deviceClass: ssd
failureDomain: zone
replicated:
    size: 4                       # 2 per site (A/B)
minSize: 2
replicasPerFailureDomain: 2
subFailureDomain: host
requireSafeReplicaSize: true
parameters:
pg_autoscale_mode: "on"
```
`storageclasses/rbd.yaml`:
```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: ceph-rbd
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
clusterID: rook-ceph
pool: rbd-2x2-sites
imageFormat: "2"
imageFeatures: layering
csi.storage.k8s.io/fstype: ext4
csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions: ["discard"]
```
Apply and check:
```bash
kubectl apply -f pools/ceph-blockpool-rbd.yaml
kubectl apply -f storageclasses/rbd.yaml
# Quick checks
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd pool get rbd-2x2-sites size
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd pool get rbd-2x2-sites min_size
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph osd crush rule dump rbd-2x2-sites -f json-pretty
```
> The generated CRUSH rule picks a **zone** first and then a **host** (2 replicas per zone). With OSDs only on A/B, the arbiter holds **no** data.
---
## 8) Dashboard via **Ingress** (optional)
`ingress/dashboard.yaml` (backend HTTP:7000):
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ceph-dashboard
namespace: rook-ceph
annotations:
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
ingressClassName: nginx
rules:
- host: ceph.example.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: rook-ceph-mgr-dashboard
port:
number: 7000
```
Admin password:
```bash
kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath='{.data.password}' | base64 -d; echo
```
Create the `admin.c3s` user (the default admin account tends to get its password reset):
```bash
kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- bash -lc \
'echo -n "Pozuelo12345" | ceph dashboard ac-user-create admin.c3s administrator -i - && ceph dashboard ac-user-list'
```
---
## 9) StorageClass test (PVC + Pod)
`tests/pvc.yaml`:
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-rbd
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 5Gi
storageClassName: ceph-rbd
```
`tests/pod.yaml`:
```yaml
apiVersion: v1
kind: Pod
metadata:
name: rbd-tester
spec:
containers:
- name: app
image: busybox
command: ["sh","-c","sleep 36000"]
volumeMounts:
- mountPath: /data
name: vol
volumes:
- name: vol
persistentVolumeClaim:
claimName: test-rbd
```
```bash
kubectl apply -f tests/pvc.yaml
kubectl apply -f tests/pod.yaml
kubectl exec -it rbd-tester -- sh -c 'df -h /data && dd if=/dev/zero of=/data/test.bin bs=1M count=100 && ls -lh /data'
```
---
## 10) Save the exact manifests from the cluster
```bash
# "Clean" CephCluster, without ephemeral fields
kubectl -n rook-ceph get cephcluster rook-ceph -o yaml --show-managed-fields=false \
| yq 'del(.metadata.creationTimestamp,.metadata.generation,.metadata.resourceVersion,.metadata.uid,.status)' \
> ceph-cluster-export.yaml
# Pool and StorageClass
kubectl -n rook-ceph get cephblockpool rbd-2x2-sites -o yaml > ceph-blockpool-export.yaml
kubectl get sc ceph-rbd -o yaml > storageclass-rbd-export.yaml
```
---
## 11) Brief troubleshooting
* **A MON is not rescheduled** after deleting one: the operator needs the **quorum** to remain safe. Check `rook-ceph-mon-endpoints`, `deployment/rook-ceph-mon-*`, and `op-mon` entries in the operator logs.
* **OSDs detected as HDD** behind an HBA: you can force `deviceClass: ssd` per disk (as in the `CephCluster`) or, once deployed, adjust with `ceph osd crush set-device-class ssd osd.N`.
* **Dashboard shows "Orchestrator is not available"**:
```bash
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph orch set backend rook
kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph orch status
```
---
### Wrap-up
You now have a Rook-Ceph deployment aligned with the current reality: 2 data zones + an arbiter, 3 MONs (one per zone), 2 MGRs (A/B), OSDs only on A/B, and an RBD pool with **2+2** replicas per zone. Ready for production and for future growth!

View File

@@ -0,0 +1,13 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi-exos-x.seagate.com
spec:
attachRequired: true
podInfoOnMount: false
requiresRepublish: false
fsGroupPolicy: File
seLinuxMount: false
storageCapacity: false
volumeLifecycleModes:
- Persistent

View File

@@ -0,0 +1,4 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- csi-exos-x-csidriver.yaml

View File

@@ -0,0 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- csidriver/
- secrets/
- storageclass/

6
seagate/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: seagate
labels:
app.kubernetes.io/name: seagate

18
seagate/pod-a.yaml Normal file
View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-a
spec:
nodeSelector:
topology.kubernetes.io/zone: site-a
containers:
- name: app
image: busybox:1.36
command: ["sh","-c","sleep 3600"]
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
persistentVolumeClaim:
claimName: pvc-a

8
seagate/pvc-pod-a.yaml Normal file
View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pvc-a
spec:
accessModes: ["ReadWriteOnce"]
resources: { requests: { storage: 10Gi } }
storageClassName: sc-me5-site-a

276
seagate/readme.md Normal file
View File

@@ -0,0 +1,276 @@
# Seagate Exos X CSI (ME5 dual-site) — Installation and operations guide
This README documents how I made the installation of the *Seagate Exos X CSI Driver* (ME5 is supported) **reproducible** on a Kubernetes cluster with **two arrays / two zones** (site-a and site-b) using iSCSI + multipath and *per-zone topology*.
> **Goal**
>
> * A single driver deployment (Helm).
> * **Two StorageClasses** (one per site) with `allowedTopologies` and separate credentials (Secret).
> * *WaitForFirstConsumer* so the volume is created in the **same zone** as the pod.
> * Fast iSCSI mounts thanks to a well-tuned multipath configuration (`greedy` mode).
---
## 1) iSCSI configuration on the nodes
On **every node** in the cluster:
1. Install the dependencies:
```bash
sudo zypper install open-iscsi yast2-iscsi-client multipath-tools
```
2. Enable and start the iSCSI service:
```bash
sudo systemctl enable --now iscsid.service
systemctl status iscsid.service
```
3. Discover the targets on the arrays:
```bash
sudo iscsiadm -m discovery -t sendtargets -p 192.168.3.11
sudo iscsiadm -m discovery -t sendtargets -p 192.168.3.21
```
At this point, **add a host group with each host on the arrays**.
4. Log in against every portal of both arrays:
```bash
# Site-a array
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.11:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.12:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.13:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.14:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.15:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.16:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.17:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e92b6 -p 192.168.3.18:3260 --login &
# Site-b array
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.21:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.22:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.23:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.24:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.25:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.26:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.27:3260 --login &
sudo iscsiadm -m node -T iqn.1988-11.com.dell:01.array.bc305b5e8e43 -p 192.168.3.28:3260 --login
```
5. Verify the active sessions:
```bash
sudo iscsiadm -m session
```
6. Edit the iSCSI configuration in `/etc/iscsi/iscsid.conf`:
```conf
iscsid.startup = /bin/systemctl start iscsid.socket iscsiuio.socket
iscsid.safe_logout = Yes
node.startup = automatic
node.leading_login = No
node.session.timeo.replacement_timeout = 120
node.conn[0].timeo.login_timeout = 15
node.conn[0].timeo.logout_timeout = 15
node.conn[0].timeo.noop_out_interval = 5
node.conn[0].timeo.noop_out_timeout = 5
node.session.err_timeo.abort_timeout = 15
node.session.err_timeo.lu_reset_timeout = 30
node.session.err_timeo.tgt_reset_timeout = 30
node.session.err_timeo.host_reset_timeout = 60
node.session.initial_login_retry_max = 8
node.session.cmds_max = 128
node.session.queue_depth = 32
node.session.xmit_thread_priority = -20
node.session.iscsi.InitialR2T = No
node.session.iscsi.ImmediateData = Yes
node.session.iscsi.FirstBurstLength = 262144
node.session.iscsi.MaxBurstLength = 16776192
node.conn[0].iscsi.MaxRecvDataSegmentLength = 262144
node.conn[0].iscsi.MaxXmitDataSegmentLength = 0
discovery.sendtargets.iscsi.MaxRecvDataSegmentLength = 32768
node.session.nr_sessions = 1
node.session.reopen_max = 0
node.session.iscsi.FastAbort = Yes
node.session.scan = auto
```
---
## 2) Node prerequisites
### 2.1. `/etc/multipath.conf` configuration
```conf
defaults {
user_friendly_names "no"
find_multipaths "greedy"
no_path_retry "queue"
}
devices {
device {
vendor "DellEMC"
product "ME5"
path_grouping_policy "multibus"
path_checker "tur"
prio "alua"
}
}
```
> **Why `greedy`?**
>
> * `find_multipaths "greedy"` avoids creating maps until there is more than one path **or** the device is clearly multipath, reducing false positives and stabilizing *udev settle*.
### 2.2. Multipath and iSCSI running
Make sure `multipathd` is running:
```bash
sudo systemctl restart multipathd
sudo multipath -r
```
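To confirm the maps look sane after the reload (output format varies with the multipath-tools version):
```bash
sudo multipath -ll           # list multipath maps and their paths
sudo multipathd show paths   # per-path state as seen by multipathd
```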
### 2.3. Mount propagation (rshared)
```bash
sudo mount --make-rshared /
# systemd drop-in for kubelet
sudo install -d /etc/systemd/system/kubelet.service.d
cat <<'EOF' | sudo tee /etc/systemd/system/kubelet.service.d/10-mount-propagation.conf
[Service]
MountFlags=
ExecStartPre=/bin/mkdir -p /var/lib/kubelet
ExecStartPre=/bin/mount --bind /var/lib/kubelet /var/lib/kubelet
ExecStartPre=/bin/mount --make-rshared /var/lib/kubelet
EOF
sudo systemctl daemon-reload
sudo systemctl restart kubelet
```
Verify:
```bash
sudo findmnt -o TARGET,PROPAGATION /
sudo findmnt -o TARGET,PROPAGATION /var/lib/kubelet
```
### 2.4. Topology labels on the nodes
```bash
kubectl label nodes <nodo-del-site-a> topology.kubernetes.io/zone=site-a --overwrite
kubectl label nodes <nodo-del-site-b> topology.kubernetes.io/zone=site-b --overwrite
```
---
## 3) Deploying the driver with Helm
### 3.1. Installation
```bash
helm upgrade --install exos-x-csi \
-n seagate --create-namespace \
./seagate-exos-x-csi \
-f ./values.yaml
```
*(If there are RBAC leftovers, remove them before retrying.)*
### 3.2. Namespace and values
```bash
kubectl apply -k .
```
This ensures the resources are created in the correct order:
1. `namespace.yaml` → creates the `seagate` namespace.
2. `csidriver/` → installs the CSIDriver resource (cluster-scoped).
3. `secrets/` → installs the connection secrets in the `seagate` namespace.
4. `storageclass/` → installs the two StorageClasses (`sc-me5-site-a` and `sc-me5-site-b`).
> Notes on cluster-scoped resources:
In Kubernetes, some resources belong to a namespace (e.g. Pod, Secret, ConfigMap) and others are global to the whole cluster (e.g. CSIDriver, StorageClass, Node, Namespace). Namespaced resources can be repeated in different namespaces; cluster-scoped resources exist only once in the whole cluster and have no namespace field.
In this repository:
* CSIDriver and StorageClass are cluster-scoped → they have no namespace.
* The Secrets are namespaced → they are installed in `seagate`.
That is why the `kustomization.yaml` is split into subfolders:
* `secrets/kustomization.yaml` targets the `seagate` namespace because it applies only to namespaced objects.
* `csidriver/` and `storageclass/` carry no namespace because they are cluster-scoped.
This avoids errors and keeps the installation GitOps-friendly. To preview exactly what `kubectl apply -k .` will create, see the commands below.
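A way to render the kustomization without touching the cluster (a sketch; run it from the repo root where the top-level `kustomization.yaml` lives):
```bash
kubectl kustomize . | less                      # render the full manifest stream
kubectl apply -k . --dry-run=client -o name     # list the objects that would be created
```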
---
## 4) End-to-end test
PVC + Pod on site-a:
```bash
kubectl apply -f pvc-pod-a.yaml
kubectl apply -f pod-a.yaml
kubectl get pvc,pod
```
Check `iscsiadm`, `multipath`, the PVC events, and the controller logs; see the sketch below.
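A sketch of those checks (adjust names if your PVC/Pod differ from the examples in this repo):
```bash
kubectl describe pvc pvc-a        # provisioning/attach events
kubectl get pod pod-a -o wide     # confirm it landed on a site-a node
sudo iscsiadm -m session          # active iSCSI sessions on that node
sudo multipath -ll                # multipath map backing the volume
kubectl -n seagate logs deploy/seagate-exos-x-csi-controller-server \
  -c seagate-exos-x-csi-controller --tail=100
```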
---
## 5) Measuring *NodePublish* times
```bash
kubectl -n seagate logs -l name=seagate-exos-x-csi-node-server \
-c seagate-exos-x-csi-node --tail=10000 \
| grep "NodePublishVolume" | grep "ROUTINE END"
```
---
## 6) Troubleshooting
* `missing API credentials` → check the CSI secret keys referenced in the StorageClass.
* `DeadlineExceeded` → check multipath, the zone labels, and the topology.
* Helm RBAC conflict → delete the leftover roles (a cleanup sketch follows below).
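For the RBAC conflict, a cleanup sketch using the names this chart creates (double-check with `kubectl get clusterrole | grep provisioner` before deleting anything):
```bash
kubectl delete clusterrole external-provisioner-runner-systems --ignore-not-found
kubectl delete clusterrolebinding csi-provisioner-role-systems --ignore-not-found
kubectl -n seagate delete role external-provisioner-cfg-systems --ignore-not-found
kubectl -n seagate delete rolebinding csi-provisioner-role-cfg-systems --ignore-not-found
```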
---
## 7) Cleanup
```bash
kubectl delete -f pod-a.yaml
kubectl delete -f pvc-pod-a.yaml
```
To uninstall completely:
```bash
helm uninstall exos-x-csi -n seagate
```
---
## 8) Appendix — useful commands
* Restart multipath/kubelet.
* iSCSI/multipath cleanup:
```bash
sudo iscsiadm -m node -u || true
sudo iscsiadm -m node -o delete || true
sudo multipath -F || true
sudo multipath -r
```

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,27 @@
annotations:
artifacthub.io/images: |
- name: csi-driver
image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
apiVersion: v2
appVersion: 1.10.0
description: A dynamic persistent volume (PV) provisioner for Seagate Exos X storage
systems.
home: https://github.com/Seagate/seagate-exos-x-csi
keywords:
- storage
- iscsi
- fc
- sas
- plugin
- csi
maintainers:
- email: css-host-software@seagate.com
name: Seagate
url: https://github.com/Seagate
- email: joseph.skazinski@seagate.com
name: Joe Skazinski
name: seagate-exos-x-csi
sources:
- https://github.com/Seagate/seagate-exos-x-csi/tree/main/helm/csi-charts
type: application
version: 1.10.0

View File

@@ -0,0 +1,59 @@
{{ template "chart.header" . }}
{{ template "chart.deprecationWarning" . }}
{{ template "chart.description" . }}
{{ template "chart.badgesSection" . }}
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/Seagate)](https://artifacthub.io/packages/search?repo=Seagate)
# Introduction
As of version `1.0.0`, this `csi` driver and the associated helm charts are released as open-source projects under the Apache 2.0 license.
Your contribution is most welcome!
{{ template "chart.homepageLine" . }}
## This helm chart
Is part of the project and is published on [Seagate](https://seagate.io)'s charts repository.
{{ template "chart.sourcesSection" . }}
# Installing the Chart
Create a file named `{{ template "chart.name" . }}.values.yaml` with your values, with the help of [Chart Values](#values).
Add our Charts repository:
```
$ helm repo add seagate https://charts.seagate.io
```
Install the {{ template "chart.name" . }} with release name `{{ template "chart.name" . }}` in the `seagate-exos-x-csi-system` namespace:
```
$ helm install -n seagate-exos-x-csi-system {{ template "chart.name" . }} seagate/{{ template "chart.name" . }} --values {{ template "chart.name" . }}.values.yaml
```
The `upgrade` command is used to change configuration when values are modified:
```
$ helm upgrade -n seagate-exos-x-csi-system {{ template "chart.name" . }} seagate/{{ template "chart.name" . }} --values {{ template "chart.name" . }}.values.yaml
```
# Upgrading the Chart
Update Helm repositories:
```
$ helm repo update
```
Upgrade release names `{{ template "chart.name" . }}` to the latest version:
```
$ helm upgrade {{ template "chart.name" . }} seagate/{{ template "chart.name" . }}
```
# Creating a storage class
In order to dynamically provision persistent volumes, you first need to create a storage class. To do so, please refer to the project [documentation](https://github.com/Seagate/seagate-exos-x-csi).
{{ template "chart.maintainersSection" . }}
{{ template "chart.requirementsSection" . }}
{{ template "chart.valuesSection" . }}

View File

@@ -0,0 +1,5 @@
Thank you for using Seagate Exos X provisioner. It will be up and running shortly.
Run 'kubectl get pods' to verify that the new pods have a 'STATUS' of 'Running'.
In order to dynamically provide a persistent volume, create a storage class first.
Please refer to this example to do so: https://github.com/Seagate/seagate-exos-x-csi/blob/main/example/storage-class.yaml

View File

@@ -0,0 +1,10 @@
{{- define "csidriver.labels" -}}
app.kubernetes.io/name: {{ .Chart.Name | kebabcase }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{- define "csidriver.extraArgs" -}}
{{- range .extraArgs }}
- {{ toYaml . }}
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,126 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: seagate-exos-x-csi-node-server
labels:
app.kubernetes.io/version: {{ .Chart.Version }}
app.kubernetes.io/component: dynamic-provisionning-node
{{ include "csidriver.labels" . | indent 4 }}
spec:
selector:
matchLabels:
name: seagate-exos-x-csi-node-server
{{ include "csidriver.labels" . | indent 6 }}
template:
metadata:
labels:
name: seagate-exos-x-csi-node-server
{{ include "csidriver.labels" . | indent 8 }}
spec:
hostNetwork: true
hostIPC: true
{{ if .Values.pspAdmissionControllerEnabled }}serviceAccount: csi-node-registrar{{ end }}
{{- if .Values.nodeServer.nodeAffinity }}
affinity:
nodeAffinity:
{{ toYaml .Values.nodeServer.nodeAffinity | indent 10 }}
{{- end }}
{{- if .Values.nodeServer.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeServer.nodeSelector | indent 8 }}
{{- end }}
containers:
- name: seagate-exos-x-csi-node
image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
command:
- seagate-exos-x-csi-node
- -bind=unix://{{ .Values.kubeletPath }}/plugins/csi-exos-x.seagate.com/csi.sock
- -chroot=/host
{{- include "csidriver.extraArgs" .Values.node | indent 10 }}
env:
- name: CSI_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CSI_NODE_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CSI_NODE_SERVICE_PORT
value: "978"
securityContext:
privileged: true
volumeMounts:
- name: plugin-dir
mountPath: {{ .Values.kubeletPath }}/plugins/csi-exos-x.seagate.com
- name: mountpoint-dir
mountPath: {{ .Values.kubeletPath }}/pods
mountPropagation: Bidirectional
- name: san-iscsi-csi-run-dir
mountPath: /var/run/csi-exos-x.seagate.com
- name: device-dir
mountPath: /dev
- name: iscsi-dir
mountPath: /etc/iscsi
- name: host
mountPath: /host
mountPropagation: Bidirectional
ports:
- containerPort: 9808
name: healthz
protocol: TCP
- containerPort: 9842
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: healthz
periodSeconds: 60
- name: liveness-probe
image: {{.Values.nodeLivenessProbe.image.repository }}:{{ .Values.nodeLivenessProbe.image.tag }}
args:
- --csi-address=/csi/csi.sock
{{- include "csidriver.extraArgs" .Values.nodeLivenessProbe | indent 10 }}
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: driver-registrar
image: {{ .Values.csiNodeRegistrar.image.repository }}:{{ .Values.csiNodeRegistrar.image.tag }}
args:
- --csi-address=/csi/csi.sock
- --kubelet-registration-path={{ .Values.kubeletPath }}/plugins/csi-exos-x.seagate.com/csi.sock
{{- include "csidriver.extraArgs" .Values.csiNodeRegistrar | indent 10 }}
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
volumes:
- name: registration-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins_registry/
- name: mountpoint-dir
hostPath:
path: {{ .Values.kubeletPath }}/pods
- name: plugin-dir
hostPath:
path: {{ .Values.kubeletPath }}/plugins/csi-exos-x.seagate.com
type: DirectoryOrCreate
- name: iscsi-dir
hostPath:
path: /etc/iscsi
- name: device-dir
hostPath:
path: /dev
- name: san-iscsi-csi-run-dir
hostPath:
path: /var/run/csi-exos-x.seagate.com
- name: host
hostPath:
path: /

View File

@@ -0,0 +1,94 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: seagate-exos-x-csi-controller-server
labels:
app.kubernetes.io/version: {{ .Chart.Version }}
app.kubernetes.io/component: dynamic-provisionning-controller
{{ include "csidriver.labels" . | indent 4 }}
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: seagate-exos-x-csi-controller-server
{{ include "csidriver.labels" . | indent 6 }}
template:
metadata:
labels:
app: seagate-exos-x-csi-controller-server
{{ include "csidriver.labels" . | indent 8 }}
spec:
serviceAccount: csi-provisioner
containers:
- name: seagate-exos-x-csi-controller
image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}
command:
- seagate-exos-x-csi-controller
- -bind=unix:///csi/csi.sock
{{- include "csidriver.extraArgs" .Values.controller | indent 10 }}
env:
- name: CSI_NODE_SERVICE_PORT
value: "978"
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-run-dir
mountPath: /var/run/csi-exos-x.seagate.com
ports:
- containerPort: 9842
name: metrics
protocol: TCP
- name: csi-provisioner
image: {{ .Values.csiProvisioner.image.repository }}:{{ .Values.csiProvisioner.image.tag }}
args:
- --csi-address=/csi/csi.sock
- --worker-threads=1
- --timeout={{ .Values.csiProvisioner.timeout }}
{{- include "csidriver.extraArgs" .Values.csiProvisioner | indent 10 }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-attacher
image: {{ .Values.csiAttacher.image.repository }}:{{ .Values.csiAttacher.image.tag }}
args:
- --csi-address=/csi/csi.sock
- --worker-threads=1
- --timeout={{ .Values.csiAttacher.timeout }}
{{- include "csidriver.extraArgs" .Values.csiAttacher | indent 10 }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-resizer
image: {{ .Values.csiResizer.image.repository }}:{{ .Values.csiResizer.image.tag }}
args:
- --csi-address=/csi/csi.sock
{{- include "csidriver.extraArgs" .Values.csiResizer | indent 10 }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
- name: csi-snapshotter
image: {{ .Values.csiSnapshotter.image.repository }}:{{ .Values.csiSnapshotter.image.tag }}
args:
- --csi-address=/csi/csi.sock
{{- include "csidriver.extraArgs" .Values.csiSnapshotter | indent 10 }}
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /csi
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
volumes:
- name: socket-dir
emptyDir:
medium: Memory
- name: csi-run-dir
hostPath:
path: /var/run/csi-exos-x.seagate.com

View File

@@ -0,0 +1,14 @@
{{- if .Values.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: seagate-exos-x-csi-node-exporter
labels:
{{ include "csidriver.labels" . | indent 4 }}
spec:
selector:
matchLabels:
name: seagate-exos-x-csi-node-server
podMetricsEndpoints:
- port: metrics
{{- end }}

View File

@@ -0,0 +1,26 @@
{{- if .Values.pspAdmissionControllerEnabled -}}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: seagate-exos-x-csi
spec:
privileged: true
hostNetwork: true
hostIPC: true
hostPID: true
seLinux:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
runAsUser:
rule: RunAsAny
fsGroup:
rule: RunAsAny
hostPorts:
- min: 0
max: 65535
volumes:
- '*'
allowedCapabilities:
- '*'
{{ end }}

View File

@@ -0,0 +1,166 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# provisioner, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
labels:
{{ include "csidriver.labels" . | indent 4 }}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: external-provisioner-runner-systems
apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-cfg-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
rules:
# Only one of the following rules for endpoints or leases is required based on
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
{{ if .Values.pspAdmissionControllerEnabled }}
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- seagate-exos-x-csi
{{ end }}
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role-cfg-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
subjects:
- kind: ServiceAccount
name: csi-provisioner
roleRef:
kind: Role
name: external-provisioner-cfg-systems
apiGroup: rbac.authorization.k8s.io
{{ if .Values.pspAdmissionControllerEnabled }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-node-registrar
labels:
{{ include "csidriver.labels" . | indent 4 }}
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-node-registrar-cfg-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
rules:
- apiGroups: ["policy"]
resources: ["podsecuritypolicies"]
verbs: ["use"]
resourceNames:
- systems-role
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-node-registrar-role-cfg-systems
labels:
{{ include "csidriver.labels" . | indent 4 }}
subjects:
- kind: ServiceAccount
name: csi-node-registrar
roleRef:
kind: Role
name: csi-node-registrar-cfg-systems
apiGroup: rbac.authorization.k8s.io
{{ end }}

View File

@@ -0,0 +1,31 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: v1
kind: Service
metadata:
name: systems-controller-metrics
labels:
name: systems-controller-metrics
{{ include "csidriver.labels" . | indent 4 }}
spec:
ports:
- name: metrics
port: 9842
targetPort: metrics
protocol: TCP
selector:
app: seagate-exos-x-csi-controller-server
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: seagate-exos-x-csi-controller-exporter
labels:
{{ include "csidriver.labels" . | indent 4 }}
spec:
selector:
matchLabels:
name: systems-controller-metrics
endpoints:
- port: metrics
interval: 1s
{{- end }}

View File

@@ -0,0 +1,83 @@
# Default values CSI Driver.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
# -- Path to kubelet
kubeletPath: /var/lib/kubelet
# -- Whether the psp admission controller has been enabled in the cluster or not
pspAdmissionControllerEnabled: false
image:
# -- Docker repository to use for nodes and controller
repository: ghcr.io/seagate/seagate-exos-x-csi
# -- Tag to use for nodes and controller
# @default -- Uses Chart.appVersion value by default if tag does not specify a new version.
tag: "v1.10.0"
# -- Default is set to IfNotPresent, to override use Always here to always pull the specified version
pullPolicy: Always
# -- Controller sidecar for provisioning
# AKA external-provisioner
csiProvisioner:
image:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v5.0.1
# -- Timeout for gRPC calls from the csi-provisioner to the controller
timeout: 60s
# -- Extra arguments for csi-provisioner controller sidecar
extraArgs: []
# -- Controller sidecar for attachment handling
csiAttacher:
image:
repository: registry.k8s.io/sig-storage/csi-attacher
tag: v4.6.1
# -- Timeout for gRPC calls from the csi-attacher to the controller
timeout: 60s
# -- Extra arguments for csi-attacher controller sidecar
extraArgs: []
# -- Controller sidecar for volume expansion
csiResizer:
image:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.11.1
# -- Extra arguments for csi-resizer controller sidecar
extraArgs: []
# -- Controller sidecar for snapshots handling
csiSnapshotter:
image:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v8.0.1
# -- Extra arguments for csi-snapshotter controller sidecar
extraArgs: []
# -- Node sidecar for plugin registration
csiNodeRegistrar:
image:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.9.0
# -- Extra arguments for csi-node-registrar node sidecar
extraArgs: []
controller:
# -- Extra arguments for seagate-exos-x-csi-controller container
extraArgs: [-v=0]
node:
# -- Extra arguments for seagate-exos-x-csi-node containers
extraArgs: [-v=0]
multipathd:
# -- Extra arguments for multipathd containers
extraArgs: []
# -- Container that convert CSI liveness probe to kubernetes liveness/readiness probe
nodeLivenessProbe:
image:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.12.0
# -- Extra arguments for the node's liveness probe containers
extraArgs: []
nodeServer:
# -- Kubernetes nodeSelector field for seagate-exos-x-csi-node-server Pod
nodeSelector:
# -- Kubernetes nodeAffinity field for seagate-exos-x-csi-node-server Pod
nodeAffinity:
podMonitor:
# -- Set a Prometheus operator PodMonitor resource (true or false)
enabled: false
serviceMonitor:
# -- Set a Prometheus operator ServiceMonitor resource (true or false)
enabled: false
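These chart defaults are overridden at install time by the site-specific seagate/values.yaml shown further down in this diff. A convenient way to see the effective, merged configuration (the chart path and release name are assumptions):

# Render the chart locally with the site values to inspect the merged result
helm template seagate-csi ./seagate/csi-charts -n seagate -f seagate/values.yaml
# Or, after installation, dump the values the release is actually using
helm get values seagate-csi -n seagate --all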


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- secret-me5-site-a.yaml
- secret-me5-site-b.yaml
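The two array credentials are grouped in this kustomization so both sites can be applied in one step. A minimal sketch, assuming the kustomization and the two Secret files live in a directory such as seagate/secrets/ (the path is an assumption):

# The Secrets target the seagate namespace, so make sure it exists first
kubectl get ns seagate || kubectl create namespace seagate
# Apply both site Secrets at once and verify them
kubectl apply -k seagate/secrets/
kubectl -n seagate get secrets seagate-me5-site-a seagate-me5-site-b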


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: seagate-me5-site-a
namespace: seagate
type: Opaque
data:
apiAddress: aHR0cHM6Ly9hZG1pbi5wb3dlcnZhdWx0MS5jMmV0Lm5ldA== # https://admin.powervault1.c2et.net
username: YWRtaW4uYzNz # admin.c3s
password: UG96dWVsby4xMjM0NQ== # Pozuelo.12345
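The data fields are plain base64, as the inline comments indicate. A small sketch of how an equivalent value is produced and verified (the example string is the apiAddress documented in the comment above):

# Encode a value for the Secret; -n keeps a trailing newline out of the credential
echo -n 'https://admin.powervault1.c2et.net' | base64
# Decode a field straight from the cluster to check what was stored
kubectl -n seagate get secret seagate-me5-site-a -o jsonpath='{.data.apiAddress}' | base64 -d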


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: seagate-me5-site-b
namespace: seagate
type: Opaque
data:
apiAddress: aHR0cHM6Ly9hZG1pbi5wb3dlcnZhdWx0Mi5jMmV0Lm5ldA== # https://admin.powervault2.c2et.net
username: YWRtaW4uYzNz
password: UG96dWVsby4xMjM0NQ==


@@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- sc-me5-site-a.yaml
- sc-me5-site-b.yaml
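As with the Secrets, the two StorageClasses are bundled in a kustomization. A short sketch of applying and checking them (the seagate/storageclasses/ path is an assumption):

kubectl apply -k seagate/storageclasses/
# Both classes should appear with the csi-exos-x.seagate.com provisioner
kubectl get sc sc-me5-site-a sc-me5-site-b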


@@ -0,0 +1,20 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: sc-me5-site-a
provisioner: csi-exos-x.seagate.com
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/provisioner-secret-name: seagate-me5-site-a
csi.storage.k8s.io/provisioner-secret-namespace: seagate
csi.storage.k8s.io/controller-publish-secret-name: seagate-me5-site-a
csi.storage.k8s.io/controller-publish-secret-namespace: seagate
csi.storage.k8s.io/controller-expand-secret-name: seagate-me5-site-a
csi.storage.k8s.io/controller-expand-secret-namespace: seagate
csi.storage.k8s.io/fstype: ext4
pool: pool
volPrefix: sza
storageProtocol: iscsi
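With the class in place, a volume on the site-A array is requested like any other PVC; the controller resolves the Secrets through the StorageClass parameters, so the PVC can live in any namespace. A minimal sketch (PVC name, namespace and size are illustrative assumptions):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-me5-site-a   # hypothetical name, for illustration only
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: sc-me5-site-a
  resources:
    requests:
      storage: 5Gi
EOF
# The provisioner should create an iSCSI volume prefixed "sza" on the array
kubectl -n default get pvc test-me5-site-a -w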


@@ -0,0 +1,19 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: sc-me5-site-b
provisioner: csi-exos-x.seagate.com
reclaimPolicy: Delete
volumeBindingMode: Immediate
allowVolumeExpansion: true
parameters:
csi.storage.k8s.io/provisioner-secret-name: seagate-me5-site-b
csi.storage.k8s.io/provisioner-secret-namespace: seagate
csi.storage.k8s.io/controller-publish-secret-name: seagate-me5-site-b
csi.storage.k8s.io/controller-publish-secret-namespace: seagate
csi.storage.k8s.io/controller-expand-secret-name: seagate-me5-site-b
csi.storage.k8s.io/controller-expand-secret-namespace: seagate
csi.storage.k8s.io/fstype: ext4
pool: pool
volPrefix: szb
storageProtocol: iscsi

seagate/values.yaml (new file, 64 lines)

@@ -0,0 +1,64 @@
kubeletPath: /var/lib/kubelet
pspAdmissionControllerEnabled: false
image:
repository: ghcr.io/seagate/seagate-exos-x-csi
tag: "v1.10.0"
pullPolicy: IfNotPresent
csiProvisioner:
image:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v5.0.1
timeout: 60s
extraArgs: []
csiAttacher:
image:
repository: registry.k8s.io/sig-storage/csi-attacher
tag: v4.6.1
timeout: 60s
extraArgs: []
csiResizer:
image:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.11.1
extraArgs: []
csiSnapshotter:
image:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v8.0.1
extraArgs: []
csiNodeRegistrar:
image:
repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
tag: v2.9.0
extraArgs: []
controller:
extraArgs: ["-v=2"]
node:
extraArgs: ["-v=2"]
multipathd:
extraArgs: []
nodeLivenessProbe:
image:
repository: registry.k8s.io/sig-storage/livenessprobe
tag: v2.12.0
extraArgs: []
nodeServer:
nodeSelector: {}
nodeAffinity: {}
podMonitor:
enabled: false
serviceMonitor:
enabled: false
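This is the values file actually used for the deployment: it pins the images at v1.10.0, switches pullPolicy to IfNotPresent and raises the driver verbosity to -v=2, while the monitors stay disabled. A hedged install sketch, assuming the chart is vendored next to this file (the ./seagate/csi-charts path and the release name are assumptions):

helm upgrade --install seagate-csi ./seagate/csi-charts \
  -n seagate --create-namespace \
  -f seagate/values.yaml
# Controller and node pods should come up with the v1.10.0 driver image
kubectl -n seagate get pods -o wide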

Some files were not shown because too many files have changed in this diff.