2025-08-31 00:24:21 +02:00
parent 2eff32d251
commit 2331da8cf8
34 changed files with 411 additions and 446 deletions

View File

@@ -1,10 +0,0 @@
kubeadm join 192.168.0.20:6443 --token rvz86n.c8rdb9ygtikrwnub --discovery-token-ca-cert-hash sha256:f925653dfb3d2b4697395a272e0b07cf4eb16b7ae5a2cc3b33aeab1f36fe7d13 --control-plane --certificate-key eeab98651b2f07f6ce53649b2cca1bf3c449d4fe6270ec0645219cd8c6795ca7 --apiserver-advertise-address=192.168.4.2
sudo rm -rf /var/lib/kubelet/
sudo rm -rf /etc/kubernetes
sudo rm -rf /etc/cni/
sudo rm -rf /var/lib/etcd
sudo systemctl stop kubelet
sudo systemctl status kubelet
sudo kubeadm join 192.168.0.20:6443 --token rvz86n.c8rdb9ygtikrwnub --discovery-token-ca-cert-hash sha256:f925653dfb3d2b4697395a272e0b07cf4eb16b7ae5a2cc3b33aeab1f36fe7d13 --control-plane --certificate-key eeab98651b2f07f6ce53649b2cca1bf3c449d4fe6270ec0645219cd8c6795ca7 --apiserver-advertise-address=192.168.4.2
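# Hedged alternative: `sudo kubeadm reset -f` performs the kubelet/etcd/manifest cleanup above in one step;
# it intentionally leaves the CNI config behind, so /etc/cni/net.d still has to be removed by hand.
sudo kubeadm reset -f
sudo rm -rf /etc/cni/net.d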

View File

@@ -0,0 +1,16 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: bsl-incluster-a
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups
  config:
    s3Url: http://minio-a.minio-velero.svc.cluster.local:9000
    s3ForcePathStyle: "true"
    region: site-a
  accessMode: ReadWrite
  credential:
    name: cloud-credentials
    key: cloud

View File

@@ -0,0 +1,16 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: bsl-incluster-b
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups
  config:
    s3Url: http://minio-b.minio-velero.svc.cluster.local:9000
    s3ForcePathStyle: "true"
    region: site-b
  accessMode: ReadWrite
  credential:
    name: cloud-credentials
    key: cloud

View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio-a
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: minio
      app.kubernetes.io/instance: minio-a
  template:
    metadata:
      labels:
        app.kubernetes.io/name: minio
        app.kubernetes.io/instance: minio-a
    spec:
      securityContext:
        fsGroup: 1000
      containers:
        - name: minio
          image: quay.io/minio/minio:latest
          args: ["server", "/data", "--console-address", ":9001"]
          envFrom:
            - secretRef:
                name: minio-root
          ports:
            - name: s3
              containerPort: 9000
            - name: console
              containerPort: 9001
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "2"
              memory: "4Gi"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: minio-a-data

View File

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio-b
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: minio
      app.kubernetes.io/instance: minio-b
  template:
    metadata:
      labels:
        app.kubernetes.io/name: minio
        app.kubernetes.io/instance: minio-b
    spec:
      securityContext:
        fsGroup: 1000
      containers:
        - name: minio
          image: quay.io/minio/minio:latest
          args: ["server", "/data", "--console-address", ":9001"]
          envFrom:
            - secretRef:
                name: minio-root
          ports:
            - name: s3
              containerPort: 9000
            - name: console
              containerPort: 9001
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "2"
              memory: "4Gi"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: minio-b-data

View File

@@ -0,0 +1,50 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: minio-a-init
  namespace: minio-velero
spec:
  backoffLimit: 3
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: init
          image: bitnami/minio-client:latest
          command: ["/bin/sh","-lc"]
          env:
            - name: MINIO_ENDPOINT
              value: "http://minio-a.minio-velero.svc.cluster.local:9000"
            - name: MINIO_ROOT_USER
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_USER
            - name: MINIO_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_PASSWORD
          args:
            - |
              set -euo pipefail
              echo "[init-a] waiting for $MINIO_ENDPOINT ..."
              until curl -sf "$MINIO_ENDPOINT/minio/health/ready" >/dev/null; do sleep 2; done
              echo "[init-a] configuring bucket/user/policy"
              mc alias set minioA "$MINIO_ENDPOINT" "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"
              mc mb minioA/velero-backups || true
              mc version enable minioA/velero-backups || true
              mc admin user add minioA velero Velero12345 || true
              cat > /tmp/policy.json <<'EOF'
              { "Version":"2012-10-17",
                "Statement":[
                  {"Effect":"Allow","Action":["s3:*"],
                   "Resource":["arn:aws:s3:::velero-backups","arn:aws:s3:::velero-backups/*"]}
              ]}
              EOF
              mc admin policy create minioA velero-rw /tmp/policy.json || true
              mc admin policy attach minioA velero-rw --user velero || true
              echo "[init-a] verifying with velero creds"
              mc alias set a-vel "$MINIO_ENDPOINT" velero Velero12345
              mc ls a-vel/velero-backups >/dev/null
              echo "[init-a] done"

View File

@@ -0,0 +1,50 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: minio-b-init
  namespace: minio-velero
spec:
  backoffLimit: 3
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: init
          image: bitnami/minio-client:latest
          command: ["/bin/sh","-lc"]
          env:
            - name: MINIO_ENDPOINT
              value: "http://minio-b.minio-velero.svc.cluster.local:9000"
            - name: MINIO_ROOT_USER
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_USER
            - name: MINIO_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_PASSWORD
          args:
            - |
              set -euo pipefail
              echo "[init-b] waiting for $MINIO_ENDPOINT ..."
              until curl -sf "$MINIO_ENDPOINT/minio/health/ready" >/dev/null; do sleep 2; done
              echo "[init-b] configuring bucket/user/policy"
              mc alias set minioB "$MINIO_ENDPOINT" "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"
              mc mb minioB/velero-backups || true
              mc version enable minioB/velero-backups || true
              mc admin user add minioB velero Velero12345 || true
              cat > /tmp/policy.json <<'EOF'
              { "Version":"2012-10-17",
                "Statement":[
                  {"Effect":"Allow","Action":["s3:*"],
                   "Resource":["arn:aws:s3:::velero-backups","arn:aws:s3:::velero-backups/*"]}
              ]}
              EOF
              mc admin policy create minioB velero-rw /tmp/policy.json || true
              mc admin policy attach minioB velero-rw --user velero || true
              echo "[init-b] verifying with velero creds"
              mc alias set b-vel "$MINIO_ENDPOINT" velero Velero12345
              mc ls b-vel/velero-backups >/dev/null
              echo "[init-b] done"

View File

@@ -0,0 +1,21 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-velero
resources:
  - namespace.yaml
  - secrets/minio-root.yaml
  - secrets/cloud-credentials.yaml
  - networkpolicies/default-deny-ingress.yaml
  - networkpolicies/allow-self-to-minio.yaml
  - pvcs/minio-a-pvc.yaml
  - deployments/minio-a.yaml
  - services/minio-a.yaml
  - jobs/minio-a-init.yaml
  - pvcs/minio-b-pvc.yaml
  - deployments/minio-b.yaml
  - services/minio-b.yaml
  - jobs/minio-b-init.yaml
  - backupstoragelocations/bsl-a.yaml
  - backupstoragelocations/bsl-b.yaml

View File

@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: velero
  name: minio-velero

View File

@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-self-to-minio
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: minio
  policyTypes: ["Ingress"]
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: minio-velero
      ports:
        - protocol: TCP
          port: 9000
        - protocol: TCP
          port: 9001

View File

@@ -0,0 +1,7 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes: ["Ingress"]

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-a-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: sc-me5-site-a
  resources:
    requests:
      storage: 5Ti

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-b-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: sc-me5-site-b
  resources:
    requests:
      storage: 5Ti

View File

@@ -1,28 +1,33 @@
# Velero + MinIO (c2et.net)
This package contains:
- `namespace.yaml`
- Credential Secrets (`cloud-credentials-site-a`, `cloud-credentials-site-b`)
- BackupStorageLocations (BSL) via YAML: `default` (site-a) and `site-b`
- Example `Schedule` (nightly at 02:00 and 02:30)
- `helm/values-approach-b.yaml`: Velero deployment without BSL/Secret (GitOps)
- `ServiceMonitor` (if you use the Prometheus Operator)
- Grafana dashboard (JSON)
## Recommended flow (GitOps, Approach B)
* `namespace.yaml`
* Credential Secrets (`cloud-credentials.yaml`, `minio-root.yaml`)
* MinIO deployments (`minio-a`, `minio-b`) with their `PVC` and `Service`
* Init Jobs (`minio-a-init`, `minio-b-init`)
* `BackupStorageLocation` (BSL) via YAML: `bsl-incluster-a` and `bsl-incluster-b`
* Example `Schedule` (nightly at 02:00 and 02:30)
* `values-velero.yaml`: Velero deployment without BSL/Secret (GitOps)
* `ServiceMonitor` (if you use the Prometheus Operator)
* Grafana dashboard (JSON)
## Recommended flow (GitOps)
```bash
# 1) Install Velero via Helm without BSLs or secrets
helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
helm upgrade --install velero vmware-tanzu/velero -n velero --create-namespace -f helm/values-approach-b.yaml
helm upgrade --install velero vmware-tanzu/velero -n minio-velero -f values-velero.yaml
# 2) Apply Secrets, BSLs, and Schedules
kubectl apply -f namespace.yaml
kubectl apply -f secrets/secret-site-a.yaml -f secrets/secret-site-b.yaml
kubectl apply -f bsl/bsl-default-site-a.yaml -f bsl/bsl-site-b.yaml
# 2) Create the namespace and deploy MinIO with its Secrets, PVCs, Services, and init Jobs
kubectl apply -k ./
# 3) (Optional) Apply Schedules
kubectl apply -f schedules/schedules.yaml
```
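(Optional) A quick check that the BSLs register as `Available`, assuming the velero CLI and the `minio-velero` install namespace from step 1:
```bash
velero backup-location get -n minio-velero
kubectl -n minio-velero get backupstoragelocations
```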
## Velero client
To interact with Velero you need the binary on your admin workstation.
```bash
@@ -30,32 +35,33 @@ To interact with Velero you need the binary on your admin workstation.
wget https://github.com/vmware-tanzu/velero/releases/download/v1.16.2/velero-v1.16.2-linux-amd64.tar.gz
tar -xvf velero-v1.16.2-linux-amd64.tar.gz
sudo mv velero-v1.16.2-linux-amd64/velero /usr/local/bin/
# macOS (Intel)
wget https://github.com/vmware-tanzu/velero/releases/download/v1.16.2/velero-v1.16.2-darwin-amd64.tar.gz
tar -xvf velero-v1.16.2-darwin-amd64.tar.gz
sudo mv velero-v1.16.2-darwin-amd64/velero /usr/local/bin/
```
Verify the installation:
```bash
velero version
```
## Taking a manual backup
Example: back up the `wireguard` namespace.
```bash
velero backup create wireguard-backup --include-namespaces wireguard --wait
velero backup describe wireguard-backup --details
```
You can exclude unnecessary resources (e.g., KubeVirt CRDs):
```bash
velero backup create smoke --include-namespaces default --exclude-resources uploadtokenrequests.upload.cdi.kubevirt.io --wait
```
## Scheduling backups (Schedules)
Example: a daily schedule at 03:15 with a 30-day TTL:
```bash
velero schedule create daily-wireguard --schedule "15 3 * * *" --include-namespaces wireguard --ttl 720h --default-volumes-to-fs-backup
```
@@ -63,7 +69,9 @@ velero schedule create daily-wireguard --schedule "15 3 * * *" --include-nam
Schedules can also be defined via YAML in `schedules/schedules.yaml`.
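For reference, a minimal `Schedule` manifest equivalent to the command above (a sketch; namespace and field values mirror this package's `schedules/schedules.yaml`):
```yaml
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-wireguard
  namespace: velero
spec:
  schedule: "15 3 * * *"
  template:
    ttl: 720h
    includedNamespaces:
      - wireguard
    defaultVolumesToFsBackup: true
```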
## Restoring a backup
### Restore to the same namespace (real disaster)
```bash
# 1) Delete the broken namespace
kubectl delete ns wireguard
@@ -74,13 +82,15 @@ velero restore describe wireguard-restore --details
```
### Restore to a different namespace (rehearsal)
```bash
kubectl create ns wireguard-restore
velero restore create wireguard-restore-test --from-backup wireguard-backup --namespace-mappings wireguard:wireguard-restore --wait
```
## Notes
- MinIO requires `s3ForcePathStyle=true`.
- If you use your own CA, add `spec.config.caCert` to the BSLs.
- `ServiceMonitor` requires the Prometheus Operator; set `metadata.labels.release` to the value your Prometheus uses.
- Import the JSON dashboard into Grafana (datasource `prometheus`).
* MinIO requires `s3ForcePathStyle=true`.
* If you use your own CA, add `spec.config.caCert` to the BSLs.
* `ServiceMonitor` requires the Prometheus Operator; set `metadata.labels.release` to the value your Prometheus uses.
* Import the JSON dashboard into Grafana (datasource `prometheus`).
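With two BSLs defined, a backup can target either site explicitly via the `--storage-location` flag (a sketch using the BSL names from this package):
```bash
velero backup create test-site-b --include-namespaces default --storage-location bsl-incluster-b --wait
```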

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  name: cloud-credentials
type: Opaque
stringData:
  cloud: |
    [default]
    aws_access_key_id = velero
    aws_secret_access_key = Velero12345

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: minio-root
type: Opaque
stringData:
  MINIO_ROOT_USER: admin
  MINIO_ROOT_PASSWORD: "Pozuelo12345"

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-a
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
  ports:
    - name: s3
      port: 9000
      targetPort: 9000
    - name: console
      port: 9001
      targetPort: 9001

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-b
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
  ports:
    - name: s3
      port: 9000
      targetPort: 9000
    - name: console
      port: 9001
      targetPort: 9001

View File

@@ -0,0 +1,28 @@
# Do not create credentials or BSL/VSL from Helm (you manage them via YAML)
credentials:
  useSecret: false
configuration:
  # In v1.16 the flag is no longer needed, but to force the FS engine:
  # features: EnableFSBackup
  features: ""                    # leave empty to force nothing
  backupStorageLocation: []       # BSLs via YAML
  volumeSnapshotLocation: []      # avoids the "spec.provider: Required value" error
  defaultVolumesToFsBackup: true  # FS backup by default (Kopia/node-agent)
# Deploy the node-agent (DaemonSet on all nodes)
deployNodeAgent: true
nodeAgent:
  podConfig:
    # Treat this as a catch-all for any taint
    tolerations:
      - operator: "Exists"
# S3 plugin for MinIO (required so the server has the "aws" provider)
initContainers:
  - name: velero-plugin-for-aws
    image: velero/velero-plugin-for-aws:v1.9.0
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: plugins
        mountPath: /target

View File

@@ -1,78 +0,0 @@
# MinIO en Kubernetes — c2et.net (Site A/B)
This package contains Helm-free manifests that deploy **two independent MinIO instances**,
one per site, using your StorageClasses `sc-me5-site-a` and `sc-me5-site-b` and forcing per-site scheduling.
## Structure
```
minio-k8s-c2et-net/
site-a/
namespace.yaml
secret-root.yaml
pvc.yaml
statefulset.yaml
service.yaml
ingress-api.yaml
ingress-console.yaml
site-b/
(identical, with site B values)
```
## Admin credentials
- User: **admin**
- Password: **Pozuelo12345**
> Change these credentials in `secret-root.yaml` before going to production.
## Domains
- Site A API: `s3-a.c2et.net`
- Site A Console: `console.s3-a.c2et.net`
- Site B API: `s3-b.c2et.net`
- Site B Console: `console.s3-b.c2et.net`
Prerequisites:
- A working `nginx` IngressClass.
- `cert-manager` with a `ClusterIssuer` named `letsencrypt-prod`.
- DNS pointing the hosts above at the Ingress Controller.
## Quick deployment
```bash
kubectl apply -f site-a/namespace.yaml
kubectl apply -f site-a/secret-root.yaml
kubectl apply -f site-a/pvc.yaml
kubectl apply -f site-a/service.yaml
kubectl apply -f site-a/statefulset.yaml
kubectl apply -f site-a/ingress-api.yaml
kubectl apply -f site-a/ingress-console.yaml
kubectl apply -f site-b/namespace.yaml
kubectl apply -f site-b/secret-root.yaml
kubectl apply -f site-b/pvc.yaml
kubectl apply -f site-b/service.yaml
kubectl apply -f site-b/statefulset.yaml
kubectl apply -f site-b/ingress-api.yaml
kubectl apply -f site-b/ingress-console.yaml
```
## Testing
```bash
export AWS_ACCESS_KEY_ID=admin
export AWS_SECRET_ACCESS_KEY='Pozuelo12345'
export AWS_S3_FORCE_PATH_STYLE=true
aws --endpoint-url https://s3-a.c2et.net s3 mb s3://mi-bucket-a
aws --endpoint-url https://s3-a.c2et.net s3 ls
aws --endpoint-url https://s3-b.c2et.net s3 mb s3://mi-bucket-b
aws --endpoint-url https://s3-b.c2et.net s3 ls
```
## Notes
- The PVCs use `WaitForFirstConsumer` through your StorageClasses; the StatefulSet's `nodeSelector` ensures
each volume is created in the correct **site** (see the sketch below).
- MinIO image: `quay.io/minio/minio:RELEASE.2025-02-20T00-00-00Z` (pin to whichever release you certify).
- Default PVC size: `2Ti` (adjust as needed).
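For reference, a minimal sketch of the per-site pinning described above; the `topology.kubernetes.io/zone` key and `site-a` value are assumptions, so use whatever labels your nodes actually carry:
```yaml
# fragment of site-a/statefulset.yaml (hypothetical node labels)
spec:
  template:
    spec:
      nodeSelector:
        topology.kubernetes.io/zone: site-a
```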

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: minio-site-a
spec:
  type: ClusterIP
  selector:
    app: minio
  ports:
    - name: api
      port: 9000
      targetPort: 9000
      protocol: TCP
    - name: console
      port: 9001
      targetPort: 9001
      protocol: TCP

View File

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: minio-site-b
spec:
  type: ClusterIP
  selector:
    app: minio
  ports:
    - name: api
      port: 9000
      targetPort: 9000
      protocol: TCP
    - name: console
      port: 9001
      targetPort: 9001
      protocol: TCP

View File

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: test-http
  # annotations:
  #   metallb.universe.tf/address-pool: default
spec:
  # type: NodePort
  type: LoadBalancer
  loadBalancerIP: 192.168.200.10
  selector:
    app: test-http
  ports:
    - port: 80
      targetPort: 80
      # nodePort: 30080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-http
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-http
  template:
    metadata:
      labels:
        app: test-http
    spec:
      containers:
        - name: test-http
          image: nginx:alpine
          ports:
            - containerPort: 80

View File

@@ -1,16 +0,0 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: aws
  objectStorage:
    bucket: velero
  config:
    region: minio
    s3Url: https://s3-a.c2et.net
    s3ForcePathStyle: "true"
  credential:
    name: cloud-credentials-site-a
    key: cloud

View File

@@ -1,16 +0,0 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
name: site-b
namespace: velero
spec:
provider: aws
objectStorage:
bucket: velero
config:
region: minio
s3Url: https://s3-b.c2et.net
s3ForcePathStyle: "true"
credential:
name: cloud-credentials-site-b
key: cloud

View File

@@ -1,36 +0,0 @@
credentials:
  useSecret: true
  existingSecret: ""
  secretContents:
    cloud: |
      [default]
      aws_access_key_id=velero-a
      aws_secret_access_key=Clave-Velero-A
configuration:
  features: EnableCSI
  backupStorageLocation:
    - name: default
      provider: aws
      bucket: velero
      config:
        region: minio
        s3Url: https://s3-a.c2et.net
        s3ForcePathStyle: "true"
initContainers:
  - name: velero-plugin-for-aws
    image: velero/velero-plugin-for-aws:v1.9.0
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: plugins
        mountPath: /target
  - name: velero-plugin-for-csi
    image: velero/velero-plugin-for-csi:v0.7.0
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: plugins
        mountPath: /target
nodeAgent:
  enabled: true

View File

@@ -1,30 +0,0 @@
# values-combined.yaml
credentials:
  useSecret: false  # you apply Secrets and BSLs yourself via YAML (as you already did)
configuration:
  features: ""
  backupStorageLocation: []       # none from Helm (managed via YAML)
  defaultVolumesToFsBackup: true  # copies PV data to the BSL via node-agent/Kopia
# Keep ONLY the AWS plugin; the external CSI plugin is dropped (built into Velero 1.16)
initContainers:
  - name: velero-plugin-for-aws
    image: velero/velero-plugin-for-aws:v1.9.0
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: plugins
        mountPath: /target
# **Enable** the node-agent (DaemonSet) and give it catch-all tolerations
deployNodeAgent: true
nodeAgent:
  podConfig:
    tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node-role.kubernetes.io/control-plane"
        operator: "Exists"
        effect: "NoSchedule"
      - operator: "Exists"  # tolerates any other taint

View File

@@ -1,92 +0,0 @@
{
  "annotations": {
    "list": []
  },
  "editable": true,
  "gnetId": null,
  "graphTooltip": 0,
  "panels": [
    {
      "type": "stat",
      "title": "Backups - Total",
      "targets": [
        {
          "expr": "sum(velero_backup_total)",
          "legendFormat": "total"
        }
      ],
      "id": 1,
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      },
      "options": {
        "reduceOptions": {
          "calcs": ["lastNotNull"]
        }
      }
    },
    {
      "type": "timeseries",
      "title": "Backups by phase",
      "targets": [
        {
          "expr": "sum by (phase) (increase(velero_backup_attempt_total[1h]))",
          "legendFormat": "{{phase}}"
        }
      ],
      "id": 2,
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      }
    },
    {
      "type": "timeseries",
      "title": "Backup duration (p95)",
      "targets": [
        {
          "expr": "histogram_quantile(0.95, sum(rate(velero_backup_duration_seconds_bucket[5m])) by (le))",
          "legendFormat": "p95"
        }
      ],
      "id": 3,
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      }
    },
    {
      "type": "timeseries",
      "title": "node-agent errors",
      "targets": [
        {
          "expr": "sum(rate(velero_node_agent_errors_total[5m]))",
          "legendFormat": "errors"
        }
      ],
      "id": 4,
      "datasource": {
        "type": "prometheus",
        "uid": "prometheus"
      }
    }
  ],
  "schemaVersion": 37,
  "style": "dark",
  "tags": ["velero", "backup"],
  "templating": {
    "list": []
  },
  "time": {
    "from": "now-24h",
    "to": "now"
  },
  "title": "Velero (MinIO S3)",
  "version": 1
}

View File

@@ -1,16 +0,0 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: velero
  namespace: velero
  labels:
    release: prometheus  # adjust to the selector your Prometheus uses
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: velero
  namespaceSelector:
    matchNames: ["velero"]
  endpoints:
    - port: metrics
      interval: 30s

View File

@@ -1,27 +0,0 @@
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: nightly-a
  namespace: velero
spec:
  schedule: "0 2 * * *"
  template:
    ttl: 168h
    includedNamespaces:
      - gitea
      - apolo
    storageLocation: default
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: nightly-b
  namespace: velero
spec:
  schedule: "30 2 * * *"
  template:
    ttl: 168h
    includedNamespaces:
      - gitea
      - apolo
    storageLocation: site-b

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: cloud-credentials-site-a
  namespace: velero
type: Opaque
stringData:
  cloud: |
    [default]
    aws_access_key_id=velero-a
    aws_secret_access_key=Pozuelo12345

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: cloud-credentials-site-b
  namespace: velero
type: Opaque
stringData:
  cloud: |
    [default]
    aws_access_key_id=velero-b
    aws_secret_access_key=Pozuelo12345

View File

@@ -1,7 +0,0 @@
apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: default
  namespace: velero
spec:
  provider: velero.io/csi