renaming folder

2025-08-31 00:27:06 +02:00
parent 2331da8cf8
commit 9291e46fe9
18 changed files with 0 additions and 0 deletions

velero/backupstoragelocations/bsl-a.yaml

@@ -0,0 +1,16 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: bsl-incluster-a
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups
  config:
    s3Url: http://minio-a.minio-velero.svc.cluster.local:9000
    s3ForcePathStyle: "true"
    region: site-a
  accessMode: ReadWrite
  credential:
    name: cloud-credentials
    key: cloud

velero/backupstoragelocations/bsl-b.yaml

@@ -0,0 +1,16 @@
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: bsl-incluster-b
spec:
  provider: aws
  objectStorage:
    bucket: velero-backups
  config:
    s3Url: http://minio-b.minio-velero.svc.cluster.local:9000
    s3ForcePathStyle: "true"
    region: site-b
  accessMode: ReadWrite
  credential:
    name: cloud-credentials
    key: cloud
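
Once both BSLs are applied, Velero should report them as Available. A quick check, using the velero CLI described in the readme below:

```bash
velero backup-location get -n minio-velero
# both bsl-incluster-a and bsl-incluster-b should show PHASE "Available"
```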

velero/deployments/minio-a.yaml

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio-a
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: minio
      app.kubernetes.io/instance: minio-a
  template:
    metadata:
      labels:
        app.kubernetes.io/name: minio
        app.kubernetes.io/instance: minio-a
    spec:
      securityContext:
        fsGroup: 1000
      containers:
        - name: minio
          image: quay.io/minio/minio:latest
          args: ["server", "/data", "--console-address", ":9001"]
          envFrom:
            - secretRef:
                name: minio-root
          ports:
            - name: s3
              containerPort: 9000
            - name: console
              containerPort: 9001
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "2"
              memory: "4Gi"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: minio-a-data

velero/deployments/minio-b.yaml

@@ -0,0 +1,47 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio-b
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: minio
      app.kubernetes.io/instance: minio-b
  template:
    metadata:
      labels:
        app.kubernetes.io/name: minio
        app.kubernetes.io/instance: minio-b
    spec:
      securityContext:
        fsGroup: 1000
      containers:
        - name: minio
          image: quay.io/minio/minio:latest
          args: ["server", "/data", "--console-address", ":9001"]
          envFrom:
            - secretRef:
                name: minio-root
          ports:
            - name: s3
              containerPort: 9000
            - name: console
              containerPort: 9001
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            requests:
              cpu: "250m"
              memory: "512Mi"
            limits:
              cpu: "2"
              memory: "4Gi"
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: minio-b-data
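
Neither Deployment declares health probes. Since the init Jobs below already poll MinIO's `/minio/health/ready` endpoint, wiring the same endpoint into probes is a natural extension — a sketch to merge under the `minio` container, not part of this commit:

```yaml
# hypothetical addition under spec.template.spec.containers[0]
readinessProbe:
  httpGet:
    path: /minio/health/ready   # same endpoint the init Jobs poll
    port: 9000
  periodSeconds: 10
livenessProbe:
  httpGet:
    path: /minio/health/live    # MinIO's liveness endpoint
    port: 9000
  initialDelaySeconds: 10
  periodSeconds: 20
```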

velero/jobs/minio-a-init.yaml

@@ -0,0 +1,50 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: minio-a-init
  namespace: minio-velero
spec:
  backoffLimit: 3
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: init
          image: bitnami/minio-client:latest
          command: ["/bin/sh", "-lc"]
          env:
            - name: MINIO_ENDPOINT
              value: "http://minio-a.minio-velero.svc.cluster.local:9000"
            - name: MINIO_ROOT_USER
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_USER
            - name: MINIO_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_PASSWORD
          args:
            - |
              set -euo pipefail
              echo "[init-a] waiting for $MINIO_ENDPOINT ..."
              until curl -sf "$MINIO_ENDPOINT/minio/health/ready" >/dev/null; do sleep 2; done
              echo "[init-a] configuring bucket/user/policy"
              mc alias set minioA "$MINIO_ENDPOINT" "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"
              mc mb minioA/velero-backups || true
              mc version enable minioA/velero-backups || true
              mc admin user add minioA velero Velero12345 || true
              cat > /tmp/policy.json <<'EOF'
              { "Version": "2012-10-17",
                "Statement": [
                  { "Effect": "Allow", "Action": ["s3:*"],
                    "Resource": ["arn:aws:s3:::velero-backups", "arn:aws:s3:::velero-backups/*"] }
                ] }
              EOF
              mc admin policy create minioA velero-rw /tmp/policy.json || true
              mc admin policy attach minioA velero-rw --user velero || true
              echo "[init-a] verifying with velero creds"
              mc alias set a-vel "$MINIO_ENDPOINT" velero Velero12345
              mc ls a-vel/velero-backups >/dev/null
              echo "[init-a] done"

velero/jobs/minio-b-init.yaml

@@ -0,0 +1,50 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: minio-b-init
  namespace: minio-velero
spec:
  backoffLimit: 3
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: init
          image: bitnami/minio-client:latest
          command: ["/bin/sh", "-lc"]
          env:
            - name: MINIO_ENDPOINT
              value: "http://minio-b.minio-velero.svc.cluster.local:9000"
            - name: MINIO_ROOT_USER
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_USER
            - name: MINIO_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: minio-root
                  key: MINIO_ROOT_PASSWORD
          args:
            - |
              set -euo pipefail
              echo "[init-b] waiting for $MINIO_ENDPOINT ..."
              until curl -sf "$MINIO_ENDPOINT/minio/health/ready" >/dev/null; do sleep 2; done
              echo "[init-b] configuring bucket/user/policy"
              mc alias set minioB "$MINIO_ENDPOINT" "$MINIO_ROOT_USER" "$MINIO_ROOT_PASSWORD"
              mc mb minioB/velero-backups || true
              mc version enable minioB/velero-backups || true
              mc admin user add minioB velero Velero12345 || true
              cat > /tmp/policy.json <<'EOF'
              { "Version": "2012-10-17",
                "Statement": [
                  { "Effect": "Allow", "Action": ["s3:*"],
                    "Resource": ["arn:aws:s3:::velero-backups", "arn:aws:s3:::velero-backups/*"] }
                ] }
              EOF
              mc admin policy create minioB velero-rw /tmp/policy.json || true
              mc admin policy attach minioB velero-rw --user velero || true
              echo "[init-b] verifying with velero creds"
              mc alias set b-vel "$MINIO_ENDPOINT" velero Velero12345
              mc ls b-vel/velero-backups >/dev/null
              echo "[init-b] done"
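
Both Jobs are idempotent (every `mc` step tolerates "already exists" via `|| true`), so they can be re-applied safely. To confirm they finished:

```bash
kubectl -n minio-velero wait --for=condition=complete job/minio-a-init job/minio-b-init --timeout=300s
kubectl -n minio-velero logs job/minio-a-init   # should end with "[init-a] done"
```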

velero/kustomization.yaml

@@ -0,0 +1,21 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: minio-velero
resources:
- namespace.yaml
- secrets/minio-root.yaml
- secrets/cloud-credentials.yaml
- networkpolicies/default-deny-ingress.yaml
- networkpolicies/allow-self-to-minio.yaml
- pvcs/minio-a-pvc.yaml
- deployments/minio-a.yaml
- services/minio-a.yaml
- jobs/minio-a-init.yaml
- pvcs/minio-b-pvc.yaml
- deployments/minio-b.yaml
- services/minio-b.yaml
- jobs/minio-b-init.yaml
- backupstoragelocations/bsl-a.yaml
- backupstoragelocations/bsl-b.yaml
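
The kustomization pulls in all of the manifests above; the rendered output can be previewed and validated without touching the cluster:

```bash
kubectl kustomize .                   # render the manifests without applying
kubectl apply -k . --dry-run=server   # validate against the API server
```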

velero/namespace.yaml

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: minio-velero

velero/networkpolicies/allow-self-to-minio.yaml

@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-self-to-minio
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/name: minio
  policyTypes: ["Ingress"]
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: minio-velero
      ports:
        - protocol: TCP
          port: 9000
        - protocol: TCP
          port: 9001

velero/networkpolicies/default-deny-ingress.yaml

@@ -0,0 +1,7 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes: ["Ingress"]
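
Together the two policies block all ingress to the namespace except same-namespace traffic to the MinIO ports. A quick way to exercise both rules (one-off test pods; the pod names are arbitrary):

```bash
# from inside minio-velero the health endpoint answers (allow-self-to-minio)
kubectl -n minio-velero run np-test --rm -i --image=curlimages/curl --restart=Never -- \
  curl -sf http://minio-a.minio-velero.svc.cluster.local:9000/minio/health/ready
# from any other namespace the same request must time out (default-deny-ingress)
kubectl -n default run np-test --rm -i --image=curlimages/curl --restart=Never -- \
  curl -s --max-time 5 http://minio-a.minio-velero.svc.cluster.local:9000/minio/health/ready
```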

velero/pvcs/minio-a-pvc.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-a-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: sc-me5-site-a
  resources:
    requests:
      storage: 5Ti

velero/pvcs/minio-b-pvc.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-b-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: sc-me5-site-b
  resources:
    requests:
      storage: 5Ti

velero/readme.md

@@ -0,0 +1,96 @@
# Velero + MinIO (c2et.net)

This package contains:

* `namespace.yaml`
* Credential Secrets (`cloud-credentials.yaml`, `minio-root.yaml`)
* MinIO deployments (`minio-a`, `minio-b`) with their `PVC`s and `Service`s
* Initialization Jobs (`minio-a-init`, `minio-b-init`)
* `BackupStorageLocation` (BSL) resources defined in YAML: `bsl-incluster-a` and `bsl-incluster-b`
* Example `Schedule`s (nightly at 02:00 and 02:30)
* `values-velero.yaml`: Velero deployment with no BSL/Secret (GitOps)
* `ServiceMonitor` (if you use the Prometheus Operator)
* Grafana dashboard (JSON)

## Recommended workflow (GitOps)

```bash
# 1) Install Velero via Helm, without BSLs or secrets
helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
helm upgrade --install velero vmware-tanzu/velero -n minio-velero -f values-velero.yaml
# 2) Create the namespace and deploy MinIO with its Secrets, PVCs, Services, and init Jobs
kubectl apply -k ./
# 3) (Optional) Apply the Schedules
kubectl apply -f schedules/schedules.yaml
```

## Velero client

To interact with Velero you need the binary on your admin machine.

```bash
# Linux AMD64
wget https://github.com/vmware-tanzu/velero/releases/download/v1.16.2/velero-v1.16.2-linux-amd64.tar.gz
tar -xvf velero-v1.16.2-linux-amd64.tar.gz
sudo mv velero-v1.16.2-linux-amd64/velero /usr/local/bin/
```

Verify the installation:

```bash
velero version
```

## Taking a manual backup

Example: back up the `wireguard` namespace.

```bash
velero backup create wireguard-backup --include-namespaces wireguard --wait
velero backup describe wireguard-backup --details
```

You can exclude resources you do not need (e.g. KubeVirt CRDs):

```bash
velero backup create smoke --include-namespaces default --exclude-resources uploadtokenrequests.upload.cdi.kubevirt.io --wait
```

## Scheduling backups (Schedules)

Example of a daily run at 03:15 with a 30-day TTL:

```bash
velero schedule create daily-wireguard --schedule "15 3 * * *" --include-namespaces wireguard --ttl 720h --default-volumes-to-fs-backup
```

Schedules can also be defined in YAML in `schedules/schedules.yaml`.
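
A sketch of what such a YAML definition can look like, mirroring the CLI example above (the repo's `schedules/schedules.yaml` remains authoritative):

```yaml
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: daily-wireguard
  namespace: minio-velero
spec:
  schedule: "15 3 * * *"    # same cron expression as the CLI example
  template:
    includedNamespaces:
      - wireguard
    ttl: 720h               # 30 days
    defaultVolumesToFsBackup: true
```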
## Restoring a backup

### Restore into the same namespace (real disaster)

```bash
# 1) Delete the broken namespace
kubectl delete ns wireguard
# 2) Restore from the backup
velero restore create wireguard-restore --from-backup wireguard-backup --wait
velero restore describe wireguard-restore --details
```

### Restore into a different namespace (rehearsal)

```bash
kubectl create ns wireguard-restore
velero restore create wireguard-restore-test --from-backup wireguard-backup --namespace-mappings wireguard:wireguard-restore --wait
```

## Notes

* MinIO requires `s3ForcePathStyle=true`.
* If you use your own CA, add the CA bundle to each BSL via `spec.objectStorage.caCert` (see the sketch after this list).
* The `ServiceMonitor` requires the Prometheus Operator; adjust `metadata.labels.release` to the value your Prometheus uses.
* Import the dashboard JSON into Grafana (datasource `prometheus`).
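
A sketch of the CA variant, assuming Velero's documented `objectStorage.caCert` field (a base64-encoded PEM bundle):

```yaml
spec:
  objectStorage:
    bucket: velero-backups
    caCert: <base64-encoded PEM bundle>   # placeholder, not a literal value
```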

velero/secrets/cloud-credentials.yaml

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
  name: cloud-credentials
type: Opaque
stringData:
  cloud: |
    [default]
    aws_access_key_id = velero
    aws_secret_access_key = Velero12345

velero/secrets/minio-root.yaml

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: minio-root
type: Opaque
stringData:
  MINIO_ROOT_USER: admin
  MINIO_ROOT_PASSWORD: "Pozuelo12345"

velero/services/minio-a.yaml

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-a
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-a
  ports:
    - name: s3
      port: 9000
      targetPort: 9000
    - name: console
      port: 9001
      targetPort: 9001

velero/services/minio-b.yaml

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
  name: minio-b
  labels:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: minio
    app.kubernetes.io/instance: minio-b
  ports:
    - name: s3
      port: 9000
      targetPort: 9000
    - name: console
      port: 9001
      targetPort: 9001
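
Both Services are ClusterIP-only and the NetworkPolicies above allow only same-namespace traffic, so the MinIO console is reached with a port-forward when needed:

```bash
kubectl -n minio-velero port-forward svc/minio-a 9001:9001
# then open http://localhost:9001 and log in with the minio-root credentials
```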

velero/values-velero.yaml

@@ -0,0 +1,28 @@
# Do not create credentials or BSLs/VSLs from Helm (you manage them via YAML)
credentials:
  useSecret: false
configuration:
  # In v1.16 the flag is no longer needed, but to force the FS backup engine:
  # features: EnableFSBackup
  features: ""                    # leave empty to force nothing
  backupStorageLocation: []       # BSLs managed via YAML
  volumeSnapshotLocation: []      # avoids the "spec.provider: Required value" error
  defaultVolumesToFsBackup: true  # FS backup by default (Kopia/node-agent)
# Deploy the node-agent (a DaemonSet on every node)
deployNodeAgent: true
nodeAgent:
  podConfig:
    # Treat this as a catch-all toleration for any taint
    tolerations:
      - operator: "Exists"
# S3 plugin for MinIO (required so the server has the "aws" provider)
initContainers:
  - name: velero-plugin-for-aws
    image: velero/velero-plugin-for-aws:v1.9.0
    imagePullPolicy: IfNotPresent
    volumeMounts:
      - name: plugins
        mountPath: /target
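
Two quick checks that the options above took effect after the `helm upgrade --install` from the readme (resource names assume the chart defaults):

```bash
# deployNodeAgent: true -> a node-agent DaemonSet running on every node
kubectl -n minio-velero get daemonset node-agent
# initContainers -> the AWS object-store plugin registered in the server
velero plugin get -n minio-velero
```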