commit 7781476751fd4cdc63098faf868644f20681f024 Author: xavor Date: Sat Jul 19 15:35:27 2025 +0000 first commit diff --git a/cert-manager/clusterissuer-prod.yaml b/cert-manager/clusterissuer-prod.yaml new file mode 100644 index 0000000..4449b55 --- /dev/null +++ b/cert-manager/clusterissuer-prod.yaml @@ -0,0 +1,14 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-prod +spec: + acme: + email: xavor@hotmail.es + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-prod + solvers: + - http01: + ingress: + ingressClassName: traefik diff --git a/cert-manager/clusterissuer-staging.yaml b/cert-manager/clusterissuer-staging.yaml new file mode 100644 index 0000000..aa3272f --- /dev/null +++ b/cert-manager/clusterissuer-staging.yaml @@ -0,0 +1,14 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt-staging +spec: + acme: + email: xavor@hotmail.es + server: https://acme-staging-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-staging + solvers: + - http01: + ingress: + ingressClassName: traefik diff --git a/cert-manager/kustomization.yaml b/cert-manager/kustomization.yaml new file mode 100644 index 0000000..4acdc81 --- /dev/null +++ b/cert-manager/kustomization.yaml @@ -0,0 +1,6 @@ +namespace: cert-manager +resources: + - clusterissuer-prod.yaml + - clusterissuer-staging.yaml + + diff --git a/cert-manager/namespace.yaml b/cert-manager/namespace.yaml new file mode 100644 index 0000000..661039b --- /dev/null +++ b/cert-manager/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager \ No newline at end of file diff --git a/cert-manager/readme.md b/cert-manager/readme.md new file mode 100644 index 0000000..158261a --- /dev/null +++ b/cert-manager/readme.md @@ -0,0 +1,15 @@ +# Cert Manager para Kubernetes + +Este repositorio contiene los manifiestos necesarios para desplegar 
[cert-manager](https://cert-manager.io), una herramienta que automatiza la gestión y renovación de certificados TLS en Kubernetes. + +Cert-manager se encarga de emitir y renovar automáticamente certificados mediante ACME (por ejemplo, Let's Encrypt), y es compatible con `Ingress` para habilitar TLS en tus servicios expuestos. + +--- + +## Despliegue + + kubectl apply -f namespace.yaml + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml + kubectl apply -f clusterissuer-staging.yaml + kubectl apply -f clusterissuer-prod.yaml + diff --git a/kubevirt-manager/deployments/deployment.yaml b/kubevirt-manager/deployments/deployment.yaml new file mode 100644 index 0000000..e3bd805 --- /dev/null +++ b/kubevirt-manager/deployments/deployment.yaml @@ -0,0 +1,24 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kubevirt-manager + namespace: kubevirt-manager +spec: + replicas: 1 + selector: + matchLabels: + app: kubevirt-manager + template: + metadata: + labels: + app: kubevirt-manager + spec: + serviceAccountName: kubevirt-manager + containers: + - name: kubevirt-manager + image: kubevirtmanager/kubevirt-manager:1.4.0 + ports: + - containerPort: 8001 + env: + - name: BACKEND_URL + value: "http://localhost:8080" diff --git a/kubevirt-manager/ingress/ingress.yaml b/kubevirt-manager/ingress/ingress.yaml new file mode 100644 index 0000000..01204f9 --- /dev/null +++ b/kubevirt-manager/ingress/ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: kubevirt-manager + namespace: kubevirt-manager + annotations: + cert-manager.io/cluster-issuer: "letsencrypt-prod" + nginx.ingress.kubernetes.io/force-ssl-redirect: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" +spec: + ingressClassName: nginx + tls: + - hosts: + - kubevirt.manabo.org + secretName: kubevirt-manager-tls + rules: + - host: kubevirt.manabo.org + http: + paths: + - path: / + pathType: Prefix + backend: + 
service: + name: kubevirt-manager + port: + number: 80 diff --git a/kubevirt-manager/kustomization.yaml b/kubevirt-manager/kustomization.yaml new file mode 100644 index 0000000..3923093 --- /dev/null +++ b/kubevirt-manager/kustomization.yaml @@ -0,0 +1,7 @@ +resources: + - namespace.yaml + - deployments/deployment.yaml + - services/service.yaml +# - ingress/ingress.yaml + - rbac/serviceaccount.yaml + - rbac/clusterrolebinding.yaml diff --git a/kubevirt-manager/namespace.yaml b/kubevirt-manager/namespace.yaml new file mode 100644 index 0000000..232a17f --- /dev/null +++ b/kubevirt-manager/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kubevirt-manager diff --git a/kubevirt-manager/rbac/clusterrolebinding.yaml b/kubevirt-manager/rbac/clusterrolebinding.yaml new file mode 100644 index 0000000..00934e5 --- /dev/null +++ b/kubevirt-manager/rbac/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubevirt-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubevirt-manager + namespace: kubevirt-manager diff --git a/kubevirt-manager/rbac/serviceaccount.yaml b/kubevirt-manager/rbac/serviceaccount.yaml new file mode 100644 index 0000000..71bde4a --- /dev/null +++ b/kubevirt-manager/rbac/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kubevirt-manager + namespace: kubevirt-manager + diff --git a/kubevirt-manager/services/service.yaml b/kubevirt-manager/services/service.yaml new file mode 100644 index 0000000..ded2a8d --- /dev/null +++ b/kubevirt-manager/services/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: kubevirt-manager + namespace: kubevirt-manager +spec: + selector: + app: kubevirt-manager + ports: + - port: 80 + targetPort: 8001 + protocol: TCP + nodePort: 30081 # <--- puedes elegir el puerto, 
debe estar entre 30000-32767 + type: NodePort diff --git a/metallb/ipaddresspool.yaml b/metallb/ipaddresspool.yaml new file mode 100644 index 0000000..bc5f778 --- /dev/null +++ b/metallb/ipaddresspool.yaml @@ -0,0 +1,7 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: single-ip +spec: + addresses: + - 192.168.1.100/32 diff --git a/metallb/kustomization.yaml b/metallb/kustomization.yaml new file mode 100644 index 0000000..aa029a0 --- /dev/null +++ b/metallb/kustomization.yaml @@ -0,0 +1,4 @@ +namespace: metallb-system +resources: + - ipaddresspool.yaml + - l2advertisement.yaml diff --git a/metallb/l2advertisement.yaml b/metallb/l2advertisement.yaml new file mode 100644 index 0000000..8f9ed7f --- /dev/null +++ b/metallb/l2advertisement.yaml @@ -0,0 +1,5 @@ +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: l2adv +spec: {} diff --git a/multus/nad-br-servicios.yaml b/multus/nad-br-servicios.yaml new file mode 100644 index 0000000..3019924 --- /dev/null +++ b/multus/nad-br-servicios.yaml @@ -0,0 +1,19 @@ +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + name: br-servicios + namespace: default +spec: + config: '{ + "cniVersion": "0.3.1", + "type": "bridge", + "bridge": "br-servicios", + "ipam": { + "type": "host-local", + "subnet": "192.168.200.0/22", + "rangeStart": "192.168.200.100", + "rangeEnd": "192.168.200.200", + "routes": [{"dst": "0.0.0.0/0"}], + "gateway": "192.168.200.1" + } + }' diff --git a/multus/test-multus-pod.yaml b/multus/test-multus-pod.yaml new file mode 100644 index 0000000..0249de4 --- /dev/null +++ b/multus/test-multus-pod.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: multus-test + annotations: + k8s.v1.cni.cncf.io/networks: br-servicios +spec: + containers: + - name: alpine + image: alpine + command: ["sleep", "infinity"] + securityContext: + capabilities: + add: ["NET_ADMIN"] diff --git a/readme-suse.md b/readme-suse.md new file mode 100644 index 
0000000..cf8e087 --- /dev/null +++ b/readme-suse.md @@ -0,0 +1,78 @@ +# Configuración de red en SUSE con NetworkManager + +Ejemplo: Red equivalente a Netplan con bonding (LACP), VLANs y bridges. + +> **Pre-requisitos:** +> +> * openSUSE/SLES con NetworkManager +> * Interfaces: enp2s0f0 y enp2s0f1 + +--- + +## 1. Crear el bond (LACP 802.3ad, rápido, hash layer3+4) + +```bash +nmcli con add type bond ifname bond0 mode 802.3ad +nmcli con mod bond0 bond.options "mode=802.3ad,miimon=100,updelay=200,downdelay=200,lacp_rate=fast,xmit_hash_policy=layer3+4" + +# Añadir interfaces físicas +nmcli con add type ethernet ifname enp2s0f0 master bond0 +nmcli con add type ethernet ifname enp2s0f1 master bond0 +``` + +--- + +## 2. Crear VLANs sobre el bond + +```bash +# VLAN 20 (sin IP, solo para bridge de servicios) +nmcli con add type vlan ifname bond0.20 dev bond0 id 20 + +# VLAN 30 +nmcli con add type vlan ifname bond0.30 dev bond0 id 30 ip4 192.168.3.4/24 + +# VLAN 40 +nmcli con add type vlan ifname bond0.40 dev bond0 id 40 ip4 192.168.4.4/24 +``` + +--- + +## 3. Crear bridges y asignar IP + +```bash +# Bridge de administración (br0) sobre bond0 (sin VLAN) +nmcli con add type bridge ifname br0 +nmcli con add type bridge-slave ifname bond0 master br0 +nmcli con mod br0 ipv4.addresses 192.168.1.14/24 +nmcli con mod br0 ipv4.gateway 192.168.1.1 +nmcli con mod br0 ipv4.dns "192.168.1.1 1.1.1.1 8.8.8.8" +nmcli con mod br0 ipv4.method manual + +# Bridge de servicios (br-servicios) sobre bond0.20 (VLAN 20) +nmcli con add type bridge ifname br-servicios +nmcli con add type bridge-slave ifname bond0.20 master br-servicios +nmcli con mod br-servicios ipv4.addresses 192.168.200.4/22 +nmcli con mod br-servicios ipv4.method manual +``` + +--- + +## 4. Comprobar y aplicar cambios + +```bash +nmcli con show +ip a +# Levantar las conexiones si es necesario +echo "Subiendo interfaces..." 
+nmcli con up bond0 +nmcli con up br0 +nmcli con up br-servicios +``` + +--- + +> **Notas:** +> +> * Si alguna conexión da error, bórrala con `nmcli con delete ` y vuelve a crearla. +> * Puedes usar `nmtui` como alternativa visual. +> * Si necesitas añadir otras VLANs o bridges, repite el patrón. diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..851cb22 --- /dev/null +++ b/readme.md @@ -0,0 +1,415 @@ +# Guía de instalación K8s + KubeVirt en Ubuntu 24.04 + +## 0. Ejemplo de configuración de red (Netplan) + +Si quieres probar Multus con bridges y VLANs, puedes usar una configuración de Netplan como la siguiente. Aquí se configura: + +* Bonding LACP sobre dos interfaces +* VLANs para separar redes +* Bridges para administración y servicios + +**Archivo:** `/etc/netplan/50-cloud-init.yaml` + +```yaml +network: + version: 2 + ethernets: + enp2s0f0np0: {} + enp2s0f1np1: {} + bonds: + bond0: + interfaces: + - enp2s0f0np0 + - enp2s0f1np1 + parameters: + mode: "802.3ad" + lacp-rate: "fast" + transmit-hash-policy: "layer3+4" + vlans: + bond0.20: + id: 20 + link: bond0 + dhcp4: no + bond0.30: + id: 30 + link: bond0 + addresses: + - "192.168.3.4/24" + bond0.40: + id: 40 + link: bond0 + addresses: + - "192.168.4.4/24" + bridges: + br0: + interfaces: + - bond0 + addresses: + - "192.168.1.14/24" + nameservers: + addresses: + - 192.168.1.1 + - 1.1.1.1 + - 8.8.8.8 + routes: + - to: "default" + via: "192.168.1.1" + parameters: + stp: false + forward-delay: 0 + br-servicios: + interfaces: + - bond0.20 + addresses: + - 192.168.200.4/22 + parameters: + stp: false + forward-delay: 0 +``` + +*No olvides aplicar cambios con:* +`sudo netplan apply` + +--- + +## 1. 
Prerrequisitos del sistema + +* Ubuntu 24.04 actualizado +* Acceso root o sudo + +### a) Actualiza el sistema y paquetes básicos + +```bash +sudo apt-get update +sudo apt-get install -y apt-transport-https ca-certificates curl gnupg +``` + +### b) Añade el repositorio oficial de Kubernetes + +```bash +curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg +echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list +sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg +sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list +sudo apt-get update +``` + +--- + +## 2. Desactiva SWAP (requisito Kubernetes) + +```bash +sudo swapoff -a +sudo sed -i '/ swap / s/^/#/' /etc/fstab +``` + +--- + +## 3. Instala containerd (runtime recomendado) + +```bash +sudo apt-get install -y containerd +sudo mkdir -p /etc/containerd +containerd config default | sudo tee /etc/containerd/config.toml +sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml +sudo systemctl restart containerd +sudo systemctl enable containerd +``` + +--- + +## 4. Prepara el kernel y sysctl + +```bash +sudo modprobe overlay +sudo modprobe br_netfilter +cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf +net.bridge.bridge-nf-call-iptables = 1 +net.bridge.bridge-nf-call-ip6tables = 1 +net.ipv4.ip_forward = 1 +EOF +sudo sysctl --system +``` + +--- + +## 5. Instala kubeadm, kubelet y kubectl + +```bash +sudo apt-get install -y kubelet kubeadm kubectl +sudo apt-mark hold kubelet kubeadm kubectl +``` + +--- + +## 6. Inicializa el clúster + +```bash +sudo kubeadm init +``` + +> Si usas Flannel, usa este parámetro: + +```bash +sudo kubeadm init --pod-network-cidr=10.244.0.0/16 +``` + +--- + +### a) Configura kubectl para tu usuario + +```bash +mkdir -p $HOME/.kube +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config +``` + +--- + +## 7. Instala la red de pods (Flannel) + +```bash +kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml +``` + +--- + +## 8. 
Instala Multus (opcional, para múltiples redes) + +```bash +kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml +``` + +* Verifica: + + ```bash + kubectl get pods -n kube-system | grep multus + ``` + +--- + +## 9. (Opcional) Quita el taint del nodo master para poder programar pods en él + +```bash +kubectl taint nodes --all node-role.kubernetes.io/control-plane- +kubectl taint nodes --all node-role.kubernetes.io/master- +``` + +--- + +## 10. Test rápido de Multus + +La carpeta `multus/` del repositorio contiene el NAD y el pod de prueba: + +* `multus/nad-br-servicios.yaml` (NetworkAttachmentDefinition) +* `multus/test-multus-pod.yaml` (pod alpine) + +**Despliega la NAD:** + +```bash +kubectl apply -f multus/nad-br-servicios.yaml +``` + +**Despliega el pod de test:** + +```bash +kubectl apply -f multus/test-multus-pod.yaml +``` + +Comprueba las interfaces: + +```bash +kubectl exec -it multus-test -- sh +ip a +``` + +El pod debería tener una interfaz extra de la red `br-servicios` además de la de Flannel. + +Para limpiar: + +```bash +kubectl delete pod multus-test +``` + +--- + +## 11. Instalación y configuración de MetalLB (LoadBalancer local) + +MetalLB permite a tu clúster on-premise asignar IPs flotantes de tu red LAN a servicios tipo `LoadBalancer`, igual que en cloud. Ideal para exponer ingress-nginx, rancher, etc. directamente en tu red. + +### a) Instala MetalLB + +```bash +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml +``` + +Esto crea el namespace `metallb-system` y despliega los pods necesarios. 
+ +--- + +### b) Crea la configuración del pool de IPs + +En este repositorio, la carpeta `metallb/` contiene los recursos listos para aplicar: + +```shell +metallb/ +├── ipaddresspool.yaml +├── l2advertisement.yaml +└── kustomization.yaml +``` + +**Para aplicar:** + +```bash +kubectl apply -k metallb/ +``` + +--- + +### c) Uso de MetalLB + +A partir de aquí, cualquier Service tipo LoadBalancer obtiene una IP flotante LAN automáticamente. + +**Ejemplo mínimo de Service:** + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: ejemplo +spec: + selector: + app: ejemplo + ports: + - port: 80 + targetPort: 80 + type: LoadBalancer +``` + +Verás la IP asignada en la columna `EXTERNAL-IP` al ejecutar: + +```bash +kubectl get svc +``` + +Puedes acceder desde tu red local a esa IP. + +--- + +## 12. Instalación de Traefik y cert-manager (Ingress + TLS) + +### a) Instala Traefik como Ingress Controller + +* Aplica todos los recursos con Kustomize: + +```bash +kubectl apply -k traefik/ +``` + +* Comprueba que MetalLB asigna una IP al Service principal: + +```bash +kubectl get pods -n traefik +kubectl get svc -n traefik +``` + +### b) Instala cert-manager + +* Crea el namespace: + +```bash +kubectl apply -f cert-manager/namespace.yaml +``` + +* Aplica el manifiesto oficial de cert-manager: + +```bash +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml +``` + +* Crea los `ClusterIssuer` para staging y producción: + +```bash +kubectl apply -f cert-manager/clusterissuer-staging.yaml +kubectl apply -f cert-manager/clusterissuer-prod.yaml +``` + +* Comprueba los pods: + +```bash +kubectl get pods -n cert-manager +``` + +--- + +## 13. Instala KubeVirt y CDI + +**Nota:** Puedes usar manifiestos oficiales, o crear tu carpeta kubevirt/ si deseas versionar los YAML personalizados. 
+ +```bash +# Instala KubeVirt (recomendado hacerlo tras tener la red y almacenamiento) +export KUBEVIRT_VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases/latest | grep tag_name | cut -d '"' -f 4) +kubectl create namespace kubevirt +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml + +# Instala CDI (para gestión de discos/ISOs) +export CDI_VERSION=$(curl -s https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | grep tag_name | cut -d '"' -f 4) +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml +kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-cr.yaml +``` + +--- + +## 14. Instala virtctl (CLI de KubeVirt) + +```bash +curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/virtctl-${KUBEVIRT_VERSION}-linux-amd64 +chmod +x virtctl +sudo mv virtctl /usr/local/bin/ +``` + +--- + +## 15. Habilita KVM para tu usuario + +```bash +sudo usermod -aG kvm $(whoami) +# Reinicia sesión o ejecuta 'newgrp kvm' para aplicar +``` + +--- + +## 16. Despliega kubevirt-manager + +La carpeta `kubevirt-manager/` contiene todos los manifiestos organizados por tipo: + +```bash +kubectl apply -k kubevirt-manager/ +``` + +Puedes comprobar el estado: + +```bash +kubectl get pods -n kubevirt-manager +``` + +--- + +## 17. 
Despliega el stack de almacenamiento NFS + +La carpeta `storage/` tiene todos los manifiestos del servidor y provisioner NFS, organizados en subcarpetas: + +```bash +kubectl apply -k storage/ +``` + +Puedes comprobar el estado: + +```bash +kubectl get pods -n nfs-provisioner +``` + +* Instala el cliente NFS en el nodo: + + ```bash + sudo apt install nfs-common + ``` + +--- diff --git a/storage/deployments/nfs-client-provisioner.yaml b/storage/deployments/nfs-client-provisioner.yaml new file mode 100644 index 0000000..d238034 --- /dev/null +++ b/storage/deployments/nfs-client-provisioner.yaml @@ -0,0 +1,41 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-client-provisioner + namespace: nfs-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-client-provisioner + template: + metadata: + labels: + app: nfs-client-provisioner + spec: + serviceAccountName: nfs-client-provisioner + containers: + - name: nfs-client-provisioner + image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2 + volumeMounts: + - name: nfs-client-root + mountPath: /persistentvolumes + env: + - name: PROVISIONER_NAME + value: k8s-sigs.io/nfs-subdir-external-provisioner + - name: NFS_SERVER + value: 192.168.3.3 # VLAN 30 IP de niflheim + - name: NFS_PATH + value: / + - name: LABELS + value: "namespace,pvcName" + volumes: + - name: nfs-client-root + nfs: + server: 192.168.3.3 + path: / + tolerations: + - key: "storage" + operator: "Equal" + value: "only" + effect: "NoSchedule" \ No newline at end of file diff --git a/storage/deployments/nfs-server.yaml b/storage/deployments/nfs-server.yaml new file mode 100644 index 0000000..a747160 --- /dev/null +++ b/storage/deployments/nfs-server.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nfs-server + namespace: nfs-provisioner +spec: + replicas: 1 + selector: + matchLabels: + app: nfs-server + template: + metadata: + labels: + app: nfs-server + spec: + hostNetwork: true + 
containers: + - name: nfs-server + image: itsthenetwork/nfs-server-alpine:latest + ports: + - name: nfs + containerPort: 2049 + protocol: TCP + securityContext: + privileged: true + env: + - name: SHARED_DIRECTORY + value: /nfsshare + volumeMounts: + - name: nfs-data + mountPath: /nfsshare + volumes: + - name: nfs-data + hostPath: + path: /mnt/storage/k8s/nfsshare + type: Directory + tolerations: + - key: "storage" + operator: "Equal" + value: "only" + effect: "NoSchedule" + nodeSelector: + kubernetes.io/hostname: niflheim \ No newline at end of file diff --git a/storage/kustomization.yaml b/storage/kustomization.yaml new file mode 100644 index 0000000..b8907b0 --- /dev/null +++ b/storage/kustomization.yaml @@ -0,0 +1,8 @@ +resources: + - namespace.yaml + - rbac/clusterrolebinding.yaml + - rbac/clusterrole.yaml + - rbac/serviceaccount.yaml +# - deployments/nfs-server.yaml + - deployments/nfs-client-provisioner.yaml + - storageclass/storageclass.yaml diff --git a/storage/namespace.yaml b/storage/namespace.yaml new file mode 100644 index 0000000..52d071e --- /dev/null +++ b/storage/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: nfs-provisioner \ No newline at end of file diff --git a/storage/rbac/clusterrole.yaml b/storage/rbac/clusterrole.yaml new file mode 100644 index 0000000..4e15b02 --- /dev/null +++ b/storage/rbac/clusterrole.yaml @@ -0,0 +1,20 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: nfs-client-provisioner-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", 
"watch", "create", "update", "patch", "delete"] diff --git a/storage/rbac/clusterrolebinding.yaml b/storage/rbac/clusterrolebinding.yaml new file mode 100644 index 0000000..91775d0 --- /dev/null +++ b/storage/rbac/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-client-provisioner +subjects: + - kind: ServiceAccount + name: nfs-client-provisioner + namespace: nfs-provisioner +roleRef: + kind: ClusterRole + name: nfs-client-provisioner-runner + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/storage/rbac/serviceaccount.yaml b/storage/rbac/serviceaccount.yaml new file mode 100644 index 0000000..147f8b3 --- /dev/null +++ b/storage/rbac/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-client-provisioner + namespace: nfs-provisioner \ No newline at end of file diff --git a/storage/readme.md b/storage/readme.md new file mode 100644 index 0000000..92f9086 --- /dev/null +++ b/storage/readme.md @@ -0,0 +1,17 @@ +# k8s-storage + +Este módulo despliega un driver de almacenamiento dinámico basado en NFS, apuntando a un servidor ZFS (`niflheim`, 192.168.3.3 en la VLAN 30) con la ruta `/mnt/storage/k8s/nfsshare` exportada por el servidor NFS. 
+ +## Componentes incluidos + +- Namespace `nfs-provisioner` +- RBAC necesario +- Deployment del provisioner dinámico +- StorageClass predeterminado `nfs-manabo` + +## Cómo aplicar + + kubectl apply -f namespace.yaml + kubectl apply -f rbac/ + kubectl apply -f deployments/ + kubectl apply -f storageclass/ \ No newline at end of file diff --git a/storage/storageclass/storageclass.yaml b/storage/storageclass/storageclass.yaml new file mode 100644 index 0000000..5b9be18 --- /dev/null +++ b/storage/storageclass/storageclass.yaml @@ -0,0 +1,9 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: nfs-manabo + annotations: + storageclass.kubernetes.io/is-default-class: "true" +provisioner: k8s-sigs.io/nfs-subdir-external-provisioner +reclaimPolicy: Retain +volumeBindingMode: Immediate diff --git a/traefik/configmaps/configmap.yaml b/traefik/configmaps/configmap.yaml new file mode 100644 index 0000000..ef6a56f --- /dev/null +++ b/traefik/configmaps/configmap.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: traefik-config + namespace: traefik +data: + traefik.yml: | + api: + dashboard: true + log: + level: DEBUG + entryPoints: + web: + address: ":80" + websecure: + address: ":443" diff --git a/traefik/deployments/deployment.yaml b/traefik/deployments/deployment.yaml new file mode 100644 index 0000000..ff42d0a --- /dev/null +++ b/traefik/deployments/deployment.yaml @@ -0,0 +1,28 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: traefik + namespace: traefik +spec: + replicas: 1 + selector: + matchLabels: + app: traefik + template: + metadata: + labels: + app: traefik + spec: + serviceAccountName: traefik + containers: + - name: traefik + image: traefik:v3.0 + args: + - --configFile=/config/traefik.yml + volumeMounts: + - name: config + mountPath: /config + volumes: + - name: config + configMap: + name: traefik-config diff --git a/traefik/ingress/ingress.yaml b/traefik/ingress/ingress.yaml new file mode 100644 index 
0000000..65579df --- /dev/null +++ b/traefik/ingress/ingress.yaml @@ -0,0 +1,32 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: traefik-dashboard + namespace: traefik + annotations: + cert-manager.io/cluster-issuer: letsencrypt-prod + kubernetes.io/ingress.class: traefik +spec: + ingressClassName: traefik + tls: + - hosts: + - traefik.manabo.org + secretName: traefik-dashboard-tls + rules: + - host: traefik.manabo.org + http: + paths: + - path: /dashboard + pathType: Prefix + backend: + service: + name: traefik + port: + number: 80 + - path: /api + pathType: Prefix + backend: + service: + name: traefik + port: + number: 80 diff --git a/traefik/ingressclass.yaml b/traefik/ingressclass.yaml new file mode 100644 index 0000000..5345ee2 --- /dev/null +++ b/traefik/ingressclass.yaml @@ -0,0 +1,6 @@ +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + name: traefik +spec: + controller: traefik.io/ingress-controller diff --git a/traefik/kustomization.yaml b/traefik/kustomization.yaml new file mode 100644 index 0000000..83f43e9 --- /dev/null +++ b/traefik/kustomization.yaml @@ -0,0 +1,11 @@ +namespace: traefik +resources: + - namespace.yaml + - rbac/serviceaccount.yaml + - rbac/clusterrole.yaml + - rbac/clusterrolebinding.yaml + - configmaps/configmap.yaml + - deployments/deployment.yaml + - services/service.yaml + - ingressclass.yaml + - ingress/ingress.yaml diff --git a/traefik/namespace.yaml b/traefik/namespace.yaml new file mode 100644 index 0000000..c088a91 --- /dev/null +++ b/traefik/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: traefik diff --git a/traefik/rbac/clusterrole.yaml b/traefik/rbac/clusterrole.yaml new file mode 100644 index 0000000..1158f06 --- /dev/null +++ b/traefik/rbac/clusterrole.yaml @@ -0,0 +1,62 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: traefik +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods 
+ - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + - ingressclasses + - ingresses/status + verbs: + - get + - list + - watch + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - create + - update + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch diff --git a/traefik/rbac/clusterrolebinding.yaml b/traefik/rbac/clusterrolebinding.yaml new file mode 100644 index 0000000..b953c71 --- /dev/null +++ b/traefik/rbac/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: traefik +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: traefik +subjects: + - kind: ServiceAccount + name: traefik + namespace: traefik diff --git a/traefik/rbac/serviceaccount.yaml b/traefik/rbac/serviceaccount.yaml new file mode 100644 index 0000000..4560cb3 --- /dev/null +++ b/traefik/rbac/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: traefik + namespace: traefik diff --git a/traefik/services/service.yaml b/traefik/services/service.yaml new file mode 100644 index 0000000..6443a4f --- /dev/null +++ b/traefik/services/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: traefik + namespace: traefik +spec: + type: LoadBalancer # <-- ¡Esto es lo importante con MetalLB! + ports: + - port: 80 + name: web + targetPort: 80 + - port: 443 + name: websecure + targetPort: 443 + selector: + app: traefik