first commit

commit 7781476751
2025-07-19 15:35:27 +00:00
38 changed files with 1039 additions and 0 deletions

cert-manager/clusterissuer-prod.yaml Normal file
@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
email: xavor@hotmail.es
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
ingressClassName: traefik

cert-manager/clusterissuer-staging.yaml Normal file
@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
email: xavor@hotmail.es
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
ingressClassName: traefik

cert-manager/kustomization.yaml Normal file
@ -0,0 +1,6 @@
namespace: cert-manager
resources:
- clusterissuer-prod.yaml
- clusterissuer-staging.yaml

cert-manager/namespace.yaml Normal file
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: cert-manager

cert-manager/readme.md Normal file
@ -0,0 +1,15 @@
# Cert-Manager for Kubernetes
This repository contains the manifests needed to deploy [cert-manager](https://cert-manager.io), a tool that automates the management and renewal of TLS certificates in Kubernetes.
Cert-manager issues and renews certificates automatically via ACME (for example, Let's Encrypt) and integrates with `Ingress` resources to enable TLS on your exposed services.
---
## Deployment
```bash
kubectl apply -f namespace.yaml
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
kubectl apply -f clusterissuer-staging.yaml
kubectl apply -f clusterissuer-prod.yaml
```
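Once applied, both issuers should register with ACME and report Ready. A quick check (plain kubectl, nothing beyond the names defined above):
```bash
# READY should be True for both ClusterIssuers
kubectl get clusterissuer letsencrypt-staging letsencrypt-prod
# Inspect ACME registration details or errors
kubectl describe clusterissuer letsencrypt-prod
```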

kubevirt-manager/deployments/deployment.yaml Normal file
@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubevirt-manager
namespace: kubevirt-manager
spec:
replicas: 1
selector:
matchLabels:
app: kubevirt-manager
template:
metadata:
labels:
app: kubevirt-manager
spec:
serviceAccountName: kubevirt-manager
containers:
- name: kubevirt-manager
image: kubevirtmanager/kubevirt-manager:1.4.0
ports:
- containerPort: 8001
env:
- name: BACKEND_URL
value: "http://localhost:8080"

kubevirt-manager/ingress/ingress.yaml Normal file
@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kubevirt-manager
namespace: kubevirt-manager
annotations:
cert-manager.io/cluster-issuer: "letsencrypt-prod"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
ingressClassName: nginx
tls:
- hosts:
- kubevirt.manabo.org
secretName: kubevirt-manager-tls
rules:
- host: kubevirt.manabo.org
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kubevirt-manager
port:
number: 80

kubevirt-manager/kustomization.yaml Normal file
@ -0,0 +1,7 @@
resources:
- namespace.yaml
- deployments/deployment.yaml
- services/service.yaml
# - ingress/ingress.yaml
- rbac/serviceaccount.yaml
- rbac/clusterrolebinding.yaml

kubevirt-manager/namespace.yaml Normal file
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: kubevirt-manager

kubevirt-manager/rbac/clusterrolebinding.yaml Normal file
@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubevirt-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubevirt-manager
namespace: kubevirt-manager

kubevirt-manager/rbac/serviceaccount.yaml Normal file
@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kubevirt-manager
namespace: kubevirt-manager

kubevirt-manager/services/service.yaml Normal file
@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: kubevirt-manager
namespace: kubevirt-manager
spec:
selector:
app: kubevirt-manager
ports:
- port: 80
targetPort: 8001
protocol: TCP
nodePort: 30081 # <--- pick any port in the 30000-32767 range
type: NodePort

metallb/ipaddresspool.yaml Normal file
@ -0,0 +1,7 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: single-ip
spec:
addresses:
- 192.168.1.100/32

metallb/kustomization.yaml Normal file
@ -0,0 +1,4 @@
namespace: metallb-system
resources:
- ipaddresspool.yaml
- l2advertisement.yaml

metallb/l2advertisement.yaml Normal file
@ -0,0 +1,5 @@
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: l2adv
spec: {}

multus/nad-br-servicios.yaml Normal file
@ -0,0 +1,19 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: br-servicios
namespace: default
spec:
config: '{
"cniVersion": "0.3.1",
"type": "bridge",
"bridge": "br-servicios",
"ipam": {
"type": "host-local",
"subnet": "192.168.200.0/22",
"rangeStart": "192.168.200.100",
"rangeEnd": "192.168.200.200",
"routes": [{"dst": "0.0.0.0/0"}],
"gateway": "192.168.200.1"
}
}'

multus/test-multus-pod.yaml Normal file
@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: multus-test
annotations:
k8s.v1.cni.cncf.io/networks: br-servicios
spec:
containers:
- name: alpine
image: alpine
command: ["sleep", "infinity"]
securityContext:
capabilities:
add: ["NET_ADMIN"]

readme-suse.md Normal file
@ -0,0 +1,78 @@
# Network configuration on SUSE with NetworkManager
Example: network equivalent to the Netplan setup, with bonding (LACP), VLANs and bridges.
> **Prerequisites:**
>
> * openSUSE/SLES with NetworkManager
> * Interfaces: enp2s0f0 and enp2s0f1
---
## 1. Create the bond (LACP 802.3ad, fast rate, layer3+4 hash)
```bash
nmcli con add type bond ifname bond0 mode 802.3ad
nmcli con mod bond0 bond.options "mode=802.3ad,miimon=100,updelay=200,downdelay=200,lacp_rate=fast,xmit_hash_policy=layer3+4"
# Add the physical interfaces
nmcli con add type ethernet ifname enp2s0f0 master bond0
nmcli con add type ethernet ifname enp2s0f1 master bond0
```
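To confirm that LACP actually negotiated, the kernel exposes the bond state; both slaves should show "MII Status: up" along with the LACP partner details:
```bash
cat /proc/net/bonding/bond0
```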
---
## 2. Create VLANs on top of the bond
```bash
# VLAN 20 (no IP, used only by the services bridge)
nmcli con add type vlan ifname bond0.20 dev bond0 id 20
# VLAN 30
nmcli con add type vlan ifname bond0.30 dev bond0 id 30 ip4 192.168.3.4/24
# VLAN 40
nmcli con add type vlan ifname bond0.40 dev bond0 id 40 ip4 192.168.4.4/24
```
---
## 3. Create the bridges and assign IPs
```bash
# Management bridge (br0) on top of bond0 (untagged)
nmcli con add type bridge ifname br0
nmcli con add type bridge-slave ifname bond0 master br0
nmcli con mod br0 ipv4.addresses 192.168.1.14/24
nmcli con mod br0 ipv4.gateway 192.168.1.1
nmcli con mod br0 ipv4.dns "192.168.1.1 1.1.1.1 8.8.8.8"
nmcli con mod br0 ipv4.method manual
# Services bridge (br-servicios) on top of bond0.20 (VLAN 20)
nmcli con add type bridge ifname br-servicios
nmcli con add type bridge-slave ifname bond0.20 master br-servicios
nmcli con mod br-servicios ipv4.addresses 192.168.200.4/22
nmcli con mod br-servicios ipv4.method manual
```
---
## 4. Check and apply the changes
```bash
nmcli con show
ip a
# Bring the connections up if needed
echo "Bringing up interfaces..."
nmcli con up bond0
nmcli con up br0
nmcli con up br-servicios
```
---
> **Notes:**
>
> * If a connection errors out, delete it with `nmcli con delete <nombre>` and create it again.
> * You can use `nmtui` as a visual alternative.
> * To add more VLANs or bridges, repeat the same pattern.

readme.md Normal file
@ -0,0 +1,415 @@
# K8s + KubeVirt installation guide for Ubuntu 24.04
## 0. Example network configuration (Netplan)
If you want to try Multus with bridges and VLANs, you can use a Netplan configuration like the one below. It sets up:
* LACP bonding over two interfaces
* VLANs to separate networks
* Bridges for management and services
**File:** `/etc/netplan/50-cloud-init.yaml`
```yaml
network:
version: 2
ethernets:
enp2s0f0np0: {}
enp2s0f1np1: {}
bonds:
bond0:
interfaces:
- enp2s0f0np0
- enp2s0f1np1
parameters:
mode: "802.3ad"
lacp-rate: "fast"
transmit-hash-policy: "layer3+4"
vlans:
bond0.20:
id: 20
link: bond0
dhcp4: no
bond0.30:
id: 30
link: bond0
addresses:
- "192.168.3.4/24"
bond0.40:
id: 40
link: bond0
addresses:
- "192.168.4.4/24"
bridges:
br0:
interfaces:
- bond0
addresses:
- "192.168.1.14/24"
nameservers:
addresses:
- 192.168.1.1
- 1.1.1.1
- 8.8.8.8
routes:
- to: "default"
via: "192.168.1.1"
parameters:
stp: false
forward-delay: 0
br-servicios:
interfaces:
- bond0.20
addresses:
- 192.168.200.4/22
parameters:
stp: false
forward-delay: 0
```
*Don't forget to apply the changes with:*
`sudo netplan apply`
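On a remote machine, `sudo netplan try` is safer: it applies the configuration and rolls back automatically unless you confirm. Afterwards the bond and VLANs can be checked with standard tooling:
```bash
sudo netplan try              # apply with automatic rollback on timeout
cat /proc/net/bonding/bond0   # LACP negotiation and slave status
ip -d link show bond0.20      # VLAN tagging details
```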
---
## 1. System prerequisites
* Up-to-date Ubuntu 24.04
* Root or sudo access
### a) Update the system and base packages
```bash
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg
```
### b) Add the official Kubernetes repository
```bash
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
```
---
## 2. Disable swap (a Kubernetes requirement)
```bash
sudo swapoff -a
sudo sed -i '/ swap / s/^/#/' /etc/fstab
```
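Quick check that swap is really gone:
```bash
swapon --show   # empty output means no active swap
free -h         # the Swap row should read 0B
```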
---
## 3. Install containerd (recommended runtime)
```bash
sudo apt-get install -y containerd
sudo mkdir -p /etc/containerd
containerd config default | sudo tee /etc/containerd/config.toml
sudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sudo systemctl restart containerd
sudo systemctl enable containerd
```
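Verify that containerd is running and that the systemd cgroup driver took effect:
```bash
systemctl is-active containerd
grep -n 'SystemdCgroup' /etc/containerd/config.toml   # should show "true"
```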
---
## 4. Prepare kernel modules and sysctl
```bash
# Persist the modules across reboots, then load them now
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sudo sysctl --system
```
---
## 5. Install kubeadm, kubelet and kubectl
```bash
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
```
---
## 6. Initialize the cluster (single-node / lab)
> If you use Flannel, pass this pod network CIDR:
```bash
sudo kubeadm init --pod-network-cidr=10.244.0.0/16
```
---
### a) Configure kubectl for your user
```bash
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
```
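At this point `kubectl` should reach the API server. The node will show as NotReady until the pod network from the next step is installed:
```bash
kubectl get nodes -o wide
```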
---
## 7. Install the pod network (Flannel)
```bash
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
```
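Depending on the Flannel release, its pods land in the `kube-flannel` or `kube-system` namespace:
```bash
kubectl get pods -n kube-flannel
kubectl get nodes   # the node should switch to Ready once Flannel is up
```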
---
## 8. Install Multus (optional, for multiple networks)
```bash
kubectl apply -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset.yml
```
* Verify:
```bash
kubectl get pods -n kube-system | grep multus
```
---
## 9. (Optional) Remove the control-plane taint so pods can be scheduled on it
```bash
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
kubectl taint nodes --all node-role.kubernetes.io/master-
```
---
## 10. Quick Multus test
The `multus/` folder in this repository contains the NAD and the test pod:
* `multus/nad-br-servicios.yaml` (NetworkAttachmentDefinition)
* `multus/test-multus-pod.yaml` (alpine pod)
**Deploy the NAD:**
```bash
kubectl apply -f multus/nad-br-servicios.yaml
```
**Deploy the test pod:**
```bash
kubectl apply -f multus/test-multus-pod.yaml
```
Check the interfaces:
```bash
kubectl exec -it multus-test -- sh
ip a
```
The pod should have an extra interface on the `br-servicios` network, in addition to the Flannel one.
To clean up:
```bash
kubectl delete pod multus-test
```
---
## 11. Install and configure MetalLB (local LoadBalancer)
MetalLB lets an on-premise cluster hand out floating IPs from your LAN to `LoadBalancer` Services, just like a cloud provider. Ideal for exposing ingress-nginx, Rancher, etc. directly on your network.
### a) Install MetalLB
```bash
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml
```
This creates the `metallb-system` namespace and deploys the required pods.
---
### b) Create the IP pool configuration
In this repository, the `metallb/` folder contains the resources ready to apply:
```shell
metallb/
├── ipaddresspool.yaml
├── l2advertisement.yaml
└── kustomization.yaml
```
**To apply:**
```bash
kubectl apply -k metallb/
```
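Verify that both resources exist in the `metallb-system` namespace:
```bash
kubectl get ipaddresspools,l2advertisements -n metallb-system
```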
---
### c) Using MetalLB
From this point on, any Service of type LoadBalancer automatically gets a floating LAN IP.
**Minimal Service example:**
```yaml
apiVersion: v1
kind: Service
metadata:
name: ejemplo
spec:
selector:
app: ejemplo
ports:
- port: 80
targetPort: 80
type: LoadBalancer
```
You will see the assigned IP in the `EXTERNAL-IP` column when you run:
```bash
kubectl get svc
```
You can reach that IP from your local network.
---
## 12. Install Traefik and cert-manager (Ingress + TLS)
### a) Install Traefik as the Ingress Controller
* Apply all the resources with Kustomize:
```bash
kubectl apply -k traefik/
```
* Check that MetalLB assigns an IP to the main Service:
```bash
kubectl get pods -n traefik
kubectl get svc -n traefik
```
### b) Install cert-manager
* Create the namespace:
```bash
kubectl apply -f cert-manager/namespace.yaml
```
* Apply the official cert-manager manifest:
```bash
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/latest/download/cert-manager.yaml
```
* Create the `ClusterIssuer` resources for staging and production:
```bash
kubectl apply -f cert-manager/clusterissuer-staging.yaml
kubectl apply -f cert-manager/clusterissuer-prod.yaml
```
* Check the pods:
```bash
kubectl get pods -n cert-manager
```
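Once an Ingress requests TLS, cert-manager creates `Certificate` objects; both can be checked with plain kubectl:
```bash
kubectl get clusterissuer     # READY should be True
kubectl get certificates -A   # issued certs, once Ingresses request TLS
```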
---
## 13. Install KubeVirt and CDI
**Note:** You can use the official manifests, or create your own kubevirt/ folder if you want to version customized YAML.
```bash
# Install KubeVirt (best done once networking and storage are in place)
export KUBEVIRT_VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases/latest | grep tag_name | cut -d '"' -f 4)
kubectl create namespace kubevirt
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml
kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml
# Install CDI (for disk/ISO management)
export CDI_VERSION=$(curl -s https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | grep tag_name | cut -d '"' -f 4)
kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml
kubectl create -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-cr.yaml
```
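KubeVirt takes a few minutes to roll out; the documented way to wait for it is on the KubeVirt CR condition:
```bash
kubectl -n kubevirt wait kv kubevirt --for condition=Available --timeout=10m
kubectl get pods -n cdi   # CDI pods should also come up
```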
---
## 14. Install virtctl (the KubeVirt CLI)
```bash
curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/virtctl-${KUBEVIRT_VERSION}-linux-amd64
chmod +x virtctl
sudo mv virtctl /usr/local/bin/
```
---
## 15. Enable KVM for your user
```bash
sudo usermod -aG kvm $(whoami)
# Log out and back in, or run 'newgrp kvm' to apply
```
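Check that the device is available and the group membership is active:
```bash
ls -l /dev/kvm         # should exist and belong to the kvm group
id -nG | grep -w kvm   # prints kvm once the membership applies
```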
---
## 16. Deploy kubevirt-manager
The `kubevirt-manager/` folder contains all the manifests organized by type:
```bash
kubectl apply -k kubevirt-manager/
```
You can check the status:
```bash
kubectl get pods -n kubevirt-manager
```
---
## 17. Deploy the NFS storage stack
The `storage/` folder holds all the NFS server and provisioner manifests, organized in subfolders:
```bash
kubectl apply -k storage/
```
You can check the status:
```bash
kubectl get pods -n nfs-provisioner
```
* Install the NFS client on the node:
```bash
sudo apt install nfs-common
```
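As a final smoke test, a PVC against the default StorageClass should be provisioned automatically. A minimal sketch (the `test-pvc` name is illustrative):
```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc test-pvc     # should reach Bound via nfs-manabo
kubectl delete pvc test-pvc  # note: the PV remains (reclaimPolicy: Retain)
```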
---

storage/deployments/nfs-client-provisioner.yaml Normal file
@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-client-provisioner
namespace: nfs-provisioner
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: k8s-sigs.io/nfs-subdir-external-provisioner
- name: NFS_SERVER
value: 192.168.3.3 # VLAN 30 IP of niflheim
- name: NFS_PATH
value: /
- name: LABELS
value: "namespace,pvcName"
volumes:
- name: nfs-client-root
nfs:
server: 192.168.3.3
path: /
tolerations:
- key: "storage"
operator: "Equal"
value: "only"
effect: "NoSchedule"

storage/deployments/nfs-server.yaml Normal file
@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nfs-server
namespace: nfs-provisioner
spec:
replicas: 1
selector:
matchLabels:
app: nfs-server
template:
metadata:
labels:
app: nfs-server
spec:
hostNetwork: true
containers:
- name: nfs-server
image: itsthenetwork/nfs-server-alpine:latest
ports:
- name: nfs
containerPort: 2049
protocol: TCP
securityContext:
privileged: true
env:
- name: SHARED_DIRECTORY
value: /nfsshare
volumeMounts:
- name: nfs-data
mountPath: /nfsshare
volumes:
- name: nfs-data
hostPath:
path: /mnt/storage/k8s/nfsshare
type: Directory
tolerations:
- key: "storage"
operator: "Equal"
value: "only"
effect: "NoSchedule"
nodeSelector:
kubernetes.io/hostname: niflheim

storage/kustomization.yaml Normal file
@ -0,0 +1,8 @@
resources:
- namespace.yaml
- rbac/clusterrolebinding.yaml
- rbac/clusterrole.yaml
- rbac/serviceaccount.yaml
# - deployments/nfs-server.yaml
- deployments/nfs-client-provisioner.yaml
- storageclass/storageclass.yaml

storage/namespace.yaml Normal file
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: nfs-provisioner

storage/rbac/clusterrole.yaml Normal file
@ -0,0 +1,20 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

storage/rbac/clusterrolebinding.yaml Normal file
@ -0,0 +1,12 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: nfs-provisioner
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io

storage/rbac/serviceaccount.yaml Normal file
@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: nfs-provisioner

storage/readme.md Normal file
@ -0,0 +1,17 @@
# k8s-storage
This module deploys a dynamic NFS-backed storage driver, pointing at a ZFS server (`niflheim`, 192.168.1.10) with the path `/mnt/storage/k8s`.
## Included components
- `nfs-provisioner` namespace
- Required RBAC
- Deployment of the dynamic provisioner
- Default StorageClass `nfs-manabo`
## How to apply
```bash
kubectl apply -f namespace.yaml
kubectl apply -f rbac/
kubectl apply -f deployments/
kubectl apply -f storageclass/
```
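To verify the deployment (plain kubectl; `nfs-manabo` is the class defined in this module):
```bash
kubectl get pods -n nfs-provisioner
kubectl get storageclass   # nfs-manabo should be flagged "(default)"
```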

storage/storageclass/storageclass.yaml Normal file
@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nfs-manabo
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
reclaimPolicy: Retain
volumeBindingMode: Immediate

traefik/configmaps/configmap.yaml Normal file
@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: traefik-config
namespace: traefik
data:
traefik.yml: |
api:
dashboard: true
log:
level: DEBUG
entryPoints:
web:
address: ":80"
websecure:
address: ":443"

traefik/deployments/deployment.yaml Normal file
@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik
namespace: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
serviceAccountName: traefik
containers:
- name: traefik
image: traefik:v3.0
args:
- --configFile=/config/traefik.yml
volumeMounts:
- name: config
mountPath: /config
volumes:
- name: config
configMap:
name: traefik-config

traefik/ingress/ingress.yaml Normal file
@ -0,0 +1,32 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-dashboard
namespace: traefik
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: traefik
spec:
ingressClassName: traefik
tls:
- hosts:
- traefik.manabo.org
secretName: traefik-dashboard-tls
rules:
- host: traefik.manabo.org
http:
paths:
- path: /dashboard
pathType: Prefix
backend:
service:
name: traefik
port:
number: 80
- path: /api
pathType: Prefix
backend:
service:
name: traefik
port:
number: 80

traefik/ingressclass.yaml Normal file
@ -0,0 +1,6 @@
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
name: traefik
spec:
controller: traefik.io/ingress-controller

traefik/kustomization.yaml Normal file
@ -0,0 +1,11 @@
namespace: traefik
resources:
- namespace.yaml
- rbac/serviceaccount.yaml
- rbac/clusterrole.yaml
- rbac/clusterrolebinding.yaml
- configmaps/configmap.yaml
- deployments/deployment.yaml
- services/service.yaml
- ingressclass.yaml
- ingress/ingress.yaml

traefik/namespace.yaml Normal file
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: traefik

traefik/rbac/clusterrole.yaml Normal file
@ -0,0 +1,62 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
- ingressclasses
- ingresses/status
verbs:
- get
- list
- watch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- create
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch

traefik/rbac/clusterrolebinding.yaml Normal file
@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik
subjects:
- kind: ServiceAccount
name: traefik
namespace: traefik

traefik/rbac/serviceaccount.yaml Normal file
@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik
namespace: traefik

traefik/services/service.yaml Normal file
@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: traefik
namespace: traefik
spec:
type: LoadBalancer # <-- this is the key part when using MetalLB
ports:
- port: 80
name: web
targetPort: 80
- port: 443
name: websecure
targetPort: 443
selector:
app: traefik