added apolo

2025-08-17 10:15:19 +02:00
parent 2a3067dc0b
commit 31a109fd5c
68 changed files with 2416 additions and 26 deletions

View File

@@ -0,0 +1,14 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: chat-cert
namespace: apolo
spec:
secretName: chat-tls
dnsNames:
- chat.apolo.c2et.net
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
privateKey:
rotationPolicy: Always
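Once applied, issuance can be confirmed from the cluster. A quick check (assuming kubectl access and OpenSSL 1.1.1+ for the `-ext` flag; the same commands work for `meeting-cert`/`meeting-tls` below):

```bash
# Ready=True means cert-manager has issued the certificate into chat-tls
kubectl -n apolo get certificate chat-cert
# Inspect the issued certificate's subject, expiry, and SANs
kubectl -n apolo get secret chat-tls -o jsonpath='{.data.tls\.crt}' \
  | base64 -d | openssl x509 -noout -subject -enddate -ext subjectAltName
```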

View File

@@ -0,0 +1,15 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: meeting-cert
namespace: apolo
spec:
secretName: meeting-tls
issuerRef:
kind: ClusterIssuer
name: letsencrypt-prod
commonName: meeting.apolo.c2et.net
dnsNames:
- meeting.apolo.c2et.net
privateKey:
rotationPolicy: Always

View File

@@ -0,0 +1,26 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns-custom
namespace: apolo
data:
Corefile: |
.:53 {
log
errors
health
reload
hosts {
192.168.200.10 backend.apolo.c2et.net
192.168.200.10 portal.apolo.c2et.net
192.168.200.10 colossus.apolo.c2et.net
192.168.200.13 chat.apolo.c2et.net
192.168.200.13 muc.chat.apolo.c2et.net
192.168.200.12 streaming.apolo.c2et.net
192.168.200.14 meeting.apolo.c2et.net
fallthrough
}
forward . /etc/resolv.conf
cache 120
# prometheus 0.0.0.0:9153 # <- enable if you want metrics
}
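A quick sanity check of these host overrides from a VPN client (assuming the CoreDNS Service defined later in this commit keeps the LB IP `192.168.200.11`):

```bash
dig +short chat.apolo.c2et.net @192.168.200.11       # expect 192.168.200.13
dig +short streaming.apolo.c2et.net @192.168.200.11  # expect 192.168.200.12
dig +short example.com @192.168.200.11               # falls through to the upstream resolver
```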

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-ejabberd-inetrc
namespace: apolo
data:
inetrc: |
{file, resolv, "/etc/resolv.conf"}.

View File

@@ -0,0 +1,249 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-ejabberd-config
namespace: apolo
data:
ejabberd.yml: |
define_macro:
HOST: "chat.apolo.c2et.net"
ADMIN: "admin@chat.apolo.c2et.net"
hosts:
- "@HOST@"
loglevel: info
certfiles:
- /home/ejabberd/conf/chat.pem
# Use the system CA bundle
ca_file: "/etc/ssl/certs/ca-certificates.crt"
auth_method: [external]
extauth_program: "/usr/local/bin/auth"
extauth_pool_size: 3
auth_use_cache: false
listen:
-
port: 5222
ip: "::"
module: ejabberd_c2s
max_stanza_size: 262144
shaper: c2s_shaper
access: c2s
starttls_required: true
-
port: 5223
ip: "::"
module: ejabberd_c2s
max_stanza_size: 262144
shaper: c2s_shaper
access: c2s
tls: true
-
port: 5269
ip: "::"
module: ejabberd_s2s_in
max_stanza_size: 524288
shaper: s2s_shaper
-
port: 5443
ip: "::"
module: ejabberd_http
tls: true
request_handlers:
/admin: ejabberd_web_admin
/api: mod_http_api
/bosh: mod_bosh
/captcha: ejabberd_captcha
/upload: mod_http_upload
/ws: ejabberd_http_ws
/oauth: ejabberd_oauth
-
port: 5280
ip: "::"
module: ejabberd_http
request_handlers:
/admin: ejabberd_web_admin
/api: mod_http_api
/bosh: mod_bosh
/ws: ejabberd_http_ws
-
port: 1880
ip: "::"
module: ejabberd_http
request_handlers:
/: ejabberd_web_admin
-
port: 1883
ip: "::"
module: mod_mqtt
backlog: 1000
s2s_use_starttls: optional
acl:
local:
user_regexp: ""
loopback:
ip:
- 127.0.0.0/8
- ::1/128
admin:
- user: "admin@chat.apolo.c2et.net"
access_rules:
local:
allow: local
c2s:
deny: blocked
allow: all
announce:
allow: admin
configure:
allow: admin
muc_create:
allow: local
pubsub_createnode:
allow: local
trusted_network:
allow: loopback
api_permissions:
"ejabberd API":
from: mod_http_api
who: admin
what:
- "*"
"console commands":
from: ejabberd_ctl
who: all
what: "*"
"webadmin commands":
from: ejabberd_web_admin
who: admin
what: "*"
"admin access":
who:
access:
allow:
- acl: loopback
- acl: admin
oauth:
scope: "ejabberd:admin"
access:
allow:
- acl: loopback
- acl: admin
what:
- "*"
- "!stop"
- "!start"
"public commands":
who:
ip: 127.0.0.1/8
what:
- status
- connected_users_number
shaper:
normal:
rate: 3000
burst_size: 20000
fast: 100000
shaper_rules:
max_user_sessions: 10
max_user_offline_messages:
5000: admin
100: all
c2s_shaper:
none: admin
normal: all
s2s_shaper: fast
max_s2s_connections: 1
modules:
mod_adhoc: {}
mod_admin_extra: {}
mod_announce:
access: announce
mod_avatar: {}
mod_blocking: {}
mod_bosh: {}
mod_caps: {}
mod_carboncopy: {}
mod_client_state: {}
mod_configure: {}
mod_disco: {}
mod_fail2ban:
c2s_max_auth_failures: 7
c2s_auth_ban_lifetime: 3600
mod_http_api: {}
mod_http_upload:
put_url: https://@HOST@:5443/upload
custom_headers:
"Access-Control-Allow-Origin": "https://@HOST@"
"Access-Control-Allow-Methods": "GET,HEAD,PUT,OPTIONS"
"Access-Control-Allow-Headers": "Content-Type,Authorization"
mod_last: {}
mod_mam:
assume_mam_usage: true
default: always
mod_mqtt: {}
mod_muc:
host: "muc.@HOST@"
access:
- allow
access_admin:
- allow: admin
access_create: muc_create
access_persistent: muc_create
access_mam:
- allow
default_room_options:
allow_subscription: true
mam: true
persistent: true
mod_muc_admin: {}
mod_muc_occupantid: {}
mod_offline:
access_max_user_messages: max_user_offline_messages
mod_ping: {}
mod_privacy: {}
mod_private: {}
mod_proxy65:
access: local
max_connections: 5
port: 7777
mod_pubsub:
access_createnode: pubsub_createnode
plugins:
- flat
- pep
force_node_config:
storage:bookmarks:
access_model: whitelist
mod_push: {}
mod_push_keepalive: {}
mod_register:
ip_access: trusted_network
welcome_message:
subject: "Welcome to @HOST@"
body: "Hi.\n\nWelcome to XRF Chat.\n\n"
registration_watchers:
- "admin@chat.apolo.c2et.net"
mod_roster:
versioning: true
mod_sip: {}
mod_s2s_bidi: {}
mod_s2s_dialback: {}
mod_shared_roster: {}
mod_stream_mgmt:
resend_on_timeout: if_offline
mod_vcard: {}
mod_vcard_xupdate: {}
mod_version:
show_os: false
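Once the pod is running, a couple of basic health checks via `ejabberdctl` (shipped in the official ejabberd images; assumed present in this custom build):

```bash
# Node status and current client connections
kubectl -n apolo exec deploy/apolo-ejabberd -- ejabberdctl status
kubectl -n apolo exec deploy/apolo-ejabberd -- ejabberdctl connected_users_number
```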

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-kurento-api-config
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kurento-api
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento-api
data:
ENV: "development"
PORT: "3000"
KURENTO_WS_URI: "ws://apolo-kms:8888/kurento"
API_BASE_URL: "https://backend.apolo.c2et.net"
DASHBOARD_USERNAME: "admin"
SSL_KEY_PATH: "/app/keys/kurento.key"
SSL_CERT_PATH: "/app/keys/kurento.crt"
SSL_CA_PATH: "/app/keys/ca.crt"

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-kms-env
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kms
app.kubernetes.io/part-of: apolo
data:
ENV: "development"
KMS_MIN_PORT: "40000"
KMS_MAX_PORT: "40099"
KMS_STUN_IP: "stun.l.google.com"
KMS_STUN_PORT: "19302"
KMS_EXTERNAL_IPV4: "meeting.apolo.c2et.net"
GST_DEBUG: "3,Kurento*:4"

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-mediamtx-env
namespace: apolo
labels:
app.kubernetes.io/name: apolo-mediamtx
app.kubernetes.io/part-of: apolo
data:
MTX_PROTOCOLS: "tcp"
MTX_RTSP_ADDRESS: "0.0.0.0:8554"
MTX_RTMP_ADDRESS: "0.0.0.0:1935"
MTX_HLS_ADDRESS: "0.0.0.0:8887"
# Use the FQDN that clients inside the VPN will see.
# If your players/portal use .net -> keep .net; if they still use .com, change it here to .com
MTX_EXTERNAL_ADDRESS: "streaming.apolo.c2et.net"

View File

@@ -0,0 +1,30 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-portal-proxy
namespace: apolo
data:
default.conf: |
server {
listen 8080;
# Proxy to the real portal (listens on 3000 inside the same Pod)
location / {
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Force uncompressed content from upstream so sub_filter can work
proxy_set_header Accept-Encoding "";
# Rewrite .com -> .net in responses to the client
sub_filter_once off;
sub_filter_types *;
sub_filter 'https://backend.apolo.c2et.com' 'https://backend.apolo.c2et.net';
sub_filter 'wss://backend.apolo.c2et.com' 'wss://backend.apolo.c2et.net';
proxy_pass http://127.0.0.1:3000;
}
}
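To confirm the rewrites actually happen, a hypothetical spot check through the sidecar:

```bash
# Forward the sidecar port locally and look for surviving .com references
kubectl -n apolo port-forward deploy/apolo-portal 8080:8080 &
curl -s http://127.0.0.1:8080/ | grep -Eo 'backend\.apolo\.c2et\.(com|net)' | sort -u
# Only the .net variant should remain after sub_filter
```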

View File

@@ -0,0 +1,21 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-streamer-config
namespace: apolo
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
data:
PORT: "80"
PUSHER_CLUSTER: "eu"
FALLBACK_VIDEO: 'file:///usr/app/files/media/fallback.mp4'
STREAM_SERVER_URL: '^rtmp:\/\/(localhost|127.0.0.1|172.19.127.147|172.17.0.1|media-server|streaming.apolo.c2et.com):[0-9]{3,4}\/\S+$'
STREAM_SERVER_ID_CREATOR: '^rtmp:\/\/(localhost|127.0.0.1|172.19.127.147|172.17.0.1|media-server|streaming.apolo.c2et.com):[0-9]{3,4}\/'
MEDIA_SERVER_API_URL: 'http://streaming.apolo.c2et.com'
STREAMER_SERVER_API_URL: 'http://streaming.apolo.c2et.com'
MEDIA_SERVER_HOSTNAME: 'streaming.apolo.c2et.com'
BACKEND_DEV: 'backend.apolo.c2et.com'
BACKEND_PRE: 'backend.apolo.c2et.com'
BACKEND_PRO: 'https://backend.apolo.c2et.com'

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: apolo-web-config
namespace: apolo
labels:
app.kubernetes.io/name: apolo-web
app.kubernetes.io/part-of: apolo
data:
SERVER_NAME: "backend.apolo.c2et.net"
PHP_HOST: "apolo-php"
PHP_PORT: "9000"

View File

@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-app6
namespace: apolo
labels:
app.kubernetes.io/name: apolo-app6
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: app6
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-app6
template:
metadata:
labels:
app.kubernetes.io/name: apolo-app6
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: app6
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: app6
image: harbor.c2et.com/xrf-ssl/xrf-app6:6.0
imagePullPolicy: IfNotPresent
ports:
- name: tcp-app6
containerPort: 2525
readinessProbe:
tcpSocket: { port: 2525 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 2525 }
initialDelaySeconds: 20
periodSeconds: 20
resources:
requests: { cpu: "50m", memory: "64Mi" }
limits: { cpu: "500m", memory: "256Mi" }

View File

@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-colossus
namespace: apolo
labels:
app.kubernetes.io/name: apolo-colossus
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: colossus
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-colossus
template:
metadata:
labels:
app.kubernetes.io/name: apolo-colossus
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: colossus
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: colossus
image: harbor.c2et.com/xrf-ssl/xrf-webcolossus:6.0
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 3000
# If the container returns OK on /:
readinessProbe:
httpGet: { path: /, port: 3000 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet: { path: /, port: 3000 }
initialDelaySeconds: 20
periodSeconds: 20
# If you later need to force the .net backend at runtime (only if the image reads it at runtime):
# env:
# - name: NEXT_PUBLIC_APP_BASE_URL
# value: "https://backend.apolo.c2et.net"

View File

@@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-consumer
namespace: apolo
labels:
app.kubernetes.io/name: apolo-consumer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: consumer
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-consumer
template:
metadata:
labels:
app.kubernetes.io/name: apolo-consumer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: consumer
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: consumer
image: harbor.c2et.com/xrf-ssl/xrf-consumer:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: apolo-backend-secret
env:
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: apolo-db-url
key: DATABASE_URL
- name: MESSENGER_TRANSPORT_DSN
valueFrom:
secretKeyRef:
name: apolo-rabbit-dsn
key: MESSENGER_TRANSPORT_DSN
resources:
requests:
cpu: "100m"
memory: "128Mi"
limits:
cpu: "500m"
memory: "512Mi"
# If this pod must resolve internal FQDNs to 200.10 via your CoreDNS:
# dnsPolicy: None
# dnsConfig:
# nameservers: [ "192.168.200.11" ]

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns-custom
namespace: apolo
labels:
app: coredns-custom
spec:
replicas: 2
selector:
matchLabels:
app: coredns-custom
template:
metadata:
labels:
app: coredns-custom
spec:
containers:
- name: coredns
image: coredns/coredns:1.11.1
args: ["-conf", "/etc/coredns/Corefile"]
ports:
- name: dns-udp
containerPort: 53
protocol: UDP
- name: dns-tcp
containerPort: 53
protocol: TCP
- name: metrics
containerPort: 9153
protocol: TCP
readinessProbe:
httpGet: { path: /health, port: 8080 }
initialDelaySeconds: 3
periodSeconds: 10
livenessProbe:
httpGet: { path: /health, port: 8080 }
initialDelaySeconds: 10
periodSeconds: 20
volumeMounts:
- name: config
mountPath: /etc/coredns
volumes:
- name: config
configMap:
name: coredns-custom
items:
- key: Corefile
path: Corefile

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-drone
namespace: apolo
labels:
app.kubernetes.io/name: apolo-drone
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: drone
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-drone
template:
metadata:
labels:
app.kubernetes.io/name: apolo-drone
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: drone
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: drone
image: harbor.c2et.com/xrf-ssl/xrf-drone:6.0
imagePullPolicy: IfNotPresent
# If Drone needs other backend vars, you can add:
# envFrom:
# - secretRef: { name: apolo-backend-secret }
envFrom:
- secretRef:
name: apolo-drone-secret
# As a worker, no ports are exposed
resources:
requests: { cpu: "100m", memory: "128Mi" }
limits: { cpu: "500m", memory: "512Mi" }
# (Optional) simple probes; remove them if the image does not tolerate them
livenessProbe:
exec:
command: ["/bin/sh","-lc","ps -o pid= 1 >/dev/null 2>&1"]
initialDelaySeconds: 20
periodSeconds: 20
readinessProbe:
exec:
command: ["/bin/sh","-lc","ps -o pid= 1 >/dev/null 2>&1"]
initialDelaySeconds: 5
periodSeconds: 10

View File

@@ -0,0 +1,109 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-ejabberd
namespace: apolo
labels:
app.kubernetes.io/name: apolo-ejabberd
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-ejabberd
template:
metadata:
labels:
app.kubernetes.io/name: apolo-ejabberd
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
imagePullSecrets:
- name: harbor-cred
# >>> Ensure permissions/ownership on volumes
securityContext:
runAsUser: 9000
runAsGroup: 9000
fsGroup: 9000
fsGroupChangePolicy: "OnRootMismatch"
initContainers:
- name: make-chat-pem
image: alpine:latest
command: ["/bin/sh","-lc"]
args:
- |
set -eu
mkdir -p /work/conf
cat /tls/tls.key /tls/tls.crt > /work/conf/chat.pem
chmod 600 /work/conf/chat.pem
volumeMounts:
- { name: tls, mountPath: /tls, readOnly: true }
- { name: conf, mountPath: /work/conf }
- name: fix-perms
image: alpine:latest
securityContext:
runAsUser: 0
runAsGroup: 0
command: ["/bin/sh","-lc"]
args:
- |
set -eu
chown -R 9000:9000 /home/ejabberd/database || true
chown -R 9000:9000 /home/ejabberd/logs || true
chown -R 9000:9000 /home/ejabberd/conf || true
volumeMounts:
- { name: data, mountPath: /home/ejabberd/database }
- { name: logs, mountPath: /home/ejabberd/logs }
- { name: conf, mountPath: /home/ejabberd/conf }
containers:
- name: ejabberd
image: harbor.c2et.com/xrf-ssl/xrf-ejabberd:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: apolo-ejabberd-secret
ports:
- { name: c2s, containerPort: 5222, protocol: TCP }
- { name: s2s, containerPort: 5269, protocol: TCP }
- { name: https, containerPort: 5443, protocol: TCP }
- { name: http, containerPort: 5280, protocol: TCP }
readinessProbe:
tcpSocket: { port: 5222 }
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 5222 }
initialDelaySeconds: 30
periodSeconds: 20
volumeMounts:
- { name: data, mountPath: /home/ejabberd/database }
- { name: logs, mountPath: /home/ejabberd/logs }
- { name: conf, mountPath: /home/ejabberd/conf }
- { name: confmap, mountPath: /home/ejabberd/conf/ejabberd.yml, subPath: ejabberd.yml }
# optional inetrc (to silence the warning)
- { name: inetrc, mountPath: /home/ejabberd/conf/inetrc, subPath: inetrc, readOnly: true }
volumes:
- name: data
persistentVolumeClaim:
claimName: apolo-ejabberd-data
- name: logs
persistentVolumeClaim:
claimName: apolo-ejabberd-logs
- name: conf
emptyDir: {}
- name: confmap
configMap:
name: apolo-ejabberd-config
- name: tls
secret:
secretName: chat-tls
- name: inetrc
configMap:
name: apolo-ejabberd-inetrc
optional: true
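A quick way to confirm the initContainer produced a usable `chat.pem` (one private key followed by the certificate chain):

```bash
# Expect at least one certificate and exactly one private key block
kubectl -n apolo exec deploy/apolo-ejabberd -- sh -c \
  'grep -c "BEGIN CERTIFICATE" /home/ejabberd/conf/chat.pem; grep -c "PRIVATE KEY" /home/ejabberd/conf/chat.pem'
```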

View File

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-kms
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kms
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-kms
template:
metadata:
labels:
app.kubernetes.io/name: apolo-kms
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: kurento-media-server
image: harbor.c2et.com/xrf-ssl/xrf-kurento-media-server:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: apolo-kms-env
ports:
- name: ws
containerPort: 8888
protocol: TCP
readinessProbe:
tcpSocket: { port: 8888 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 8888 }
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-kurento-api
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kurento-api
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento-api
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-kurento-api
template:
metadata:
labels:
app.kubernetes.io/name: apolo-kurento-api
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento-api
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: kurento-api
image: harbor.c2et.com/xrf-ssl/xrf-kurento-api:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: apolo-kurento-api-config
- secretRef:
name: apolo-kurento-api-secret
ports:
- name: https
containerPort: 3000
protocol: TCP
volumeMounts:
- name: tls
mountPath: /app/keys
readOnly: true
volumes:
- name: tls
secret:
secretName: meeting-tls
items:
- key: tls.key
path: kurento.key
- key: tls.crt
path: kurento.crt
# some apps expect "ca.crt"; we reuse the fullchain
- key: tls.crt
path: ca.crt

View File

@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-mediamtx
namespace: apolo
labels:
app.kubernetes.io/name: apolo-mediamtx
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: media
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-mediamtx
template:
metadata:
labels:
app.kubernetes.io/name: apolo-mediamtx
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: media
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: mediamtx
image: harbor.c2et.com/xrf-ssl/xrf-media-server:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: apolo-mediamtx-env
ports:
- name: rtmp
containerPort: 1935
protocol: TCP
- name: rtsp
containerPort: 8554
protocol: TCP
- name: hls
containerPort: 8887
protocol: TCP
# Simple TCP probes on RTSP/RTMP. Adjust if your build exposes an HTTP health endpoint.
readinessProbe:
tcpSocket: { port: 8554 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 8554 }
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-nakama
namespace: apolo
labels:
app.kubernetes.io/name: apolo-nakama
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: nakama
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-nakama
template:
metadata:
labels:
app.kubernetes.io/name: apolo-nakama
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: nakama
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: nakama
image: harbor.c2et.com/xrf-ssl/xrf-nakama:6.0
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 7350
- name: grpc
containerPort: 7351
readinessProbe:
tcpSocket: { port: 7350 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 7350 }
initialDelaySeconds: 20
periodSeconds: 20
resources:
requests: { cpu: "100m", memory: "128Mi" }
limits: { cpu: "500m", memory: "512Mi" }

View File

@@ -0,0 +1,54 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-php
namespace: apolo
labels:
app.kubernetes.io/name: apolo-php
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: php
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-php
template:
metadata:
labels:
app.kubernetes.io/name: apolo-php
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: php
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: php-fpm
image: harbor.c2et.com/xrf-ssl/xrf-php:6.0
imagePullPolicy: IfNotPresent
ports:
- name: php-fpm
containerPort: 9000
envFrom:
- secretRef:
name: apolo-backend-secret
env:
- name: DATABASE_URL
valueFrom:
secretKeyRef:
name: apolo-db-url
key: DATABASE_URL
- name: MESSENGER_TRANSPORT_DSN
valueFrom:
secretKeyRef:
name: apolo-rabbit-dsn
key: MESSENGER_TRANSPORT_DSN
readinessProbe:
tcpSocket:
port: 9000
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 9000
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-portal
namespace: apolo
labels:
app.kubernetes.io/name: apolo-portal
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: portal
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-portal
template:
metadata:
labels:
app.kubernetes.io/name: apolo-portal
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: portal
spec:
imagePullSecrets:
- name: harbor-cred
containers:
# App container (as before)
- name: portal
image: harbor.c2et.com/xrf-ssl/xrf-portal-https:6.0
imagePullPolicy: IfNotPresent
ports:
- name: app
containerPort: 3000
readinessProbe:
httpGet: { path: /, port: 3000 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet: { path: /, port: 3000 }
initialDelaySeconds: 20
periodSeconds: 20
# NGINX sidecar with sub_filter
- name: nginx-proxy
image: nginx:latest
ports:
- name: http
containerPort: 8080
volumeMounts:
- name: proxy-conf
mountPath: /etc/nginx/conf.d/default.conf
subPath: default.conf
readinessProbe:
httpGet: { path: /, port: 8080 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet: { path: /, port: 8080 }
initialDelaySeconds: 20
periodSeconds: 20
volumes:
- name: proxy-conf
configMap:
name: apolo-portal-proxy
items:
- key: default.conf
path: default.conf

View File

@@ -0,0 +1,68 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-postgres
namespace: apolo
labels:
app.kubernetes.io/name: apolo-postgres
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: postgres
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-postgres
template:
metadata:
labels:
app.kubernetes.io/name: apolo-postgres
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: postgres
spec:
imagePullSecrets:
- name: harbor-cred
securityContext:
fsGroup: 999
initContainers:
- name: init-pgdata
image: busybox:1.36
command:
- sh
- -c
- |
mkdir -p /var/lib/postgresql/data/pgdata
chown -R 999:999 /var/lib/postgresql/data
chmod 700 /var/lib/postgresql/data/pgdata
volumeMounts:
- name: pgdata
mountPath: /var/lib/postgresql/data
securityContext:
runAsUser: 0
containers:
- name: postgres
image: harbor.c2et.com/xrf-ssl/xrf-db:6.0
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 999
runAsGroup: 999
env:
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
envFrom:
- secretRef:
name: apolo-db-secret
ports:
- containerPort: 5432
readinessProbe:
exec:
command: ["/bin/sh","-c","pg_isready -U $POSTGRES_USER -d $POSTGRES_DB -h 127.0.0.1"]
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 5432 }
initialDelaySeconds: 30
periodSeconds: 20
volumes:
- name: pgdata
persistentVolumeClaim:
claimName: apolo-pgdata
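Connectivity can be checked with the same tooling the probe uses, for example:

```bash
# The readiness check, run by hand
kubectl -n apolo exec deploy/apolo-postgres -- \
  sh -c 'pg_isready -h 127.0.0.1 -U "$POSTGRES_USER" -d "$POSTGRES_DB"'
# Interactive SQL session
kubectl -n apolo exec -it deploy/apolo-postgres -- \
  sh -c 'psql -U "$POSTGRES_USER" -d "$POSTGRES_DB"'
```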

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-rabbitmq
namespace: apolo
labels:
app.kubernetes.io/name: apolo-rabbitmq
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: rabbitmq
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-rabbitmq
template:
metadata:
labels:
app.kubernetes.io/name: apolo-rabbitmq
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: rabbitmq
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: rabbitmq
image: harbor.c2et.com/xrf-ssl/xrf-rabbitmq:6.0
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: apolo-rabbitmq-secret
ports:
- { name: amqp, containerPort: 5672 }
- { name: mgmt, containerPort: 15672 }
readinessProbe:
tcpSocket: { port: 5672 }
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 5672 }
initialDelaySeconds: 30
periodSeconds: 20
volumeMounts:
- name: data
mountPath: /var/lib/rabbitmq
volumes:
- name: data
persistentVolumeClaim:
claimName: apolo-rabbitmq-data

View File

@@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-streamer
namespace: apolo
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-streamer
template:
metadata:
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: streamer
image: harbor.c2et.com/xrf-ssl/xrf-streamer-server:6.0
imagePullPolicy: IfNotPresent
command: ["npm","start"]
envFrom:
- configMapRef:
name: apolo-streamer-config
- secretRef:
name: apolo-streamer-secret
ports:
- name: http
containerPort: 80
protocol: TCP
readinessProbe:
tcpSocket: { port: 80 }
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket: { port: 80 }
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-web
namespace: apolo
labels:
app.kubernetes.io/name: apolo-web
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: web
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-web
template:
metadata:
labels:
app.kubernetes.io/name: apolo-web
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: web
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: nginx
image: harbor.c2et.com/xrf-ssl/xrf-web:6.0
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
name: apolo-web-config
ports:
- name: http
containerPort: 80
readinessProbe:
tcpSocket:
port: 80
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 80
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,42 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: apolo-websocket
namespace: apolo
labels:
app.kubernetes.io/name: apolo-websocket
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: websocket
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: apolo-websocket
template:
metadata:
labels:
app.kubernetes.io/name: apolo-websocket
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: websocket
spec:
imagePullSecrets:
- name: harbor-cred
containers:
- name: websocket
image: harbor.c2et.com/xrf-ssl/xrf-websocket:6.0
imagePullPolicy: IfNotPresent
ports:
- name: ws
containerPort: 6001
- name: admin
containerPort: 9601
readinessProbe:
tcpSocket:
port: 6001
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
tcpSocket:
port: 6001
initialDelaySeconds: 20
periodSeconds: 20

View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apolo-colossus
namespace: apolo
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
spec:
ingressClassName: nginx
tls:
- hosts:
- colossus.apolo.c2et.net
secretName: colossus-tls
rules:
- host: colossus.apolo.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apolo-colossus
port:
number: 3000

View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apolo-meeting
namespace: apolo
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
spec:
ingressClassName: nginx
tls:
- hosts: [meeting.apolo.c2et.net]
secretName: meeting-tls
rules:
- host: meeting.apolo.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apolo-kurento-api
port:
number: 3000

View File

@@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apolo-portal
namespace: apolo
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- portal.apolo.c2et.net
secretName: portal-tls
rules:
- host: portal.apolo.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apolo-portal
port:
number: 8080

View File

@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apolo-backend
namespace: apolo
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
nginx.ingress.kubernetes.io/ssl-redirect: "true"
spec:
ingressClassName: nginx
tls:
- hosts:
- backend.apolo.c2et.net
secretName: backend-tls
rules:
- host: backend.apolo.c2et.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apolo-web
port:
number: 80

View File

@@ -0,0 +1,34 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apolo-backend-websocket
namespace: apolo
annotations:
nginx.ingress.kubernetes.io/ssl-redirect: "true"
# Keep WS connections open for a long time
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
# (Optional) Disable buffering for WS
nginx.ingress.kubernetes.io/proxy-buffering: "off"
# Traefik set X-Forwarded-Proto=wss; we replicate that here:
nginx.ingress.kubernetes.io/configuration-snippet: |
proxy_set_header X-Forwarded-Proto wss;
# Same ACL as the backend (VPN/LAN only)
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
spec:
ingressClassName: nginx
rules:
- host: backend.apolo.c2et.net
http:
paths:
- path: /app
pathType: Prefix
backend:
service:
name: apolo-websocket
port:
number: 6001
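A raw WebSocket handshake test; the `/app/<key>` path follows the Pusher protocol convention used by the backend config, with the app key taken from `PUSHER_KEY` in `apolo-backend-secret`:

```bash
# A healthy endpoint answers "HTTP/1.1 101 Switching Protocols"
curl -i -N 'https://backend.apolo.c2et.net/app/xrf-qa2LW55393Ok-external' \
  -H 'Connection: Upgrade' -H 'Upgrade: websocket' \
  -H 'Sec-WebSocket-Version: 13' -H 'Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ=='
```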

View File

@@ -1,8 +1,91 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Force the default namespace (the manifests already set it,
# but this covers any that do not).
namespace: apolo
# A common label to make filtering easier
commonLabels:
app.kubernetes.io/part-of: apolo
resources:
# Namespace
- namespace.yaml
# Certificates (cert-manager already installed)
- certs/certificate-chat.yaml
- certs/certificate-meeting.yaml
# ConfigMaps
- configmaps/configmap-coredns.yaml
- configmaps/configmap-ejabberd-inetrc.yaml
- configmaps/configmap-ejabberd.yaml
- configmaps/configmap-kms-api.yaml
- configmaps/configmap-kms.yaml
- configmaps/configmap-mediamtx.yaml
- configmaps/configmap-portal-proxy.yaml
- configmaps/configmap-streamer.yaml
- configmaps/configmap-web.yaml
# Secrets (already created as manifests)
- secrets/secret-harbor-cred.yaml
- secrets/secret-backend.yaml
- secrets/secret-dburl.yaml
- secrets/secret-db.yaml
- secrets/secret-drone.yaml
- secrets/secret-ejabberd.yaml
- secrets/secret-kms-api.yaml
- secrets/secret-rabbit-dsn.yaml
- secrets/secret-rabbitmq.yaml
- secrets/secret-streamer.yaml
# PVCs
- pvc/pvc-ejabberd.yaml
- pvc/pvc-postgres.yaml
- pvc/pvc-rabbitmq.yaml
# Deployments
- deployments/deploy-app6.yaml
- deployments/deploy-colossus.yaml
- deployments/deploy-consumer.yaml
- deployments/deploy-coredns.yaml
- deployments/deploy-drone.yaml
- deployments/deploy-ejabberd.yaml
- deployments/deploy-kms.yaml
- deployments/deploy-kurento-api.yaml
- deployments/deploy-mediamtx.yaml
- deployments/deploy-nakama.yaml
- deployments/deploy-php.yaml
- deployments/deploy-portal.yaml
- deployments/deploy-postgres.yaml
- deployments/deploy-rabbitmq.yaml
- deployments/deploy-streamer.yaml
- deployments/deploy-websocket.yaml
- deployments/deploy-web.yaml
# Services
- services/svc-aliases-compose.yaml
- services/svc-app6.yaml
- services/svc-colossus.yaml
- services/svc-coredns.yaml
- services/svc-ejabberd.yaml
- services/svc-kms.yaml
- services/svc-kurento-api.yaml
- services/svc-mediamtx.yaml
- services/svc-nakama.yaml
- services/svc-php.yaml
- services/svc-portal.yaml
- services/svc-postgres.yaml
- services/svc-rabbitmq.yaml
- services/svc-streamer-lb.yaml
- services/svc-streamer.yaml
- services/svc-websocket.yaml
- services/svc-web.yaml
# Ingress
- ingress/ingress-colossus.yaml
- ingress/ingress-kurento-api.yaml
- ingress/ingress-portal.yaml
- ingress/ingress-websocket.yaml
- ingress/ingress-web.yaml

View File

@@ -1,3 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: apolo

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apolo-ejabberd-logs
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
accessModes:
- ReadWriteOnce
storageClassName: ceph-rbd
resources:
requests:
storage: 5Gi

View File

@@ -0,0 +1,29 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apolo-ejabberd-data
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ceph-rbd
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apolo-ejabberd-logs
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ceph-rbd
resources:
requests:
storage: 5Gi

View File

@@ -8,7 +8,7 @@ metadata:
app.kubernetes.io/component: postgres
spec:
accessModes:
- ReadWriteOnce
storageClassName: ceph-rbd
resources:
requests:

View File

@@ -1,15 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: apolo-rabbitmq-data
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: rabbitmq
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: ceph-rbd
resources:
requests:
storage: 20Gi

apolo/readme.md Normal file
View File

@@ -0,0 +1,173 @@
# Apolo SSL Kubernetes Manifests
**Apolo** manifests for Kubernetes with Kustomize. Includes the backend (PHP + Postgres + RabbitMQ), web services, WebSocket, an internal CoreDNS for the VPN, chat (ejabberd), Nakama, and the **streaming** stack (MediaMTX, Kurento Media Server and its API, Streamer). Certificates via **cert-manager**, exposure through **ingress-nginx** and **MetalLB**.
> Single-command deployment:
>
> ```bash
> kubectl apply -k .
> ```
---
## Requirements
* **Kubernetes** 1.24+ (tested with containerd).
* **MetalLB** configured, with a range for the LB IPs (e.g. `192.168.200.0/24`).
* **ingress-nginx** deployed. This environment uses two Services:
* `ingress-nginx/ingress-nginx-controller` → public IP (e.g. `192.168.0.100`)
* `ingress-nginx/ingress-nginx-controller-200` → **VPN IP** `192.168.200.10` (internal traffic)
* **cert-manager** with a `ClusterIssuer` named `letsencrypt-prod`.
* An available **StorageClass** (`ceph-rbd`) for the Postgres, RabbitMQ, and ejabberd PVCs.
* (For VPN access) **WireGuard** + NAT from the VPN subnet to the cluster / internal networks.
* Access to the private **Harbor** registry (a `harbor-cred` Secret is created).
---
## Structure
```
certs/ # cert-manager Certificates
configmaps/ # ConfigMaps for each service (web, portal, CoreDNS, etc.)
deployments/ # Deployments for each component
ingress/ # Ingress (TLS, VPN whitelists, etc.)
pvc/ # PersistentVolumeClaims (Postgres, Rabbit, ejabberd)
secrets/ # Application Secrets (do not commit private credentials)
services/ # Services (ClusterIP / LoadBalancer) and docker-compose aliases
namespace.yaml # apolo Namespace
kustomization.yaml
```
---
## Internal DNS for the VPN
A dedicated **CoreDNS** is exposed on LB IP `192.168.200.11`, resolving:
* `chat.apolo.c2et.net`
* `muc.chat.apolo.c2et.net`
* `portal.apolo.c2et.net`
* `colossus.apolo.c2et.net`
* `streaming.apolo.c2et.net`
* `meeting.apolo.c2et.net`
* `backend.apolo.c2et.net`
> VPN clients should set their **DNS** to `192.168.200.11`.
---
## TLS certificates
* `certs/` contains `Certificate` resources for domains such as `chat.*` and `meeting.*`.
* The remaining hosts are handled via Ingress, with `tls.secretName` issued by `letsencrypt-prod`.
---
## Whitelists and access
Sensitive Ingresses include:
```
nginx.ingress.kubernetes.io/whitelist-source-range: "192.168.200.0/24,10.244.0.0/16,192.168.4.0/24"
```
* **192.168.200.0/24**: clients over the **VPN**
* **10.244.0.0/16**: **intra-cluster** traffic
* **192.168.4.0/24**: local nodes/infra (adjust as needed; see the quick check below)
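A quick way to see the whitelist in action from both sides of the boundary:

```bash
# From an allowed range (e.g. a VPN client): expect 200/302
curl -sk -o /dev/null -w '%{http_code}\n' https://backend.apolo.c2et.net/
# From any source outside the whitelist, ingress-nginx answers 403
```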
---
## Deployment
```bash
# Namespace and the whole stack
kubectl apply -k .
# Watch pods
kubectl -n apolo get pods -w
```
### Service IPs (example)
* Internal ingress (nginx): `192.168.200.10`
* Internal CoreDNS: `192.168.200.11`
* MediaMTX (streaming): `192.168.200.12`
* Ejabberd (chat): `192.168.200.13`
* Kurento (KMS): `192.168.200.14`
---
## Operations
* **Scale**:
```bash
kubectl -n apolo scale deploy/apolo-web --replicas=2
```
* **Update an image**:
* Change the tag in the `Deployment` and apply `kubectl apply -k .`
* **Logs**:
```bash
kubectl -n apolo logs deploy/apolo-web -f
```
* **Ingress / TLS**:
```bash
kubectl -n apolo describe ingress apolo-backend
kubectl -n apolo get certificate
kubectl -n apolo describe certificate <name>
```
---
## Notes and gotchas
* **Postgres PV**: the container requires a `PGDATA` subdirectory. This is handled with an `initContainer` to avoid the `lost+found` problem when mounting the PVC.
* **Web health checks**: the `readiness/liveness` probes use `/`. Some containers return 404 at the root; this is tuned so they do not restart in a loop.
* **ejabberd**:
* Volume permissions: mounted with the proper `fsGroup`; if you see `permission denied`, check `runAsUser/runAsGroup/fsGroup` and the owner of the container paths.
* TLS: the `initContainer` concatenates `tls.key + tls.crt → chat.pem` (the format ejabberd expects).
* **.com vs .net**:
* The **Portal** may talk to `backend.apolo.c2et.com`; the VPN/CoreDNS "translates" it to the internal IP (192.168.200.10) without touching certificates.
* If you later migrate everything to `.com`, change the Ingress `hosts` and the `Certificate`s.
---
## Quick troubleshooting
* See why an Ingress returns 403/401/404:
```bash
kubectl -n ingress-nginx logs deploy/ingress-nginx-controller -f
kubectl -n apolo describe ingress apolo-backend
```
* Inspect a pod's events:
```bash
kubectl -n apolo describe pod <pod>
```
* Check environment variables in PHP (DB/Rabbit connectivity):
```bash
kubectl -n apolo exec deploy/apolo-php -- printenv | egrep 'DATABASE_URL|MESSENGER_TRANSPORT_DSN'
```
View File

@@ -0,0 +1,155 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-backend-secret
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: php
type: Opaque
stringData:
APP_ENV: "prod"
APP_DEBUG: "0"
APP_SECRET: "d64fea16476693f5a8fa7d4b3a2b8b83"
INTERNET_ALLOWED: "false"
ADMIN_USERNAME: "admin"
ADMIN_PASSWORD: "123456"
# NOTE: we point at the Postgres Service in K8s
DATABASE_URL: "pgsql://xrf_user:nxVZGx80231tQ3@apolo-postgres:5432/xrf_db"
# CORS (temporarily broad; we will tighten it later)
CORS_ALLOW_ORIGIN: "^.*$"
MESSENGER_TRANSPORT_DSN: "amqp://xrf_user:cDyj6e8ql6R0ZQHu@xrf-rabbitmq:5672/%2f"
MESSENGER_EXCHANGE: "xrf_exchange"
MESSENGER_VHOST: "xrf_vhost"
JWT_SECRET_KEY: "%kernel.project_dir%/config/jwt/private.pem"
JWT_PUBLIC_KEY: "%kernel.project_dir%/config/jwt/public.pem"
JWT_PASSPHRASE: "g3bed4qa52lw5cxay3hzs58tzjkm43ot"
JWT_ACCESS_TOKEN_EXPIRATION: "86400"
JWT_REFRESH_TOKEN_EXPIRATION: "788400000"
MIN_PASSWORD_LENGTH: "6"
PASSWORD_STRENGTH: "weak"
PASSWORD_FORMAT: "alphanumeric"
PASSWORD_RESET_EXPIRATION: "1440"
EMAIL_CONFIRMATION_EXPIRATION: "1440"
MAILER_DSN: "smtp://AKIAXYKJQE4JML3F2XEX:BMAH8%2FLZwfs0wGTfAMXFzsRrSuUMgYzq4h%2FKpzG2po4m@email-smtp.eu-west-1.amazonaws.com:587"
MAILER_DEFAULT_SENDER: "noreply@xrf.ai"
EMAIL_LOGO: ""
EMAIL_BACKGROUND: ""
AWS_BUCKET: ""
AWS_KEY: ""
AWS_SECRET: ""
AWS_REGION: "eu-west-1"
AWS_S3_LINK_EXPIRATION: "+24 hours"
AWS_CHIME_REGION: "eu-west-1"
AWS_CHIME_KEY: ""
AWS_CHIME_SECRET: ""
MONOLOG_SWIFT_FROM: "noreply@xrf.ai"
MONOLOG_SWIFT_TO: "notifications@xrf.ai"
MONOLOG_SWIFT_SUBJECT: "[XRSandbox][PRO]"
# .c2et.net domain
FRONTEND_HOST: "https://portal.apolo.c2et.net"
RESET_PASSWORD_EXPIRATION_MINUTES: "1440"
CHANGE_EMAIL_EXPIRATION_MINUTES: "1440"
USER_ROOM_INACTIVITY_MINUTES: "5"
ENVIRONMENT: "development"
UNION_AVATAR_USER: ""
UNION_AVATAR_PASSWORD: ""
UNION_AVATAR_OUTPUT_FORMAT: "fbx"
UNION_AVATAR_BASE_URL: ""
UNION_AVATAR_COLLECTION: ""
UNION_AVATAR_CONFIG: "[]"
BROADCAST_DRIVER: "pusher"
PUSHER_APP_ID: "37248458"
PUSHER_KEY: "xrf-qa2LW55393Ok-external"
PUSHER_SECRET: "n0Gb43G4eLi39wZz"
PUSHER_CLUSTER: "eu"
PUSHER_HOST: "backend.apolo.c2et.net"
PUSHER_PORT: "6001"
PUSHER_SCHEME: "wss"
PUSHER_ENABLED: "yes"
WEBSOCKET_CONFIGURATION: "{\"enabled\":true,\"connections\":{\"client\":{\"enabled\":true,\"key\":\"xrf-qa2LW55393Ok-external\",\"host\":\"backend.apolo.c2et.net\",\"port\":443,\"scheme\":\"wss\"},\"system\":[],\"external\":[]}}"
FFT_NODE_URL: "http://localhost:8080"
PLAYER_ASSET: "https://backend.apolo.c2et.net/media/TriangulatedAvatar.fbx"
MAX_CONTENT_DISTANCE: "10000"
MONITORING_DSN: ""
MONITORING_EMAIL_TO: "[]"
SENTRY_DSN: ""
SENTRY_TRACES_SAMPLE_RATE: "1.0"
SENTRY_ALLOWED_EVENTS: "[]"
SENTRY_ENVIRONMENT: "production"
MAVLINK_SERVICE_URL: "https://backend.apolo.c2et.net"
MAVLINK_MAX_DISTANCE: "10000"
MAVLINK_MAX_HEIGHT: "10000"
FFMPEG_SERVICE_KEY: "e436b20441e41efb60872da285a13b55ac9a02acf6dfa46604d9af071b26c1bc"
FFMPEG_EC2_INSTANCE: ""
FFMPEG_EC2_REGION: "eu-west-1"
API_KEY_SECRET: "my-secret"
SECURITY_LEVEL: "CommercialStandards"
VIMEO_FOLDER: ""
VIMEO_ID: ""
VIMEO_KEY: ""
VIMEO_SECRET: ""
VIMEO_TOKEN: ""
VIMEO_ENABLED: "true"
NVGServerUrl: "http://3.253.103.103:18083"
AI_ASSISTANT_EMAIL: "noreply@xrf.ai"
AI_ASSISTANT_TOKEN: ""
AI_ASSISTANT_URL: ""
HERMES_GOLD: "dev/gold/medatada_test_files"
HERMES_PROFILE: "default_profile"
HERMES_SILVER: "dev/silver/metadata_test_files"
MAINTENANCE_USERS: "[]"
MENU_FEATURES: "[\"Map\",\"FullScreen\",\"Locations\",\"Tools\"]"
ODM_TOKEN: "06a5a50cda6a43abb1a7c4bd0bfcedcdd97ee468a75e27c78cbdbb2916ffc7f3f633e080841155cd2bfee6052c1f69cdddd88919c71f4378a7a4edf4"
ROOM_ACTIVITY_MAX_TIME: "120"
ROOM_FEATURES: "[\"ChangeRoom\",\"EditRoom\"]"
FFMPEG_DEFAULT_STREAMING: "https://backend.apolo.c2et.net/fallback"
FFMPEG_SERVICE_ENABLED: "yes"
FFMPEG_SERVICE_HIGH_BITRATE: "8000"
FFMPEG_SERVICE_HIGH_RESOLUTION: "1920x1080"
FFMPEG_SERVICE_LOW_BITRATE: "800"
FFMPEG_SERVICE_LOW_RESOLUTION: "640x360"
FFMPEG_SERVICE_MEDIUM_BITRATE: "4000"
FFMPEG_SERVICE_SSL_URL: "http://streaming.apolo.c2et.net"
FFMPEG_SERVICE_URL: "http://streaming.apolo.c2et.net"
XMPP_API_PORT: "5443"
XMPP_CHAT_HOST: "muc.chat.apolo.c2et.net"
XMPP_HOST: "chat.apolo.c2et.net"
XMPP_PASS: "Tv7RfaP4KZXizdq"
XMPP_USER: "admin@chat.apolo.c2et.net"
XMPP_WS_PORT: "5443"
XMPP_TCP_PORT: "5222"
MEETING_SERVICE_ENABLED: "yes"
MEETING_SERVICE_URL: "wss://meeting.apolo.c2et.net:3000/ws"

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-db-secret
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: postgres
type: Opaque
stringData:
POSTGRES_USER: xrf_user
POSTGRES_PASSWORD: nxVZGx80231tQ3
POSTGRES_DB: xrf_db

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-db-url
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
type: Opaque
stringData:
DATABASE_URL: pgsql://xrf_user:nxVZGx80231tQ3@apolo-postgres:5432/xrf_db

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-drone-secret
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: drone
type: Opaque
stringData:
# Equivalent to your drone.env (adapted to K8s)
MESSENGER_USER: xrf_user
MESSENGER_PASSWORD: cDyj6e8ql6R0ZQHu
MESSENGER_SERVER: apolo-rabbitmq
MESSENGER_PORT: "5672"
# In docker-compose this was %2f (vhost "/"). Here we use your real vhost:
MESSENGER_VHOST: xrf_vhost

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-ejabberd-secret
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
type: Opaque
stringData:
XRF_URL: "http://xrf-web:80"
CHAT_ADMIN: "admin"
CHAT_ADMIN_PASSWORD: "Tv7RfaP4KZXizdq"
CHAT_DOMAIN: "chat.apolo.c2et.net"
XRF_TOKEN: "214defebd6ed636c801845f0b56968799de60d6a4fdac20a0e6a2287ca3ae58c9f542e61b98d047b5e7a1764d5226810ff33ee5678cefdea15705eb2"

View File

@@ -0,0 +1,9 @@
apiVersion: v1
data:
.dockerconfigjson: eyJhdXRocyI6eyJoYXJib3IuYzJldC5jb20iOnsidXNlcm5hbWUiOiJ4YXZvciIsInBhc3N3b3JkIjoiTUBuYWJvMjAyNSIsImVtYWlsIjoibm8tcmVwbHlAYzJldC5jb20iLCJhdXRoIjoiZUdGMmIzSTZUVUJ1WVdKdk1qQXlOUT09In19fQ==
kind: Secret
metadata:
creationTimestamp: null
name: harbor-cred
namespace: apolo
type: kubernetes.io/dockerconfigjson

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-kurento-api-secret
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kurento-api
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento-api
type: Opaque
stringData:
DASHBOARD_PASSWORD: "mgebdCDT"

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-rabbit-dsn
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
type: Opaque
stringData:
# Using the correct vhost: xrf_vhost
MESSENGER_TRANSPORT_DSN: amqp://xrf_user:cDyj6e8ql6R0ZQHu@apolo-rabbitmq:5672/xrf_vhost

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-rabbitmq-secret
namespace: apolo
labels:
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: rabbitmq
type: Opaque
stringData:
RABBITMQ_ERLANG_COOKIE: wa13JmE4YWFUdxW5F7g1DFsrQhd0I3D7
RABBITMQ_DEFAULT_USER: xrf_user
RABBITMQ_DEFAULT_PASS: cDyj6e8ql6R0ZQHu
RABBITMQ_DEFAULT_VHOST: xrf_vhost

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Secret
metadata:
name: apolo-streamer-secret
namespace: apolo
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
type: Opaque
stringData:
TOKEN_SECRET: "4709bdaa95ce976ec89a4d0fa159be54cf138508ae6ef3aee152234af8cd7b03884019b5447ce3a94da3ab458ab9800462045f94aae8a459ef6db7628287e2f0"
API_KEY: "e436b20441e41efb60872da285a13b55ac9a02acf6dfa46604d9af071b26c1bc"
XRF_TOKEN: "aae0be610505880019630c98b46bcf1510ba0ad5ff7b2cf401b654529d95320297dcca04a9fd1d4fce4edb6769093ce6e7dccb1899c21f335530cfc7"
PUSHER_APP_ID: "30781558"
PUSHER_KEY: "xrf-7J58roVrSvdb-client"
PUSHER_SECRET: "Y8C7wRgv5T9K2ms8"

View File

@@ -0,0 +1,47 @@
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: Service
metadata:
name: xrf-db
namespace: apolo
spec:
type: ExternalName
externalName: apolo-postgres.apolo.svc.cluster.local
- apiVersion: v1
kind: Service
metadata:
name: xrf-rabbitmq
namespace: apolo
spec:
type: ExternalName
externalName: apolo-rabbitmq.apolo.svc.cluster.local
- apiVersion: v1
kind: Service
metadata:
name: xrf-php
namespace: apolo
spec:
type: ExternalName
externalName: apolo-php.apolo.svc.cluster.local
- apiVersion: v1
kind: Service
metadata:
name: xrf-web
namespace: apolo
spec:
type: ExternalName
externalName: apolo-web.apolo.svc.cluster.local
- apiVersion: v1
kind: Service
metadata:
name: xrf-app6
namespace: apolo
spec:
type: ExternalName
externalName: apolo-app6.apolo.svc.cluster.local
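These ExternalName aliases keep the compose-era hostnames working. A quick resolution check from a throwaway pod (assuming busybox's nslookup; output formatting varies):

```bash
kubectl -n apolo run dns-check --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup xrf-db.apolo.svc.cluster.local
# Should resolve via CNAME to apolo-postgres.apolo.svc.cluster.local
```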

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-app6
namespace: apolo
labels:
app.kubernetes.io/name: apolo-app6
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: app6
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-app6
ports:
- name: tcp-app6
port: 2525
targetPort: 2525
protocol: TCP

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-colossus
namespace: apolo
labels:
app.kubernetes.io/name: apolo-colossus
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: colossus
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-colossus
ports:
- name: http
port: 3000
targetPort: 3000

View File

@@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: coredns-custom
namespace: apolo
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.11
selector:
app: coredns-custom
ports:
- name: dns-udp
port: 53
targetPort: 53
protocol: UDP
- name: dns-tcp
port: 53
targetPort: 53
protocol: TCP

View File

@@ -0,0 +1,38 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-ejabberd
namespace: apolo
labels:
app.kubernetes.io/name: apolo-ejabberd
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-ejabberd
ports:
- { name: c2s, port: 5222, targetPort: 5222, protocol: TCP }
- { name: s2s, port: 5269, targetPort: 5269, protocol: TCP }
- { name: https, port: 5443, targetPort: 5443, protocol: TCP }
- { name: http, port: 5280, targetPort: 5280, protocol: TCP }
---
apiVersion: v1
kind: Service
metadata:
name: apolo-ejabberd-lb
namespace: apolo
labels:
app.kubernetes.io/name: apolo-ejabberd
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: ejabberd
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.13
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: apolo-ejabberd
ports:
- { name: c2s, port: 5222, targetPort: 5222, protocol: TCP }
- { name: s2s, port: 5269, targetPort: 5269, protocol: TCP }
- { name: https, port: 5443, targetPort: 5443, protocol: TCP }

View File

@@ -0,0 +1,41 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-kms
namespace: apolo
labels:
app.kubernetes.io/name: apolo-kms
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: kurento
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.14
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: apolo-kms
ports:
- name: ws
port: 8888
targetPort: 8888
protocol: TCP
# --- UDP range (initially 40000-40019). If all goes well, extend up to 40099; see the loop after this Service ---
- { name: udp-40000, port: 40000, targetPort: 40000, protocol: UDP }
- { name: udp-40001, port: 40001, targetPort: 40001, protocol: UDP }
- { name: udp-40002, port: 40002, targetPort: 40002, protocol: UDP }
- { name: udp-40003, port: 40003, targetPort: 40003, protocol: UDP }
- { name: udp-40004, port: 40004, targetPort: 40004, protocol: UDP }
- { name: udp-40005, port: 40005, targetPort: 40005, protocol: UDP }
- { name: udp-40006, port: 40006, targetPort: 40006, protocol: UDP }
- { name: udp-40007, port: 40007, targetPort: 40007, protocol: UDP }
- { name: udp-40008, port: 40008, targetPort: 40008, protocol: UDP }
- { name: udp-40009, port: 40009, targetPort: 40009, protocol: UDP }
- { name: udp-40010, port: 40010, targetPort: 40010, protocol: UDP }
- { name: udp-40011, port: 40011, targetPort: 40011, protocol: UDP }
- { name: udp-40012, port: 40012, targetPort: 40012, protocol: UDP }
- { name: udp-40013, port: 40013, targetPort: 40013, protocol: UDP }
- { name: udp-40014, port: 40014, targetPort: 40014, protocol: UDP }
- { name: udp-40015, port: 40015, targetPort: 40015, protocol: UDP }
- { name: udp-40016, port: 40016, targetPort: 40016, protocol: UDP }
- { name: udp-40017, port: 40017, targetPort: 40017, protocol: UDP }
- { name: udp-40018, port: 40018, targetPort: 40018, protocol: UDP }
- { name: udp-40019, port: 40019, targetPort: 40019, protocol: UDP }
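Rather than typing the remaining entries by hand, the port list up to 40099 (matching `KMS_MAX_PORT`) can be generated and pasted in:

```bash
# Emit the udp-40020..udp-40099 Service port entries in the same one-line style
for p in $(seq 40020 40099); do
  printf -- '- { name: udp-%s, port: %s, targetPort: %s, protocol: UDP }\n' "$p" "$p" "$p"
done
```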

View File

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-kurento-api
namespace: apolo
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-kurento-api
ports:
- name: https
port: 3000
targetPort: 3000

View File

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-mediamtx
namespace: apolo
labels:
app.kubernetes.io/name: apolo-mediamtx
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: media
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.12
externalTrafficPolicy: Local
selector:
app.kubernetes.io/name: apolo-mediamtx
ports:
- name: rtmp
port: 1935
targetPort: 1935
protocol: TCP
- name: rtsp
port: 8554
targetPort: 8554
protocol: TCP
- name: hls
port: 8887
targetPort: 8887
protocol: TCP

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-nakama
namespace: apolo
labels:
app.kubernetes.io/name: apolo-nakama
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: nakama
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-nakama
ports:
- name: http
port: 7350
targetPort: 7350
protocol: TCP
- name: grpc
port: 7351
targetPort: 7351
protocol: TCP

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-php
namespace: apolo
labels:
app.kubernetes.io/name: apolo-php
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: php
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-php
ports:
- name: fpm
port: 9000
targetPort: 9000

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-portal
namespace: apolo
labels:
app.kubernetes.io/name: apolo-portal
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: portal
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-portal
ports:
- name: http
port: 8080
targetPort: 8080

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-postgres
namespace: apolo
labels:
app.kubernetes.io/name: apolo-postgres
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: postgres
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-postgres
ports:
- name: pg
port: 5432
targetPort: 5432

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-rabbitmq
namespace: apolo
labels:
app.kubernetes.io/name: apolo-rabbitmq
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: rabbitmq
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-rabbitmq
ports:
- name: amqp
port: 5672
targetPort: 5672
protocol: TCP
- name: mgmt
port: 15672
targetPort: 15672
protocol: TCP

View File

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-streamer-lb
namespace: apolo
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
spec:
type: LoadBalancer
loadBalancerIP: 192.168.200.12
selector:
app.kubernetes.io/name: apolo-streamer
ports:
- name: http
port: 80
targetPort: 80

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-streamer
namespace: apolo
labels:
app.kubernetes.io/name: apolo-streamer
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: streamer
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-streamer
ports:
- name: http
port: 80
targetPort: 80

View File

@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-web
namespace: apolo
labels:
app.kubernetes.io/name: apolo-web
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: web
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-web
ports:
- name: http
port: 80
targetPort: 80

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: apolo-websocket
namespace: apolo
labels:
app.kubernetes.io/name: apolo-websocket
app.kubernetes.io/part-of: apolo
app.kubernetes.io/component: websocket
spec:
type: ClusterIP
selector:
app.kubernetes.io/name: apolo-websocket
ports:
- name: ws
port: 6001
targetPort: 6001
protocol: TCP
- name: admin
port: 9601
targetPort: 9601
protocol: TCP

View File

@@ -1 +0,0 @@
Username: