forked from Nixius/authelia
1
0
Fork 0

Switch customer stack product to Uptime Kuma

Each customer now receives a dedicated Uptime Kuma monitoring instance
at their subdomain. Drops the unused Redis sidecar from the template.

Made-with: Cursor
This commit is contained in:
Leopere 2026-03-03 16:49:50 -05:00
parent a92cbe9b72
commit e71831cf9d
Signed by: colin
SSH Key Fingerprint: SHA256:nRPCQTeMFLdGytxRQmPVK9VXY3/ePKQ5lGRyJhT5DY8
1 changed file with 11 additions and 22 deletions

View File

@@ -1,11 +1,14 @@
# =============================================================================
# CUSTOMER STACK TEMPLATE
# CUSTOMER STACK TEMPLATE — Uptime Kuma
# =============================================================================
# This is the Docker Swarm stack that gets deployed for each paying customer.
# It defines what product/service they receive when they subscribe.
#
# REPLACE the `web` service image (currently traefik/whoami as a placeholder)
# with the actual application you are selling.
# PRODUCT: Uptime Kuma — a self-hosted uptime/monitoring dashboard.
# Each customer gets their own isolated instance at their subdomain.
#
# To sell a different product, replace the `web` service image and adjust
# the port in the Traefik loadbalancer label accordingly.
#
# Template variables (injected at deploy time by swarm/client.go):
# {{.ID}} - customer's username (used for unique resource naming)
@@ -21,9 +24,9 @@ version: "3.8"
services:
web:
image: traefik/whoami:latest
environment:
WHOAMI_NAME: "{{.Subdomain}}"
image: louislam/uptime-kuma:1
volumes:
- uptime_data:/app/data
networks:
- traefik_net
deploy:
@@ -35,19 +38,7 @@ services:
traefik.http.routers.customer-{{.ID}}-web.entrypoints: "websecure"
traefik.http.routers.customer-{{.ID}}-web.tls: "true"
traefik.http.routers.customer-{{.ID}}-web.middlewares: "authelia-auth@swarm"
traefik.http.services.customer-{{.ID}}-web.loadbalancer.server.port: "80"
restart_policy:
condition: on-failure
redis:
image: redis:7-alpine
command: redis-server --appendonly yes
volumes:
- redis_data:/data
networks:
- backend
deploy:
replicas: 1
traefik.http.services.customer-{{.ID}}-web.loadbalancer.server.port: "3001"
restart_policy:
condition: on-failure
@@ -55,9 +46,7 @@ networks:
traefik_net:
external: true
name: "atlas_{{.TraefikNetwork}}"
backend:
driver: overlay
volumes:
redis_data:
uptime_data:
driver: local