forked from Nixius/authelia
1
0
Fork 0

Switch customer stack product to Uptime Kuma

Each customer now receives a dedicated Uptime Kuma monitoring instance
at their subdomain. Drops the unused Redis sidecar from the template.

Made-with: Cursor
This commit is contained in:
Leopere 2026-03-03 16:49:50 -05:00
parent a92cbe9b72
commit e71831cf9d
Signed by: colin
SSH Key Fingerprint: SHA256:nRPCQTeMFLdGytxRQmPVK9VXY3/ePKQ5lGRyJhT5DY8
1 changed file with 11 additions and 22 deletions

View File

@ -1,11 +1,14 @@
# ============================================================================= # =============================================================================
# CUSTOMER STACK TEMPLATE # CUSTOMER STACK TEMPLATE — Uptime Kuma
# ============================================================================= # =============================================================================
# This is the Docker Swarm stack that gets deployed for each paying customer. # This is the Docker Swarm stack that gets deployed for each paying customer.
# It defines what product/service they receive when they subscribe. # It defines what product/service they receive when they subscribe.
# #
# REPLACE the `web` service image (currently traefik/whoami as a placeholder) # PRODUCT: Uptime Kuma — a self-hosted uptime/monitoring dashboard.
# with the actual application you are selling. # Each customer gets their own isolated instance at their subdomain.
#
# To sell a different product, replace the `web` service image and adjust
# the port in the Traefik loadbalancer label accordingly.
# #
# Template variables (injected at deploy time by swarm/client.go): # Template variables (injected at deploy time by swarm/client.go):
# {{.ID}} - customer's username (used for unique resource naming) # {{.ID}} - customer's username (used for unique resource naming)
@ -21,9 +24,9 @@ version: "3.8"
services: services:
web: web:
image: traefik/whoami:latest image: louislam/uptime-kuma:1
environment: volumes:
WHOAMI_NAME: "{{.Subdomain}}" - uptime_data:/app/data
networks: networks:
- traefik_net - traefik_net
deploy: deploy:
@ -35,19 +38,7 @@ services:
traefik.http.routers.customer-{{.ID}}-web.entrypoints: "websecure" traefik.http.routers.customer-{{.ID}}-web.entrypoints: "websecure"
traefik.http.routers.customer-{{.ID}}-web.tls: "true" traefik.http.routers.customer-{{.ID}}-web.tls: "true"
traefik.http.routers.customer-{{.ID}}-web.middlewares: "authelia-auth@swarm" traefik.http.routers.customer-{{.ID}}-web.middlewares: "authelia-auth@swarm"
traefik.http.services.customer-{{.ID}}-web.loadbalancer.server.port: "80" traefik.http.services.customer-{{.ID}}-web.loadbalancer.server.port: "3001"
restart_policy:
condition: on-failure
redis:
image: redis:7-alpine
command: redis-server --appendonly yes
volumes:
- redis_data:/data
networks:
- backend
deploy:
replicas: 1
restart_policy: restart_policy:
condition: on-failure condition: on-failure
@ -55,9 +46,7 @@ networks:
traefik_net: traefik_net:
external: true external: true
name: "atlas_{{.TraefikNetwork}}" name: "atlas_{{.TraefikNetwork}}"
backend:
driver: overlay
volumes: volumes:
redis_data: uptime_data:
driver: local driver: local