Compare commits


No commits in common. "a4f41768bad8d36aae0ef68908b17981b9e8f2c2" and "6d5abae5826cba934524d76e6bb2408f0aaa64b8" have entirely different histories.

25 changed files with 101 additions and 1717 deletions

View File

@@ -10,11 +10,11 @@ jobs:
go-version: 1.25.x
- uses: actions/setup-node@v6
with:
node-version: '24'
node-version: '22'
- uses: actions/checkout@v5
- run: (cd ui && yarn)
- run: make build-js
- uses: golangci/golangci-lint-action@v9
- uses: golangci/golangci-lint-action@v8
with:
version: v2.4.0
args: --timeout=5m

View File

@@ -6,7 +6,6 @@ import (
"net/http"
"os"
"path/filepath"
"strings"
"github.com/gin-gonic/gin"
"github.com/gotify/server/v2/auth"
@@ -431,7 +430,6 @@ func (a *ApplicationAPI) RemoveApplicationImage(ctx *gin.Context) {
func withResolvedImage(app *model.Application) *model.Application {
if app.Image == "" {
// This must stay in sync with the isDefaultImage check in ui/src/application/Applications.tsx.
app.Image = "static/defaultapp.png"
} else {
app.Image = "image/" + app.Image
@@ -461,7 +459,7 @@ func generateNonExistingImageName(imgDir string, gen func() string) string {
}
func ValidApplicationImageExt(ext string) bool {
switch strings.ToLower(ext) {
switch ext {
case ".gif", ".png", ".jpg", ".jpeg":
return true
default:
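An aside on this hunk: `filepath.Ext` preserves the caller's casing, so the plain `switch ext` form rejects uppercase extensions such as `.PNG` that the `strings.ToLower` variant accepts. A standalone sketch of the case-insensitive form (the `validImageExt` name is illustrative, not from the codebase):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// validImageExt normalizes the extension before matching, so ".PNG" and
// ".png" are treated alike.
func validImageExt(ext string) bool {
	switch strings.ToLower(ext) {
	case ".gif", ".png", ".jpg", ".jpeg":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(validImageExt(filepath.Ext("logo.PNG"))) // true; the case-sensitive form returns false
}
```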

View File

@@ -29,10 +29,10 @@ type client struct {
once once
}
func newClient(conn *websocket.Conn, userID uint, token string, onClose func(*client), channelBuf int) *client {
func newClient(conn *websocket.Conn, userID uint, token string, onClose func(*client)) *client {
return &client{
conn: conn,
write: make(chan *model.MessageExternal, channelBuf),
write: make(chan *model.MessageExternal, 1),
userID: userID,
token: token,
onClose: onClose,

View File

@@ -15,188 +15,98 @@ import (
"github.com/gotify/server/v2/model"
)
// shard holds clients for a subset of users to reduce lock contention.
type shard struct {
clients map[uint]map[string][]*client // userID -> token -> []client (multiple clients can share same token)
lock sync.RWMutex
}
// The API provides a handler for a WebSocket stream API.
type API struct {
shards []*shard
shardCount int
clients map[uint][]*client
lock sync.RWMutex
pingPeriod time.Duration
pongTimeout time.Duration
upgrader *websocket.Upgrader
channelBuf int // Buffer size for client write channels
}
// New creates a new instance of API.
// pingPeriod: the interval at which the server sends a ping to the client.
// pongTimeout: the duration after which the connection is terminated when the client
// does not respond with a pong.
// shardCount: number of shards for client storage (should be power of 2 for optimal distribution).
// readBufferSize: WebSocket read buffer size in bytes.
// writeBufferSize: WebSocket write buffer size in bytes.
// channelBufferSize: buffer size for client write channels in messages.
func New(pingPeriod, pongTimeout time.Duration, allowedWebSocketOrigins []string, shardCount, readBufferSize, writeBufferSize, channelBufferSize int) *API {
// Default to 256 shards when the configured value is unset or invalid
if shardCount < 1 {
shardCount = 256
}
// Round up to next power of 2
shardCount = nextPowerOf2(shardCount)
shards := make([]*shard, shardCount)
for i := range shards {
shards[i] = &shard{
clients: make(map[uint]map[string][]*client),
}
}
func New(pingPeriod, pongTimeout time.Duration, allowedWebSocketOrigins []string) *API {
return &API{
shards: shards,
shardCount: shardCount,
clients: make(map[uint][]*client),
pingPeriod: pingPeriod,
pongTimeout: pingPeriod + pongTimeout,
upgrader: newUpgrader(allowedWebSocketOrigins, readBufferSize, writeBufferSize),
channelBuf: channelBufferSize,
upgrader: newUpgrader(allowedWebSocketOrigins),
}
}
// nextPowerOf2 returns the next power of 2 >= n.
func nextPowerOf2(n int) int {
if n <= 0 {
return 1
}
n--
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n++
return n
}
// getShard returns the shard for a given userID using a fast modulo operation.
func (a *API) getShard(userID uint) *shard {
return a.shards[userID%uint(a.shardCount)]
}
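A note on the two helpers above: rounding the shard count to a power of two is what makes shard selection cheap, since `x % 2^k` equals `x & (2^k - 1)`. A standalone sketch (not from the patch) demonstrating the equivalence, with `math/bits` as a shorter alternative to the bit-twiddling loop:

```go
package main

import (
	"fmt"
	"math/bits"
)

// nextPow2 returns the smallest power of two >= n, matching the
// hand-rolled nextPowerOf2 above.
func nextPow2(n int) int {
	if n <= 1 {
		return 1
	}
	return 1 << bits.Len(uint(n-1))
}

func main() {
	const shardCount = 256
	userID := uint(12345)
	fmt.Println(userID % shardCount)       // 57
	fmt.Println(userID & (shardCount - 1)) // 57: identical for power-of-two counts
	fmt.Println(nextPow2(300))             // 512
}
```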
// CollectConnectedClientTokens returns all tokens of the connected clients.
func (a *API) CollectConnectedClientTokens() []string {
var allClients []string
for _, shard := range a.shards {
shard.lock.RLock()
for _, userClients := range shard.clients {
for _, tokenClients := range userClients {
for _, c := range tokenClients {
allClients = append(allClients, c.token)
}
}
a.lock.RLock()
defer a.lock.RUnlock()
var clients []string
for _, cs := range a.clients {
for _, c := range cs {
clients = append(clients, c.token)
}
shard.lock.RUnlock()
}
return uniq(allClients)
return uniq(clients)
}
// NotifyDeletedUser closes existing connections for the given user.
func (a *API) NotifyDeletedUser(userID uint) error {
shard := a.getShard(userID)
shard.lock.Lock()
defer shard.lock.Unlock()
if userClients, ok := shard.clients[userID]; ok {
for _, tokenClients := range userClients {
for _, client := range tokenClients {
client.Close()
}
a.lock.Lock()
defer a.lock.Unlock()
if clients, ok := a.clients[userID]; ok {
for _, client := range clients {
client.Close()
}
delete(shard.clients, userID)
delete(a.clients, userID)
}
return nil
}
// NotifyDeletedClient closes existing connections with the given token.
func (a *API) NotifyDeletedClient(userID uint, token string) {
shard := a.getShard(userID)
shard.lock.Lock()
defer shard.lock.Unlock()
if userClients, ok := shard.clients[userID]; ok {
if tokenClients, exists := userClients[token]; exists {
for _, client := range tokenClients {
a.lock.Lock()
defer a.lock.Unlock()
if clients, ok := a.clients[userID]; ok {
for i := len(clients) - 1; i >= 0; i-- {
client := clients[i]
if client.token == token {
client.Close()
}
delete(userClients, token)
// Clean up empty user map
if len(userClients) == 0 {
delete(shard.clients, userID)
clients = append(clients[:i], clients[i+1:]...)
}
}
a.clients[userID] = clients
}
}
// Notify notifies the clients with the given userID that a new message was created.
func (a *API) Notify(userID uint, msg *model.MessageExternal) {
shard := a.getShard(userID)
shard.lock.RLock()
userClients, ok := shard.clients[userID]
if !ok {
shard.lock.RUnlock()
return
}
// Create a snapshot of clients to avoid holding the lock during send
clients := make([]*client, 0)
for _, tokenClients := range userClients {
for _, c := range tokenClients {
clients = append(clients, c)
a.lock.RLock()
defer a.lock.RUnlock()
if clients, ok := a.clients[userID]; ok {
for _, c := range clients {
c.write <- msg
}
}
shard.lock.RUnlock()
// Send messages without holding the lock, so a slow client cannot block other operations on this shard.
// The channel buffer (default 10) absorbs short bursts and avoids blocking in most cases.
for _, c := range clients {
c.write <- msg
}
}
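A caveat on `Notify` above: even with the snapshot, `c.write <- msg` still blocks once a slow client's channel buffer fills. A sketch of one common alternative, which drops on a full buffer; neither side of this diff does this, and the types here are simplified:

```go
package main

import "fmt"

type message struct{ ID uint }

type wsClient struct{ write chan *message }

// notifyNonBlocking drops instead of blocking when a client's buffer is
// full, so one stalled reader cannot stall the caller.
func notifyNonBlocking(clients []*wsClient, msg *message) (delivered, dropped int) {
	for _, c := range clients {
		select {
		case c.write <- msg:
			delivered++
		default:
			dropped++
		}
	}
	return
}

func main() {
	stalled := &wsClient{write: make(chan *message)}  // no reader, no buffer: drops
	ready := &wsClient{write: make(chan *message, 1)} // room for one message
	d, x := notifyNonBlocking([]*wsClient{stalled, ready}, &message{ID: 1})
	fmt.Println(d, x) // 1 1
}
```

Dropping trades delivery guarantees for isolation from slow readers; a real implementation would likely also close or flag the lagging client.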
func (a *API) remove(c *client) {
shard := a.getShard(c.userID)
shard.lock.Lock()
defer shard.lock.Unlock()
if userClients, ok := shard.clients[c.userID]; ok {
if tokenClients, exists := userClients[c.token]; exists {
// Remove the specific client from the slice
for i, client := range tokenClients {
if client == c {
userClients[c.token] = append(tokenClients[:i], tokenClients[i+1:]...)
// Clean up empty token slice
if len(userClients[c.token]) == 0 {
delete(userClients, c.token)
}
// Clean up empty user map
if len(userClients) == 0 {
delete(shard.clients, c.userID)
}
break
}
func (a *API) remove(remove *client) {
a.lock.Lock()
defer a.lock.Unlock()
if userIDClients, ok := a.clients[remove.userID]; ok {
for i, client := range userIDClients {
if client == remove {
a.clients[remove.userID] = append(userIDClients[:i], userIDClients[i+1:]...)
break
}
}
}
}
func (a *API) register(c *client) {
shard := a.getShard(c.userID)
shard.lock.Lock()
defer shard.lock.Unlock()
if shard.clients[c.userID] == nil {
shard.clients[c.userID] = make(map[string][]*client)
}
if shard.clients[c.userID][c.token] == nil {
shard.clients[c.userID][c.token] = make([]*client, 0, 1)
}
shard.clients[c.userID][c.token] = append(shard.clients[c.userID][c.token], c)
func (a *API) register(client *client) {
a.lock.Lock()
defer a.lock.Unlock()
a.clients[client.userID] = append(a.clients[client.userID], client)
}
// Handle handles incoming requests. First it upgrades the protocol to the WebSocket protocol and then starts listening
@@ -237,7 +147,7 @@ func (a *API) Handle(ctx *gin.Context) {
return
}
client := newClient(conn, auth.GetUserID(ctx), auth.GetTokenID(ctx), a.remove, a.channelBuf)
client := newClient(conn, auth.GetUserID(ctx), auth.GetTokenID(ctx), a.remove)
a.register(client)
go client.startReading(a.pongTimeout)
go client.startWriteHandler(a.pingPeriod)
@@ -245,19 +155,16 @@ func (a *API) Handle(ctx *gin.Context) {
// Close closes all client connections and stops answering new connections.
func (a *API) Close() {
for _, shard := range a.shards {
shard.lock.Lock()
for _, userClients := range shard.clients {
for _, tokenClients := range userClients {
for _, client := range tokenClients {
client.Close()
}
}
a.lock.Lock()
defer a.lock.Unlock()
for _, clients := range a.clients {
for _, client := range clients {
client.Close()
}
for k := range shard.clients {
delete(shard.clients, k)
}
shard.lock.Unlock()
}
for k := range a.clients {
delete(a.clients, k)
}
}
@@ -297,11 +204,11 @@ func isAllowedOrigin(r *http.Request, allowedOrigins []*regexp.Regexp) bool {
return false
}
func newUpgrader(allowedWebSocketOrigins []string, readBufferSize, writeBufferSize int) *websocket.Upgrader {
func newUpgrader(allowedWebSocketOrigins []string) *websocket.Upgrader {
compiledAllowedOrigins := compileAllowedWebSocketOrigins(allowedWebSocketOrigins)
return &websocket.Upgrader{
ReadBufferSize: readBufferSize,
WriteBufferSize: writeBufferSize,
ReadBufferSize: 1024,
WriteBufferSize: 1024,
CheckOrigin: func(r *http.Request) bool {
if mode.IsDev() {
return true

View File

@@ -505,33 +505,21 @@ func Test_compileAllowedWebSocketOrigins(t *testing.T) {
}
func clients(api *API, user uint) []*client {
shard := api.getShard(user)
shard.lock.RLock()
defer shard.lock.RUnlock()
api.lock.RLock()
defer api.lock.RUnlock()
userClients, ok := shard.clients[user]
if !ok {
return nil
}
clients := make([]*client, 0)
for _, tokenClients := range userClients {
clients = append(clients, tokenClients...)
}
return clients
return api.clients[user]
}
func countClients(a *API) int {
var count int
for _, shard := range a.shards {
shard.lock.RLock()
for _, userClients := range shard.clients {
for _, tokenClients := range userClients {
count += len(tokenClients)
}
}
shard.lock.RUnlock()
a.lock.RLock()
defer a.lock.RUnlock()
var i int
for _, clients := range a.clients {
i += len(clients)
}
return count
return i
}
func testClient(t *testing.T, url string) *testingClient {
@@ -604,8 +592,7 @@ func bootTestServer(handlerFunc gin.HandlerFunc) (*httptest.Server, *API) {
r := gin.New()
r.Use(handlerFunc)
// ping every 500 ms, and the client has 500 ms to respond
// Use default values for shard count and buffer sizes in tests
api := New(500*time.Millisecond, 500*time.Millisecond, []string{}, 256, 8192, 8192, 10)
api := New(500*time.Millisecond, 500*time.Millisecond, []string{})
r.GET("/", api.Handle)
server := httptest.NewServer(r)

View File

@@ -1,234 +0,0 @@
# Hardware Recommendations for WebSocket Benchmarking
## Testing Millions of WebSocket Connections
To properly test Gotify's ability to handle millions of concurrent WebSocket connections, you need to consider several hardware and system factors.
## M4 Mac Mini Considerations
### Pros:
- **Powerful CPU**: M4 chip has excellent single-threaded and multi-threaded performance
- **Unified Memory**: Fast memory access
- **Energy Efficient**: Can run tests for extended periods
### Cons:
- **Limited RAM Options**: Max 24GB (M4 Pro) or 36GB (M4 Max) - may be limiting for millions of connections
- **macOS Limitations**:
- Lower default file descriptor limits (~10,000)
- Docker Desktop overhead
- Network stack differences from Linux
- **Memory Per Connection**:
- Each WebSocket connection uses ~2-4KB of memory
- 1M connections = ~2-4GB just for connections
- Plus OS overhead, buffers, etc.
- Realistically need 8-16GB+ for 1M connections
### M4 Mac Mini Verdict:
**Good for**: Testing up to 100K-500K connections, development, validation
**Limited for**: Testing true millions of connections (1M+)
## Recommended Hardware for Full-Scale Testing
### Option 1: Linux Server (Recommended)
**Best for: 1M+ connections**
**Minimum Specs:**
- **CPU**: 8+ cores (Intel Xeon or AMD EPYC)
- **RAM**: 32GB+ (64GB+ recommended for 1M+ connections)
- **Network**: 10Gbps+ network interface
- **OS**: Linux (Ubuntu 22.04+ or similar)
- **Storage**: SSD for database
**Why Linux:**
- Higher file descriptor limits (can be set to 1M+)
- Better network stack performance
- Native Docker (no Desktop overhead)
- More control over system resources
**System Tuning Required:**
```bash
# Increase file descriptor limits
ulimit -n 1000000
echo "* soft nofile 1000000" >> /etc/security/limits.conf
echo "* hard nofile 1000000" >> /etc/security/limits.conf
# Network tuning
echo 'net.core.somaxconn = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.tcp_max_syn_backlog = 65535' >> /etc/sysctl.conf
echo 'net.ipv4.ip_local_port_range = 1024 65535' >> /etc/sysctl.conf
sysctl -p
```
### Option 2: Cloud Instance (AWS/GCP/Azure)
**Best for: Flexible scaling**
**Recommended Instance Types:**
- **AWS**: c6i.4xlarge or larger (16+ vCPUs, 32GB+ RAM)
- **GCP**: n2-standard-16 or larger
- **Azure**: Standard_D16s_v3 or larger
**Benefits:**
- Can scale up/down as needed
- High network bandwidth
- Easy to replicate test environments
- Can test with multiple instances
### Option 3: Dedicated Server
**Best for: Consistent long-term testing**
- **CPU**: 16+ cores
- **RAM**: 64GB+
- **Network**: 10Gbps+
- **Cost**: $200-500/month for good hardware
## Memory Requirements by Connection Count
| Connections | Estimated RAM | Recommended RAM | Notes |
|------------|---------------|------------------|-------|
| 10K | 2-4GB | 8GB | M4 Mac Mini ✅ |
| 100K | 4-8GB | 16GB | M4 Mac Mini ⚠️ (may work) |
| 500K | 8-16GB | 32GB | M4 Mac Mini ❌ |
| 1M | 16-32GB | 64GB | Linux Server ✅ |
| 5M | 80-160GB | 256GB | High-end Server ✅ |
*Note: RAM estimates include OS, Docker, and application overhead*
## Network Requirements
### Bandwidth Calculation:
- Each WebSocket connection: ~1-2KB initial handshake
- Ping/pong messages: ~10 bytes every 45 seconds
- Message delivery: Variable (depends on message size)
**For 1M connections:**
- Initial connection burst: ~1-2GB
- Sustained ping/pong: 1M connections × ~10 bytes / 45s ≈ 0.2MB/s of payload (a few MB/s with frame and TCP overhead)
- Message delivery: Depends on message rate
**Recommendation**: 10Gbps network for 1M+ connections
## Testing Strategy by Hardware
### M4 Mac Mini (24-36GB RAM)
1. **Start Small**: Test with 10K connections
2. **Scale Gradually**: 50K → 100K → 250K
3. **Monitor Memory**: Watch for OOM conditions
4. **Focus on**: Shard comparison, latency testing, throughput at moderate scale
**Commands:**
```bash
# Test with 10K connections
./benchmark/run-benchmark.sh scale 1k
# Compare shard configurations
./benchmark/run-benchmark.sh all
```
### Linux Server (32GB+ RAM)
1. **Full Scale Testing**: 100K → 500K → 1M+ connections
2. **Multiple Instances**: Test horizontal scaling
3. **Stress Testing**: Find breaking points
4. **Production Simulation**: Real-world scenarios
**Commands:**
```bash
# Test with 100K connections
SCALE=10k ./benchmark/run-benchmark.sh scale 10k
# Test with custom high connection count
# (modify k6 scripts for higher VU counts)
```
## System Limits to Check
### macOS (M4 Mac Mini)
```bash
# Check current limits
ulimit -n # File descriptors
sysctl kern.maxfiles # Max open files
sysctl kern.maxfilesperproc # Max files per process
# Increase limits (temporary)
ulimit -n 65536
```
### Linux
```bash
# Check limits
ulimit -n
cat /proc/sys/fs/file-max
# Increase limits (permanent)
# Edit /etc/security/limits.conf
```
## Docker Considerations
### Docker Desktop (macOS)
- **Overhead**: ~2-4GB RAM for Docker Desktop
- **Performance**: Slightly slower than native Linux
- **Limits**: Subject to macOS system limits
### Native Docker (Linux)
- **Overhead**: Minimal (~500MB)
- **Performance**: Near-native
- **Limits**: Can use full system resources
## Recommendations
### For Development & Initial Testing:
✅ **M4 Mac Mini is fine**
- Test up to 100K-250K connections
- Validate shard configurations
- Test latency and throughput
- Develop and debug
### For Production-Scale Testing:
✅ **Use Linux Server**
- Test 1M+ connections
- Validate true scalability
- Stress testing
- Production simulation
### Hybrid Approach:
1. **Develop on M4 Mac Mini**: Quick iteration, smaller scale tests
2. **Validate on Linux Server**: Full-scale testing before production
## Quick Start on M4 Mac Mini
```bash
# 1. Increase file descriptor limits
ulimit -n 65536
# 2. Start single instance for testing
docker-compose -f docker-compose.benchmark.yml up -d gotify-256
# 3. Run small-scale test
./benchmark/run-benchmark.sh 256 websocket-simple.js
# 4. Monitor resources
docker stats gotify-bench-256
```
## Quick Start on Linux Server
```bash
# 1. Tune system limits (see above)
# 2. Start all instances
docker-compose -f docker-compose.benchmark.yml up -d --build
# 3. Run full-scale test
./benchmark/run-benchmark.sh scale 10k
# 4. Monitor system resources
htop
docker stats
```
## Conclusion
**M4 Mac Mini**: Great for development and testing up to ~250K connections
**Linux Server**: Required for testing true millions of connections
Start with the M4 Mac Mini to validate the setup and optimizations, then move to a Linux server for full-scale production validation.

View File

@@ -1,130 +0,0 @@
# Memory Usage Analysis
## Current Memory Per Connection: ~75KB
This is higher than ideal. Let's break down where the memory is going:
### Per-Connection Memory Breakdown
1. **WebSocket Buffers: 16KB** ⚠️ **Largest contributor**
- Read buffer: 8KB
- Write buffer: 8KB
- These are allocated per connection regardless of usage
2. **Channel Buffer: ~2KB**
- 10 messages * ~200 bytes per message
- Helps prevent blocking but uses memory
3. **Goroutine Stacks: ~4KB**
- 2 goroutines per connection (read + write handlers)
- ~2KB stack per goroutine (default Go stack size)
4. **Client Struct: ~100 bytes**
- Minimal overhead
5. **Map Overhead: Variable**
- Nested map structure: `map[uint]map[string][]*client`
- Each map level has hash table overhead
- Pointer storage overhead
6. **Go Runtime Overhead: ~2-4KB**
- GC metadata
- Runtime structures
7. **Docker/System Overhead: Shared**
- Container base memory
- System libraries
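Summing the itemized estimates above is a useful cross-check against both the ~75KB headline and the ~25-30KB application-only figure in the conclusion. A quick tally, assuming the midpoints listed:

```go
package main

import "fmt"

// Rough per-connection tally from the midpoints above; the gap to the
// measured ~75KB is Docker, map, and shared runtime overhead.
func main() {
	const (
		wsBuffers   = 16 * 1024 // 8KB read + 8KB write
		channelBuf  = 2 * 1024  // ~10 messages * ~200 bytes
		goroutines  = 4 * 1024  // 2 goroutines * ~2KB stack
		clientSize  = 100       // client struct
		runtimeMeta = 3 * 1024  // GC metadata etc. (midpoint of 2-4KB)
	)
	total := wsBuffers + channelBuf + goroutines + clientSize + runtimeMeta
	fmt.Printf("~%.1fKB per connection\n", float64(total)/1024) // ~25.1KB
}
```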
### Sharding Structure Analysis
**Current Structure:**
```go
map[uint]map[string][]*client // userID -> token -> []client
```
**Memory Impact:**
- ✅ **Good**: Sharding reduces lock contention significantly
- ⚠️ **Concern**: Nested maps add overhead
- Each map has bucket overhead (~8 bytes per bucket)
- Hash table structure overhead
- For 256 shards with sparse distribution, this adds up
**Is Sharding Okay?**
- ✅ **Yes, sharding is necessary** for performance at scale
- ⚠️ **But** we could optimize the structure for memory efficiency
### Optimization Opportunities
#### 1. Reduce Buffer Sizes (Quick Win)
**Current:** 8KB read + 8KB write = 16KB
**Optimized:** 2KB read + 2KB write = 4KB
**Savings:** ~12KB per connection (16% reduction)
**Trade-off:** More syscalls, but acceptable for most use cases
#### 2. Flatten Map Structure (Memory Optimization)
**Current:** `map[uint]map[string][]*client`
**Optimized:** `map[string][]*client` with composite key (the slice remains because several connections can share one token)
**Savings:** Eliminates one level of map overhead
**Trade-off:** Slightly more complex key generation, but better memory
#### 3. Reduce Channel Buffer Size
**Current:** 10 messages
**Optimized:** 5 messages
**Savings:** ~1KB per connection
**Trade-off:** Slightly higher chance of blocking, but usually acceptable
#### 4. Connection Pooling (Advanced)
Reuse connections or reduce goroutine overhead
### Recommended Optimizations
#### Option A: Quick Memory Reduction (Easy)
```yaml
# Reduce buffer sizes
readbuffersize: 2048 # from 8192
writebuffersize: 2048 # from 8192
channelbuffersize: 5 # from 10
```
**Expected:** ~12-15KB per connection (60-80% reduction in buffer overhead)
#### Option B: Structure Optimization (Medium)
Flatten the nested map structure to reduce overhead:
```go
// Instead of: map[uint]map[string][]*client
// Use: map[string][]*client with key = fmt.Sprintf("%d:%s", userID, token)
```
**Expected:** ~2-5KB per connection savings
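A sketch of what Option B could look like; the type and helper names are illustrative, and the value stays a slice because several connections can share one client token:

```go
package main

import (
	"fmt"
	"strconv"
)

type conn struct{ id int }

// flatRegistry replaces the nested userID -> token -> clients maps with a
// single map keyed by "userID:token".
type flatRegistry struct {
	clients map[string][]*conn
}

func key(userID uint, token string) string {
	return strconv.FormatUint(uint64(userID), 10) + ":" + token
}

func (r *flatRegistry) add(userID uint, token string, c *conn) {
	k := key(userID, token)
	r.clients[k] = append(r.clients[k], c)
}

func main() {
	r := &flatRegistry{clients: map[string][]*conn{}}
	r.add(7, "CxyZ", &conn{id: 1})
	r.add(7, "CxyZ", &conn{id: 2}) // same token, second connection
	fmt.Println(len(r.clients["7:CxyZ"])) // 2
}
```

The trade-off: operations keyed by user alone (such as `NotifyDeletedUser`) now need a scan or a secondary index, since a user's tokens no longer share a single key.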
#### Option C: Hybrid Approach (Best)
Combine buffer reduction + structure optimization
**Expected:** ~15-20KB per connection (down from 75KB to ~55-60KB)
### Real-World Expectations
**For 1M connections:**
- Current: ~75GB (75KB * 1M)
- Optimized: ~55-60GB (55KB * 1M)
- Savings: ~15-20GB
**For 10M connections:**
- Current: ~750GB (not feasible)
- Optimized: ~550-600GB (still large, but more manageable)
### Conclusion
**Sharding is good** - it's essential for performance. The memory issue comes from:
1. Large WebSocket buffers (16KB) - biggest issue
2. Nested map overhead - moderate issue
3. Channel buffers - minor issue
**Recommendation:**
1. ✅ Keep sharding (it's working well)
2. ⚠️ Reduce buffer sizes for memory-constrained environments
3. ⚠️ Consider flattening map structure if memory is critical
4. ✅ Test with reduced buffers to validate performance
The 75KB includes Docker and Go runtime overhead. Actual application memory per connection is likely ~25-30KB, which is more reasonable but still could be optimized.

View File

@@ -1,345 +0,0 @@
# Gotify WebSocket Performance Benchmarking
This directory contains tools and configurations for benchmarking Gotify's WebSocket performance with different shard configurations.
## Overview
The benchmarking setup allows you to:
- Test multiple Gotify instances with different shard counts (64, 128, 256, 512, 1024)
- Measure WebSocket connection performance, latency, and throughput
- Compare performance across different shard configurations
- Test connection scaling (1K, 10K, 100K+ concurrent connections)
## Prerequisites
- Docker and Docker Compose installed
- At least 8GB of available RAM (for running multiple instances)
- Sufficient CPU cores (recommended: 4+ cores)
## Quick Start
### 1. Start All Benchmark Instances
```bash
# Build and start all Gotify instances with different shard counts
docker-compose -f docker-compose.benchmark.yml up -d --build
```
This will start 5 Gotify instances:
- `gotify-64` on port 8080 (64 shards)
- `gotify-128` on port 8081 (128 shards)
- `gotify-256` on port 8082 (256 shards, default)
- `gotify-512` on port 8083 (512 shards)
- `gotify-1024` on port 8084 (1024 shards)
### 2. Verify Services Are Running
```bash
# Check health of all instances
curl http://localhost:8080/health
curl http://localhost:8081/health
curl http://localhost:8082/health
curl http://localhost:8083/health
curl http://localhost:8084/health
```
### 3. Run Benchmarks
#### Run All Benchmarks (Compare All Shard Counts)
```bash
./benchmark/run-benchmark.sh all
```
#### Run Benchmark Against Specific Instance
```bash
# Test instance with 256 shards
./benchmark/run-benchmark.sh 256
# Test instance with 512 shards
./benchmark/run-benchmark.sh 512
```
#### Run Connection Scaling Test
```bash
# Test with 1K connections
./benchmark/run-benchmark.sh scale 1k
# Test with 10K connections
./benchmark/run-benchmark.sh scale 10k
```
#### Stop All Services
```bash
./benchmark/run-benchmark.sh stop
```
## Manual k6 Testing
You can also run k6 tests manually for more control:
### Simple Connection Test
```bash
docker run --rm -i --network gotify_benchmark-net \
-v $(pwd)/benchmark/k6:/scripts \
-e BASE_URL="http://gotify-256:80" \
grafana/k6:latest run /scripts/websocket-simple.js
```
### Full WebSocket Test
```bash
docker run --rm -i --network gotify_benchmark-net \
-v $(pwd)/benchmark/k6:/scripts \
-e BASE_URL="http://gotify-256:80" \
grafana/k6:latest run /scripts/websocket-test.js
```
### Connection Scaling Test
```bash
docker run --rm -i --network gotify_benchmark-net \
-v $(pwd)/benchmark/k6:/scripts \
-e BASE_URL="http://gotify-256:80" \
-e SCALE="10k" \
grafana/k6:latest run /scripts/connection-scaling.js
```
## Test Scripts
### `websocket-simple.js`
- Quick validation test
- 100 virtual users for 2 minutes
- Basic connection and message delivery checks
### `websocket-test.js`
- Comprehensive performance test
- Gradual ramp-up: 1K → 5K → 10K connections
- Measures connection time, latency, throughput
- Includes thresholds for performance validation
### `connection-scaling.js`
- Tests different connection scales
- Configurable via `SCALE` environment variable (1k, 10k, 100k)
- Measures connection establishment time
- Tracks message delivery latency
## Metrics Collected
The benchmarks collect the following metrics:
### Connection Metrics
- **Connection Time**: Time to establish WebSocket connection
- **Connection Success Rate**: Percentage of successful connections
- **Connection Duration**: How long connections stay alive
### Message Metrics
- **Message Latency**: Time from message creation to delivery (P50, P95, P99)
- **Messages Per Second**: Throughput of message delivery
- **Message Success Rate**: Percentage of messages successfully delivered
### Resource Metrics
- **CPU Usage**: Per-instance CPU utilization
- **Memory Usage**: Per-instance memory consumption
- **Memory Per Connection**: Average memory used per WebSocket connection
## Interpreting Results
### Shard Count Comparison
When comparing different shard counts, look for:
1. **Connection Time**: Lower is better
- More shards should reduce lock contention
- Expect 64 shards to have higher connection times under load
- 256-512 shards typically provide optimal balance
2. **Message Latency**: Lower is better
- P95 latency should be < 100ms for most scenarios
- Higher shard counts may reduce latency under high concurrency
3. **Throughput**: Higher is better
- Messages per second should scale with shard count up to a point
- Diminishing returns after optimal shard count
4. **Memory Usage**: Lower is better
- More shards = slightly more memory overhead
- Balance between performance and memory
### Optimal Shard Count
Based on testing, recommended shard counts:
- **< 10K connections**: 128-256 shards
- **10K-100K connections**: 256-512 shards
- **100K-1M connections**: 512-1024 shards
- **> 1M connections**: 1024+ shards (may need custom build)
## Benchmark Scenarios
### Scenario 1: Connection Scaling
Test how many concurrent connections each configuration can handle:
```bash
./benchmark/run-benchmark.sh scale 1k # Start with 1K
./benchmark/run-benchmark.sh scale 10k # Then 10K
./benchmark/run-benchmark.sh scale 100k # Finally 100K
```
### Scenario 2: Shard Comparison
Compare performance across all shard configurations:
```bash
./benchmark/run-benchmark.sh all
```
### Scenario 3: Message Throughput
Test message delivery rate with different connection counts:
- Modify k6 scripts to send messages via REST API
- Measure delivery latency through WebSocket (a minimal sender sketch follows)
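The REST side can be driven from any HTTP client. A minimal Go sketch, assuming an application token (`APP_TOKEN` is a placeholder) and the gotify-256 instance mapped to port 8082 per the compose file:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

func main() {
	const url = "http://localhost:8082/message?token=APP_TOKEN"
	body := []byte(`{"title":"bench","message":"ping","priority":5}`)
	ticker := time.NewTicker(100 * time.Millisecond) // ~10 messages/second
	defer ticker.Stop()
	for i := 0; i < 100; i++ {
		<-ticker.C
		resp, err := http.Post(url, "application/json", bytes.NewReader(body))
		if err != nil {
			fmt.Println("post failed:", err)
			continue
		}
		resp.Body.Close()
	}
}
```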
### Scenario 4: Latency Testing
Focus on P50, P95, P99 latency metrics:
- Run tests with steady connection count
- Send messages at controlled rate
- Analyze latency distribution
## Configuration
### Adjusting Shard Counts
Edit `docker-compose.benchmark.yml` to modify shard counts:
```yaml
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=256
```
### Adjusting Buffer Sizes
Modify buffer sizes in config files or environment variables:
```yaml
environment:
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
```
### Custom k6 Test Parameters
Modify k6 test scripts to adjust:
- Virtual users (VUs)
- Test duration
- Ramp-up/ramp-down stages
- Thresholds
## Troubleshooting
### Services Won't Start
1. Check Docker resources:
```bash
docker system df
docker system prune # If needed
```
2. Verify ports are available:
```bash
lsof -i :8080-8084
```
3. Check logs:
```bash
docker-compose -f docker-compose.benchmark.yml logs
```
### High Connection Failures
1. Increase system limits:
```bash
# Linux: Increase file descriptor limits
ulimit -n 65536
```
2. Check Docker resource limits:
- Increase memory allocation
- Increase CPU allocation
3. Reduce concurrent connections in test scripts
### Memory Issues
1. Monitor memory usage:
```bash
docker stats
```
2. Reduce number of instances running simultaneously
3. Adjust shard counts (fewer shards = less memory)
### Slow Performance
1. Check CPU usage: `docker stats`
2. Verify network connectivity between containers
3. Check for resource contention
4. Consider running tests sequentially instead of parallel
## Results Storage
Benchmark results are stored in:
- `benchmark/results/` - Detailed logs per shard configuration
- k6 output includes summary statistics
## Advanced Usage
### Custom Test Scenarios
Create custom k6 scripts in `benchmark/k6/`:
```javascript
import ws from 'k6/ws';
import { check } from 'k6';
export const options = {
vus: 1000,
duration: '5m',
};
export default function() {
// Your custom test logic
}
```
### Monitoring with Prometheus
Add Prometheus to `docker-compose.benchmark.yml` for detailed metrics collection.
### Load Balancer Testing
Test with a load balancer in front of multiple instances to simulate production scenarios.
## Performance Expectations
Based on optimizations implemented:
- **Connection Capacity**: 100K-1M+ concurrent connections per instance
- **Message Latency**: P95 < 100ms for most scenarios
- **Throughput**: 10K+ messages/second per instance
- **Memory**: ~2-4KB per connection (varies by shard count)
## Contributing
When adding new benchmark scenarios:
1. Add k6 script to `benchmark/k6/`
2. Update this README with usage instructions
3. Add configuration if needed
4. Test and validate results
## References
- [k6 WebSocket Documentation](https://k6.io/docs/javascript-api/k6-ws/)
- [Gotify Configuration](https://gotify.net/docs/config)
- [WebSocket Performance Best Practices](https://www.ably.com/topic/websockets)

View File

@@ -1,33 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: ""
port: 80
ssl:
enabled: false
redirecttohttps: true
stream:
pingperiodseconds: 45
allowedorigins: []
shardcount: 1024
readbuffersize: 8192
writebuffersize: 8192
channelbuffersize: 10
cors:
alloworigins: []
allowmethods: []
allowheaders: []
trustedproxies: []
database:
dialect: sqlite3
connection: data/gotify.db
defaultuser:
name: admin
pass: admin
passstrength: 10
uploadedimagesdir: data/images
pluginsdir: data/plugins
registration: false

View File

@@ -1,33 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: ""
port: 80
ssl:
enabled: false
redirecttohttps: true
stream:
pingperiodseconds: 45
allowedorigins: []
shardcount: 128
readbuffersize: 8192
writebuffersize: 8192
channelbuffersize: 10
cors:
alloworigins: []
allowmethods: []
allowheaders: []
trustedproxies: []
database:
dialect: sqlite3
connection: data/gotify.db
defaultuser:
name: admin
pass: admin
passstrength: 10
uploadedimagesdir: data/images
pluginsdir: data/plugins
registration: false

View File

@@ -1,33 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: ""
port: 80
ssl:
enabled: false
redirecttohttps: true
stream:
pingperiodseconds: 45
allowedorigins: []
shardcount: 256
readbuffersize: 8192
writebuffersize: 8192
channelbuffersize: 10
cors:
alloworigins: []
allowmethods: []
allowheaders: []
trustedproxies: []
database:
dialect: sqlite3
connection: data/gotify.db
defaultuser:
name: admin
pass: admin
passstrength: 10
uploadedimagesdir: data/images
pluginsdir: data/plugins
registration: false

View File

@@ -1,33 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: ""
port: 80
ssl:
enabled: false
redirecttohttps: true
stream:
pingperiodseconds: 45
allowedorigins: []
shardcount: 512
readbuffersize: 8192
writebuffersize: 8192
channelbuffersize: 10
cors:
alloworigins: []
allowmethods: []
allowheaders: []
trustedproxies: []
database:
dialect: sqlite3
connection: data/gotify.db
defaultuser:
name: admin
pass: admin
passstrength: 10
uploadedimagesdir: data/images
pluginsdir: data/plugins
registration: false

View File

@@ -1,33 +0,0 @@
server:
keepaliveperiodseconds: 0
listenaddr: ""
port: 80
ssl:
enabled: false
redirecttohttps: true
stream:
pingperiodseconds: 45
allowedorigins: []
shardcount: 64
readbuffersize: 8192
writebuffersize: 8192
channelbuffersize: 10
cors:
alloworigins: []
allowmethods: []
allowheaders: []
trustedproxies: []
database:
dialect: sqlite3
connection: data/gotify.db
defaultuser:
name: admin
pass: admin
passstrength: 10
uploadedimagesdir: data/images
pluginsdir: data/plugins
registration: false

View File

@@ -1,92 +0,0 @@
import ws from 'k6/ws';
import { check, sleep } from 'k6';
import http from 'k6/http';
import encoding from 'k6/encoding';
// Test different connection scales
const SCALE = __ENV.SCALE || '1k'; // 1k, 10k, 100k
const scales = {
'1k': { vus: 1000, duration: '5m' },
'10k': { vus: 10000, duration: '10m' },
'100k': { vus: 100000, duration: '15m' },
};
export const options = scales[SCALE] || scales['1k'];
const BASE_URL = __ENV.BASE_URL || 'http://gotify-256:80';
const USERNAME = __ENV.USERNAME || 'admin';
const PASSWORD = __ENV.PASSWORD || 'admin';
let authToken = null;
let clientToken = null;
export function setup() {
const baseUrl = BASE_URL;
const credentials = encoding.b64encode(`${USERNAME}:${PASSWORD}`);
const clientRes = http.post(`${baseUrl}/client`, JSON.stringify({
name: `k6-scale-${__VU}-${Date.now()}`,
}), {
headers: {
'Content-Type': 'application/json',
'Authorization': `Basic ${credentials}`,
},
});
if (clientRes.status !== 200) {
console.error('Failed to create client:', clientRes.status, clientRes.body);
return {};
}
clientToken = JSON.parse(clientRes.body).token;
return { clientToken, baseUrl };
}
export default function (data) {
if (!data.clientToken) return;
const wsUrl = data.baseUrl.replace(/^http/, 'ws') + '/stream?token=' + data.clientToken;
const startTime = Date.now();
const response = ws.connect(wsUrl, {}, function (socket) {
const connectTime = Date.now() - startTime;
socket.on('open', () => {
// Track connection time
console.log(`VU ${__VU}: Connected in ${connectTime}ms`);
});
socket.on('message', (data) => {
const msg = JSON.parse(data);
const receiveTime = Date.now() - startTime;
check(msg, {
'message received': (m) => m.id !== undefined,
'receive latency acceptable': () => receiveTime < 5000,
});
});
socket.on('close', () => {
console.log(`VU ${__VU}: Connection closed after ${Date.now() - startTime}ms`);
});
socket.on('error', (e) => {
if (e.error() !== 'websocket: close sent') {
console.error(`VU ${__VU}: Error:`, e.error());
}
});
// Keep connection alive
sleep(60);
});
check(response, {
'connection successful': (r) => r && r.status === 101,
'connection time < 1s': () => (Date.now() - startTime) < 1000,
});
sleep(1);
}

View File

@@ -1,74 +0,0 @@
import ws from 'k6/ws';
import { check, sleep } from 'k6';
import http from 'k6/http';
import encoding from 'k6/encoding';
// Simple test for quick validation
export const options = {
vus: 100, // 100 virtual users
duration: '2m', // Run for 2 minutes
thresholds: {
'ws_connecting': ['p(95)<1000'],
'ws_session_duration': ['p(95)<5000'],
},
};
const BASE_URL = __ENV.BASE_URL || 'http://gotify-256:80';
const USERNAME = __ENV.USERNAME || 'admin';
const PASSWORD = __ENV.PASSWORD || 'admin';
let authToken = null;
let clientToken = null;
export function setup() {
const baseUrl = BASE_URL;
const credentials = encoding.b64encode(`${USERNAME}:${PASSWORD}`);
const clientRes = http.post(`${baseUrl}/client`, JSON.stringify({
name: `k6-simple-${__VU}-${Date.now()}`,
}), {
headers: {
'Content-Type': 'application/json',
'Authorization': `Basic ${credentials}`,
},
});
if (clientRes.status !== 200) {
console.error('Failed to create client:', clientRes.status, clientRes.body);
return {};
}
clientToken = JSON.parse(clientRes.body).token;
return { clientToken, baseUrl };
}
export default function (data) {
if (!data.clientToken) return;
const wsUrl = data.baseUrl.replace(/^http/, 'ws') + '/stream?token=' + data.clientToken;
const response = ws.connect(wsUrl, {}, function (socket) {
socket.on('open', () => console.log(`VU ${__VU}: Connected`));
socket.on('message', (data) => {
const msg = JSON.parse(data);
check(msg, { 'received message': (m) => m.id !== undefined });
});
socket.on('close', () => console.log(`VU ${__VU}: Disconnected`));
socket.on('error', (e) => {
if (e.error() !== 'websocket: close sent') {
console.error(`VU ${__VU}: Error:`, e.error());
}
});
sleep(30);
});
check(response, {
'connected successfully': (r) => r && r.status === 101,
});
sleep(1);
}

View File

@@ -1,120 +0,0 @@
import ws from 'k6/ws';
import { check, sleep } from 'k6';
import http from 'k6/http';
import encoding from 'k6/encoding';
// Test configuration
export const options = {
stages: [
{ duration: '30s', target: 1000 }, // Ramp up to 1000 connections
{ duration: '1m', target: 1000 }, // Stay at 1000 connections
{ duration: '30s', target: 5000 }, // Ramp up to 5000 connections
{ duration: '2m', target: 5000 }, // Stay at 5000 connections
{ duration: '30s', target: 10000 }, // Ramp up to 10000 connections
{ duration: '2m', target: 10000 }, // Stay at 10000 connections
{ duration: '30s', target: 0 }, // Ramp down
],
thresholds: {
'ws_connecting': ['p(95)<500'], // 95% of connections should connect in <500ms
'ws_session_duration': ['p(95)<1000'], // 95% of sessions should last <1s
'ws_ping': ['p(95)<100'], // 95% of pings should be <100ms
'ws_messages_sent': ['rate>100'], // Should send >100 messages/sec
'ws_messages_received': ['rate>100'], // Should receive >100 messages/sec
},
};
// Configuration - can be overridden via environment variables
const BASE_URL = __ENV.BASE_URL || 'http://gotify-256:80';
const USERNAME = __ENV.USERNAME || 'admin';
const PASSWORD = __ENV.PASSWORD || 'admin';
const CONNECTIONS_PER_VU = __ENV.CONNECTIONS_PER_VU || 1;
// Global variables for authentication
let clientToken = null;
// Setup: Create a client token using basic auth
export function setup() {
const baseUrl = BASE_URL;
// Encode basic auth credentials
const credentials = encoding.b64encode(`${USERNAME}:${PASSWORD}`);
// Create a client token for WebSocket connection using basic auth
const clientRes = http.post(`${baseUrl}/client`, JSON.stringify({
name: `k6-client-${__VU}-${__ITER}-${Date.now()}`,
}), {
headers: {
'Content-Type': 'application/json',
'Authorization': `Basic ${credentials}`,
},
});
if (clientRes.status !== 200) {
console.error('Failed to create client:', clientRes.status, clientRes.body);
return {};
}
clientToken = JSON.parse(clientRes.body).token;
return {
clientToken: clientToken,
baseUrl: baseUrl,
};
}
export default function (data) {
if (!data.clientToken) {
console.error('No client token available');
return;
}
const wsUrl = data.baseUrl.replace(/^http/, 'ws') + '/stream?token=' + data.clientToken;
const params = { tags: { name: 'WebSocket Stream' } };
const response = ws.connect(wsUrl, params, function (socket) {
socket.on('open', function () {
// Connection established
console.log(`VU ${__VU}: WebSocket connection opened`);
});
socket.on('message', function (data) {
// Message received
const message = JSON.parse(data);
check(message, {
'message received': (msg) => msg.id !== undefined,
'message has content': (msg) => msg.message !== undefined,
});
});
socket.on('ping', function () {
// Server ping received
});
socket.on('pong', function () {
// Pong received
});
socket.on('close', function () {
// Connection closed
console.log(`VU ${__VU}: WebSocket connection closed`);
});
socket.on('error', function (e) {
if (e.error() !== 'websocket: close sent') {
console.error(`VU ${__VU}: WebSocket error:`, e.error());
}
});
// Keep connection alive for the duration of the test
// The server will send ping messages periodically
sleep(60); // Keep connection for 60 seconds
});
check(response, {
'status is 101': (r) => r && r.status === 101,
'protocol is websocket': (r) => r && r.url && r.url.startsWith('ws'),
});
sleep(1);
}

View File

@@ -1,151 +0,0 @@
#!/bin/bash
# Benchmark runner script for Gotify WebSocket performance testing
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Configuration
COMPOSE_FILE="docker-compose.benchmark.yml"
INSTANCE_PORTS=(8080 8081 8082 8083 8084)
INSTANCE_NAMES=("64" "128" "256" "512" "1024")
# Function to print colored output
print_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
print_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to check if services are healthy
wait_for_services() {
print_info "Waiting for Gotify instances to be healthy..."
for port in "${INSTANCE_PORTS[@]}"; do
local max_attempts=30
local attempt=0
while [ $attempt -lt $max_attempts ]; do
if curl -sf "http://localhost:${port}/health" > /dev/null 2>&1; then
print_info "Instance on port ${port} is healthy"
break
fi
attempt=$((attempt + 1))
sleep 2
done
if [ $attempt -eq $max_attempts ]; then
print_error "Instance on port ${port} failed to become healthy"
return 1
fi
done
print_info "All instances are healthy"
}
# Function to run benchmark against a specific instance
run_benchmark() {
local instance_name=$1
local port=$2
local test_script=${3:-"websocket-simple.js"}
print_info "Running benchmark against instance with ${instance_name} shards (port ${port})..."
docker run --rm \
--network gotify_benchmark-net \
-v "$(pwd)/benchmark/k6:/scripts" \
-e BASE_URL="http://gotify-${instance_name}:80" \
grafana/k6:latest \
run /scripts/${test_script}
}
# Function to compare all instances
compare_all_instances() {
print_info "Running comparison benchmark across all instances..."
for i in "${!INSTANCE_NAMES[@]}"; do
local name="${INSTANCE_NAMES[$i]}"
local port="${INSTANCE_PORTS[$i]}"
print_info "Testing instance with ${name} shards..."
run_benchmark "${name}" "${port}" "websocket-simple.js" > "benchmark/results/${name}-shards.log" 2>&1 || true
sleep 5
done
}
# Main execution
main() {
print_info "Starting Gotify WebSocket Benchmark Suite"
# Create results directory
mkdir -p benchmark/results
# Start services
print_info "Starting Docker Compose services..."
docker-compose -f ${COMPOSE_FILE} up -d --build
# Wait for services to be ready
wait_for_services
# Give services a moment to fully initialize
sleep 5
# Parse command line arguments
case "${1:-all}" in
"all")
compare_all_instances
;;
"64"|"128"|"256"|"512"|"1024")
local port_index=0
for i in "${!INSTANCE_NAMES[@]}"; do
if [ "${INSTANCE_NAMES[$i]}" = "$1" ]; then
port_index=$i
break
fi
done
run_benchmark "$1" "${INSTANCE_PORTS[$port_index]}" "${2:-websocket-simple.js}"
;;
"scale")
local scale=${2:-"1k"}
print_info "Running connection scaling test with ${scale} connections..."
docker run --rm \
--network gotify_benchmark-net \
-v "$(pwd)/benchmark/k6:/scripts" \
-e BASE_URL="http://gotify-256:80" \
-e SCALE="${scale}" \
grafana/k6:latest \
run /scripts/connection-scaling.js
;;
"stop")
print_info "Stopping Docker Compose services..."
docker-compose -f ${COMPOSE_FILE} down -v
;;
*)
echo "Usage: $0 [all|64|128|256|512|1024|scale|stop] [test-script|scale-size]"
echo ""
echo "Commands:"
echo " all - Run benchmarks against all instances"
echo " 64|128|256|512|1024 - Run benchmark against specific shard count"
echo " scale [1k|10k|100k] - Run connection scaling test"
echo " stop - Stop all services"
exit 1
;;
esac
print_info "Benchmark completed. Check benchmark/results/ for detailed logs."
}
main "$@"

View File

@ -34,10 +34,6 @@ type Configuration struct {
Stream struct {
PingPeriodSeconds int `default:"45"`
AllowedOrigins []string
ShardCount int `default:"256"` // Number of shards for client storage (must be power of 2)
ReadBufferSize int `default:"8192"` // WebSocket read buffer size in bytes
WriteBufferSize int `default:"8192"` // WebSocket write buffer size in bytes
ChannelBufferSize int `default:"10"` // Client write channel buffer size in messages
}
Cors struct {
AllowOrigins []string

View File

@@ -1,175 +0,0 @@
services:
# Gotify instance with 64 shards
gotify-64:
build:
context: .
dockerfile: docker/Dockerfile
args:
GO_VERSION: 1.25.1
BUILD_JS: 1
RUN_TESTS: 0
container_name: gotify-bench-64
ports:
- "8080:80"
volumes:
- ./benchmark/configs/config-64.yml:/app/config.yml:ro
- gotify-64-data:/app/data
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=64
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
networks:
- benchmark-net
healthcheck:
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
test: ["CMD", "curl", "-f", "http://localhost:80/health"]
# Gotify instance with 128 shards
gotify-128:
build:
context: .
dockerfile: docker/Dockerfile
args:
GO_VERSION: 1.25.1
BUILD_JS: 1
RUN_TESTS: 0
container_name: gotify-bench-128
ports:
- "8081:80"
volumes:
- ./benchmark/configs/config-128.yml:/app/config.yml:ro
- gotify-128-data:/app/data
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=128
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
networks:
- benchmark-net
healthcheck:
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
test: ["CMD", "curl", "-f", "http://localhost:80/health"]
# Gotify instance with 256 shards (default)
gotify-256:
build:
context: .
dockerfile: docker/Dockerfile
args:
GO_VERSION: 1.25.1
BUILD_JS: 1
RUN_TESTS: 0
container_name: gotify-bench-256
ports:
- "8082:80"
volumes:
- ./benchmark/configs/config-256.yml:/app/config.yml:ro
- gotify-256-data:/app/data
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=256
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
networks:
- benchmark-net
healthcheck:
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
test: ["CMD", "curl", "-f", "http://localhost:80/health"]
# Gotify instance with 512 shards
gotify-512:
build:
context: .
dockerfile: docker/Dockerfile
args:
GO_VERSION: 1.25.1
BUILD_JS: 1
RUN_TESTS: 0
container_name: gotify-bench-512
ports:
- "8083:80"
volumes:
- ./benchmark/configs/config-512.yml:/app/config.yml:ro
- gotify-512-data:/app/data
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=512
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
networks:
- benchmark-net
healthcheck:
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
test: ["CMD", "curl", "-f", "http://localhost:80/health"]
# Gotify instance with 1024 shards
gotify-1024:
build:
context: .
dockerfile: docker/Dockerfile
args:
GO_VERSION: 1.25.1
BUILD_JS: 1
RUN_TESTS: 0
container_name: gotify-bench-1024
ports:
- "8084:80"
volumes:
- ./benchmark/configs/config-1024.yml:/app/config.yml:ro
- gotify-1024-data:/app/data
environment:
- GOTIFY_SERVER_STREAM_SHARDCOUNT=1024
- GOTIFY_SERVER_STREAM_READBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_WRITEBUFFERSIZE=8192
- GOTIFY_SERVER_STREAM_CHANNELBUFFERSIZE=10
networks:
- benchmark-net
healthcheck:
interval: 10s
timeout: 5s
retries: 3
start_period: 10s
test: ["CMD", "curl", "-f", "http://localhost:80/health"]
# k6 load testing tool
k6-loadtest:
image: grafana/k6:latest
container_name: k6-benchmark
volumes:
- ./benchmark/k6:/scripts
networks:
- benchmark-net
depends_on:
- gotify-64
- gotify-128
- gotify-256
- gotify-512
- gotify-1024
command: run /scripts/websocket-test.js
profiles:
- benchmark
volumes:
gotify-64-data:
gotify-128-data:
gotify-256-data:
gotify-512-data:
gotify-1024-data:
networks:
benchmark-net:
driver: bridge

View File

@@ -10,7 +10,7 @@ ARG __TARGETPLATFORM_GO_NOTATION=${__TARGETPLATFORM_DASHES/arm\/v7/arm-7}
# --- JS Builder ---
FROM --platform=${BUILDPLATFORM} node:24 AS js-builder
FROM --platform=${BUILDPLATFORM} node:23 AS js-builder
ARG BUILD_JS=0

go.mod
View File

@@ -14,12 +14,12 @@ require (
github.com/mattn/go-isatty v0.0.20
github.com/robfig/cron v1.2.0
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.45.0
golang.org/x/crypto v0.43.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/mysql v1.6.0
gorm.io/driver/postgres v1.6.0
gorm.io/driver/sqlite v1.6.0
gorm.io/gorm v1.31.1
gorm.io/gorm v1.31.0
)
require (
@@ -58,10 +58,10 @@ require (
github.com/ugorji/go/codec v1.3.0 // indirect
golang.org/x/arch v0.22.0 // indirect
golang.org/x/mod v0.29.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/sys v0.38.0 // indirect
golang.org/x/text v0.31.0 // indirect
golang.org/x/net v0.46.0 // indirect
golang.org/x/sync v0.17.0 // indirect
golang.org/x/sys v0.37.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
)

go.sum
View File

@@ -122,22 +122,22 @@ go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
golang.org/x/arch v0.22.0 h1:c/Zle32i5ttqRXjdLyyHZESLD/bB90DCU1g9l/0YBDI=
golang.org/x/arch v0.22.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190110200230-915654e7eabc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190109145017-48ac38b7c8cb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
@@ -157,5 +157,5 @@ gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
gorm.io/gorm v1.31.0 h1:0VlycGreVhK7RF/Bwt51Fk8v0xLiiiFdbGDPIZQ7mJY=
gorm.io/gorm v1.31.0/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=

View File

@@ -27,7 +27,6 @@ import (
func Create(db *database.GormDatabase, vInfo *model.VersionInfo, conf *config.Configuration) (*gin.Engine, func()) {
g := gin.New()
g.RemoveExtraSlash = true
g.RemoteIPHeaders = []string{"X-Forwarded-For"}
g.SetTrustedProxies(conf.Server.TrustedProxies)
g.ForwardedByClientIP = true
@@ -65,13 +64,7 @@ func Create(db *database.GormDatabase, vInfo *model.VersionInfo, conf *config.Configuration) (*gin.Engine, func()) {
})
}
streamHandler := stream.New(
time.Duration(conf.Server.Stream.PingPeriodSeconds)*time.Second,
15*time.Second,
conf.Server.Stream.AllowedOrigins,
conf.Server.Stream.ShardCount,
conf.Server.Stream.ReadBufferSize,
conf.Server.Stream.WriteBufferSize,
conf.Server.Stream.ChannelBufferSize)
time.Duration(conf.Server.Stream.PingPeriodSeconds)*time.Second, 15*time.Second, conf.Server.Stream.AllowedOrigins)
go func() {
ticker := time.NewTicker(5 * time.Minute)
for range ticker.C {

View File

@@ -59,8 +59,6 @@ const Applications = observer(() => {
useEffect(() => void appStore.refresh(), []);
const validExtensions = ['.gif', '.png', '.jpg', '.jpeg'];
const handleImageUploadClick = (id: number) => {
uploadId.current = id;
if (fileInputRef.current) {
@@ -73,7 +71,11 @@ const Applications = observer(() => {
if (!file) {
return;
}
appStore.uploadImage(uploadId.current, file);
if (['image/png', 'image/jpeg', 'image/gif'].indexOf(file.type) !== -1) {
appStore.uploadImage(uploadId.current, file);
} else {
alert('Uploaded file must be of type png, jpeg or gif.');
}
};
return (
@@ -126,7 +128,6 @@ const Applications = observer(() => {
<input
ref={fileInputRef}
type="file"
accept={validExtensions.join(',')}
style={{display: 'none'}}
onChange={onUploadImage}
/>
@@ -197,16 +198,12 @@ const Row = ({
fEdit,
}: IRowProps) => {
const {classes} = useStyles();
const isDefaultImage = image === 'static/defaultapp.png';
return (
<TableRow>
<TableCell padding="normal">
<div style={{display: 'flex'}}>
<Tooltip title="Delete image" placement="top" arrow>
<ButtonBase
className={classes.imageContainer}
onClick={fDeleteImage}
disabled={isDefaultImage}>
<ButtonBase className={classes.imageContainer} onClick={fDeleteImage}>
<img
src={config.get('url') + image}
alt="app logo"

View File

@@ -2,8 +2,6 @@ import {reaction} from 'mobx';
import * as Notifications from './snack/browserNotification';
import {StoreMapping} from './stores';
const AUDIO_REPEAT_DELAY = 1000;
export const registerReactions = (stores: StoreMapping) => {
const clearAll = () => {
stores.messagesStore.clearAll();
@@ -12,19 +10,13 @@ export const registerReactions = (stores: StoreMapping) => {
stores.userStore.clear();
stores.wsStore.close();
};
let audio: HTMLAudioElement | undefined;
let lastAudio = 0;
const loadAll = () => {
stores.wsStore.listen((message) => {
stores.messagesStore.publishSingleMessage(message);
Notifications.notifyNewMessage(message);
if (message.priority >= 4 && Date.now() > lastAudio + AUDIO_REPEAT_DELAY) {
lastAudio = Date.now();
audio ??= new Audio('static/notification.ogg');
audio.currentTime = 0;
if (message.priority >= 4) {
const src = 'static/notification.ogg';
const audio = new Audio(src);
audio.play();
}
});