Merge branch 'master' of github.com:radian-software/riju into radian-software-master

This commit is contained in:
plondon 2023-05-11 09:19:32 -04:00
commit 1dd7533abe
No known key found for this signature in database
GPG Key ID: 30488E636545C80C
114 changed files with 8770 additions and 1402 deletions

18
.github/FUNDING.yml vendored

@ -1,8 +1,12 @@
github: raxod502
patreon: riju
ko_fi: riju_codes
liberapay: riju
github: radian-software
patreon: radiansoftware
ko_fi: radiansoftware
liberapay: radian-software
custom:
- https://paypal.me/rijucodes
- https://cash.app/$RijuCodes
- https://venmo.com/code?user_id=3335527067549696598
- https://www.paypal.com/donate/?hosted_button_id=SYF48KFJ95FPA
- https://cash.app/$RadianSoftware
# Venmo is not currently supported because it is impossible to
# create a new business account when one has been created at any
# point in the past, even if it has been deleted. I have reached out
# to Venmo support and they have confirmed there is no supported way
# to use Venmo going forward, and suggested I use PayPal instead.


@ -1,17 +0,0 @@
name: Build and deploy
on:
push:
branches:
- master
concurrency: deploy
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
# - name: Build and deploy
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_REGION: us-west-1
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# run: tools/ci-ec2.bash

7
.gitignore vendored

@ -1,11 +1,14 @@
*.log
*.out
*.out.*
*.pem
.env
.lsp-repl-history
.terraform
bin
build
env.yaml
node_modules
out
sentinel.h
financials/????-??/*
!financials/????-??/breakdown.txt
agent/agent


@ -8,6 +8,6 @@
* [Deploying your own instance of Riju](doc/selfhosting.md)
If you'd like to request a new language, head to the [language support
meta-issue](https://github.com/raxod502/riju/issues/24) and add a
comment. Of course, if you actually want it to be added anytime soon,
you should submit a pull request :)
meta-issue](https://github.com/radian-software/riju/issues/24) and add
a comment. Of course, if you actually want it to be added anytime
soon, you should submit a pull request :)


@ -1,6 +1,7 @@
# MIT License
Copyright (c) 2020 Radon Rosborough
Copyright (c) 2020–2022 [Radian LLC](https://radian.codes) and
contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@ -9,9 +9,9 @@ export
BUILD := build/$(T)/$(L)
DEB := riju-$(T)-$(L).deb
S3 := s3://$(S3_BUCKET)
S3_CONFIG_PATH ?= config.json
S3_DEB := $(S3)/debs/$(DEB)
S3_HASH := $(S3)/hashes/riju-$(T)-$(L)
S3_CONFIG := $(S3)/config.json
S3_CONFIG := $(S3)/$(S3_CONFIG_PATH)
ifneq ($(CMD),)
C_CMD := -c '$(CMD)'
@ -51,24 +51,25 @@ image: # I=<image> [L=<lang>] [NC=1] : Build a Docker image
ifeq ($(I),lang)
@: $${L}
node tools/build-lang-image.js --lang $(L)
else ifeq ($(I),ubuntu)
docker pull ubuntu:rolling
hash="$$(docker inspect ubuntu:rolling -f '{{ .Id }}' | sha1sum | awk '{ print $$1 }')"; echo "FROM ubuntu:rolling" | docker build --label riju.image-hash="$${hash}" -t riju:$(I) -
else ifneq (,$(filter $(I),admin ci))
docker build . -f docker/$(I)/Dockerfile -t riju:$(I) $(NO_CACHE)
else
hash="$$(node tools/hash-dockerfile.js $(I) | grep .)"; docker build . -f docker/$(I)/Dockerfile -t riju:$(I) --label riju.image-hash="$${hash}" $(NO_CACHE)
docker build . -f docker/$(I)/Dockerfile -t riju:$(I) $(NO_CACHE)
endif
VOLUME_MOUNT ?= $(PWD)
# http
P1 ?= 6119
# https
P2 ?= 6120
# metrics
P3 ?= 6121
ifneq (,$(EE))
SHELL_PORTS := -p 0.0.0.0:$(P1):6119 -p 0.0.0.0:$(P2):6120
SHELL_PORTS := -p 0.0.0.0:$(P1):6119 -p 0.0.0.0:$(P2):6120 -p 0.0.0.0:$(P3):6121
else ifneq (,$(E))
SHELL_PORTS := -p 127.0.0.1:$(P1):6119 -p 127.0.0.1:$(P2):6120
SHELL_PORTS := -p 127.0.0.1:$(P1):6119 -p 127.0.0.1:$(P2):6120 -p 127.0.0.1:$(P3):6121
else
SHELL_PORTS :=
endif
@ -81,25 +82,22 @@ else
LANG_TAG := $(I)
endif
IMAGE_HASH := "$$(docker inspect riju:$(LANG_TAG) -f '{{ index .Config.Labels "riju.image-hash" }}')"
WITH_IMAGE_HASH := -e RIJU_IMAGE_HASH=$(IMAGE_HASH)
shell: # I=<shell> [L=<lang>] [E[E]=1] [P1|P2=<port>] [CMD="<arg>..."] : Launch Docker image with shell
@: $${I}
ifneq (,$(filter $(I),admin ci))
@mkdir -p $(HOME)/.aws $(HOME)/.docker $(HOME)/.ssh $(HOME)/.terraform.d
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock -v $(HOME)/.aws:/var/cache/riju/.aws -v $(HOME)/.docker:/var/cache/riju/.docker -v $(HOME)/.ssh:/var/cache/riju/.ssh -v $(HOME)/.terraform.d:/var/cache/riju/.terraform.d -e NI -e AWS_REGION -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e DOCKER_REPO -e PUBLIC_DOCKER_REPO -e S3_BUCKET -e DOMAIN -e VOLUME_MOUNT=$(VOLUME_MOUNT) $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) --network host riju:$(I) $(BASH_CMD)
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock -v $(HOME)/.aws:/var/cache/riju/.aws -v $(HOME)/.docker:/var/cache/riju/.docker -v $(HOME)/.ssh:/var/cache/riju/.ssh -v $(HOME)/.terraform.d:/var/cache/riju/.terraform.d -e NI -e AWS_REGION -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e DOCKER_REPO -e PUBLIC_DOCKER_REPO -e S3_BUCKET -e DOMAIN -e VOLUME_MOUNT=$(VOLUME_MOUNT) $(SHELL_PORTS) $(SHELL_ENV) --network host riju:$(I) $(BASH_CMD)
else ifeq ($(I),app)
docker run $(IT_ARG) --rm --hostname $(I) -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
docker run $(IT_ARG) --rm --hostname $(I) -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
else ifneq (,$(filter $(I),base lang))
ifeq ($(I),lang)
@: $${L}
endif
docker run $(IT_ARG) --rm --hostname $(LANG_TAG) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(LANG_TAG) $(BASH_CMD)
docker run $(IT_ARG) --rm --hostname $(LANG_TAG) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) riju:$(LANG_TAG) $(BASH_CMD)
else ifeq ($(I),runtime)
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/cache/riju:/var/cache/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
else
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
docker run $(IT_ARG) --rm --hostname $(I) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
endif
ecr: # Authenticate to ECR (temporary credentials)
@ -213,7 +211,7 @@ lsp: # L=<lang|cmd> : Run LSP REPL for language or custom command line
### Fetch artifacts from registries
PUBLIC_DOCKER_REPO_PULL ?= public.ecr.aws/raxod502/riju
PUBLIC_DOCKER_REPO_PULL ?= public.ecr.aws/radian-software/riju
sync-ubuntu: # Pull Riju Ubuntu image from public Docker registry
docker pull $(PUBLIC_DOCKER_REPO_PULL):ubuntu
@ -237,21 +235,13 @@ undeploy: # Pull latest deployment config from S3
push: # I=<image> : Push Riju image to Docker registry
@: $${I} $${DOCKER_REPO}
docker tag riju:$(I) $(DOCKER_REPO):$(I)-$(IMAGE_HASH)
docker push $(DOCKER_REPO):$(I)-$(IMAGE_HASH)
ifeq ($(I),ubuntu)
docker tag riju:$(I) $(PUBLIC_DOCKER_REPO):$(I)
docker push $(PUBLIC_DOCKER_REPO):$(I)
endif
docker tag riju:$(I) $(DOCKER_REPO):$(I)
docker push $(DOCKER_REPO):$(I)
upload: # L=<lang> T=<type> : Upload .deb to S3
@: $${L} $${T} $${S3_BUCKET}
tools/ensure-deb-compressed.bash
aws s3 rm --recursive $(S3_HASH)
aws s3 cp $(BUILD)/$(DEB) $(S3_DEB)
hash="$$(dpkg-deb -f $(BUILD)/$(DEB) Riju-Script-Hash | grep .)"; aws s3 cp - "$(S3_HASH)/$${hash}" < /dev/null
deploy-config: # Generate deployment config file
node tools/generate-deploy-config.js
@ -282,11 +272,13 @@ fmt: fmt-c fmt-go fmt-python fmt-terraform fmt-web # Format all code
### Infrastructure
packer-web: supervisor # Build and publish a new webserver AMI
tools/packer-build-web.bash
packer: supervisor # Build and publish a new webserver AMI
tools/packer-build.bash
packer-ci: # Build and publish a new CI AMI
tools/packer-build-ci.bash
deploy-alerts: # Deploy alerting configuration to Grafana Cloud
envsubst < grafana/alertmanager.yaml > grafana/alertmanager.yaml.out
cortextool rules load grafana/alerts.yaml --address=https://$(GRAFANA_PROMETHEUS_HOSTNAME) --id=$(GRAFANA_PROMETHEUS_USERNAME) --key=$(GRAFANA_API_KEY)
cortextool alertmanager load grafana/alertmanager.yaml.out --address=https://alertmanager-us-central1.grafana.net --id=$(GRAFANA_ALERTMANAGER_USERNAME) --key=$(GRAFANA_API_KEY)
### Miscellaneous


@ -6,18 +6,18 @@ or compiling [INTERCAL](https://en.wikipedia.org/wiki/INTERCAL) code.
Check it out at <https://riju.codes>!
Service uptime available at <https://riju.statuspage.io/>.
Service uptime available at <https://radian.statuspage.io/>.
## Is it free?
Riju will always be free for everyone. I pay for the hosting costs
myself.
Riju will always be free for everyone. I pay for the hosting costs out
of the business account of Radian LLC, which is funded by donations
and my personal savings. If you would like to help keep Riju online
and see more projects like it, there are a few donation methods
available in the "Sponsor this project" sidebar on GitHub.
A number of people have asked me if they can donate to help keep Riju
online. In response, I have set up a few methods, which you can see in
the "Sponsor this project" sidebar on GitHub. All donations will be
used solely to cover hosting costs, and any surplus will be donated to
the [Electronic Frontier Foundation](https://www.eff.org/).
All financial records for Radian LLC are made [publicly
available](https://github.com/radian-software/financials).
## Is it safe?


@ -1,8 +1,8 @@
# Reporting a security issue
Please contact me at
[radon.neon@gmail.com](mailto:radon.neon@gmail.com) if you find any
way to:
[security+riju@radian.codes](mailto:security+riju@radian.codes) if you
find any way to:
* Take down Riju without using a large number of concurrent sessions.
* View or interfere with another user's session.

8
agent/go.mod Normal file

@ -0,0 +1,8 @@
module github.com/radian-software/riju/agent
go 1.18
require (
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
)

4
agent/go.sum Normal file

@ -0,0 +1,4 @@
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=

30
agent/logging.go Normal file

@ -0,0 +1,30 @@
package main
import (
"fmt"
"io"
"log"
)
func logWarn(err error) {
log.Println(err.Error())
}
func logWarnf(format string, arg ...interface{}) {
logWarn(fmt.Errorf(format, arg...))
}
func logError(err error) {
log.Println(err.Error())
}
func logErrorf(format string, arg ...interface{}) {
logError(fmt.Errorf(format, arg...))
}
func tryClose(obj io.Closer, objName string) {
err := obj.Close()
if err != nil {
logErrorf("error closing %s: %w", objName, err)
}
}

225
agent/main.go Normal file

@ -0,0 +1,225 @@
package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"time"
"github.com/google/shlex"
"github.com/gorilla/websocket"
)
type clientMessage struct {
// "stdin"
Event string `json:"event"`
// contents of stdin
Data []byte `json:"data,omitempty"`
}
type serverMessage struct {
// "start", "stdout", "stderr", "exit", "warn", "error"
Event string `json:"event"`
// contents of stdout/stderr
Data []byte `json:"data,omitempty"`
// error message
Text string `json:"text,omitempty"`
// exit status
ExitStatus *int `json:"exitStatus,omitempty"`
}
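
// For clarity, an example exchange over the /exec websocket using the
// message types above (values illustrative; note that encoding/json
// marshals []byte fields as base64 strings):
//
//   client -> {"event": "stdin", "data": "ZWNobyBoaQo="}  // "echo hi\n"
//   server -> {"event": "stdout", "data": "aGkK"}         // "hi\n"
//   server -> {"event": "exit", "exitStatus": 0}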
var upgrader = websocket.Upgrader{}
func closeWs(ms *ManagedWebsocket) {
ms.CloseChan <- struct{}{}
}
func send(ms *ManagedWebsocket, msg *serverMessage) {
data, err := json.Marshal(msg)
if err != nil {
logErrorf("marshaling message: %w", err)
closeWs(ms)
return
}
ms.OutgoingChan <- data
}
func fatal(ms *ManagedWebsocket, err error) {
send(ms, &serverMessage{
Event: "fatal",
Text: err.Error(),
})
}
func fatalf(ms *ManagedWebsocket, format string, arg ...interface{}) {
fatal(ms, fmt.Errorf(format, arg...))
}
func warn(ms *ManagedWebsocket, err error) {
send(ms, &serverMessage{
Event: "warn",
Text: err.Error(),
})
}
func warnf(ms *ManagedWebsocket, format string, arg ...interface{}) {
warn(ms, fmt.Errorf(format, arg...))
}
func getCommandPrefix() []string {
prefix := os.Getenv("RIJU_AGENT_COMMAND_PREFIX")
if prefix == "" {
logErrorf("must specify RIJU_AGENT_COMMAND_PREFIX for security reasons")
os.Exit(1)
}
if prefix == "0" {
return []string{}
}
list, err := shlex.Split(prefix)
if err != nil {
logErrorf("parsing RIJU_AGENT_COMMAND_PREFIX: %w", err)
os.Exit(1)
}
return list
}
var CommandPrefix = getCommandPrefix()
// https://github.com/gorilla/websocket/blob/76ecc29eff79f0cedf70c530605e486fc32131d1/examples/command/main.go
func handler(w http.ResponseWriter, r *http.Request) {
// Upgrade http connection to websocket
ws, err := upgrader.Upgrade(w, r, nil)
if err != nil {
logErrorf("upgrading connection: %w", err)
return
}
// Set up channels to handle incoming and outgoing websocket
// messages more conveniently, and also to handle closing the
// websocket on error or when we ask.
ms := &ManagedWebsocket{
Socket: ws,
MessageType: websocket.TextMessage,
PingPeriod: 5 * time.Second,
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
}
ms.Init()
// Ensure that websocket will be closed eventually when we
// exit.
defer closeWs(ms)
// Parse request query parameters; do this after upgrading to
// websocket so that we can send errors back on the websocket
// which is easier for clients to parse
cmdline := r.URL.Query()["cmdline"]
if len(cmdline) == 0 {
fatalf(ms, "cmdline query parameter missing")
return
}
cmdline = append(CommandPrefix, cmdline...)
binary, err := exec.LookPath(cmdline[0])
if err != nil {
fatalf(ms, "searching for executable: %w", err)
return
}
// Spawn subprocess
mp, err := NewManagedProcess(binary, cmdline, nil)
if err != nil {
fatalf(ms, "spawning process: %w", err)
return
}
// Ensure eventual process termination
defer func() {
mp.CloseChan <- struct{}{}
}()
// Handle received messages from client
go func() {
for data := range ms.IncomingChan {
msg := clientMessage{}
err := json.Unmarshal(data, &msg)
if err != nil {
warnf(ms, "parsing json: %w", err)
continue
}
switch msg.Event {
case "stdin":
mp.StdinChan <- msg.Data
default:
logWarnf("received unknown event type %s", msg.Event)
}
}
}()
// Proxy stdout and stderr from subprocess
go func() {
for data := range mp.StdoutChan {
msg, err := json.Marshal(&serverMessage{
Event: "stdout",
Data: data,
})
if err != nil {
warnf(ms, "wrapping stdout in json: %w", err)
return
}
ms.OutgoingChan <- msg
}
}()
go func() {
for data := range mp.StderrChan {
msg, err := json.Marshal(&serverMessage{
Event: "stderr",
Data: data,
})
if err != nil {
warnf(ms, "wrapping stderr in json: %w", err)
return
}
ms.OutgoingChan <- msg
}
}()
// Send info about process exit status
exitChan2 := make(chan struct{}, 16)
go func() {
for status := range mp.ExitChan {
exitChan2 <- struct{}{}
code := status.ExitCode()
send(ms, &serverMessage{
Event: "exit",
ExitStatus: &code,
})
}
}()
// Wait until one of subprocess or websocket exits. The other
// one will be cleaned up on return.
select {
case <-exitChan2:
case <-ms.ClosedChan:
}
// Wait a bit to send any pending messages before closing the
// connection.
time.Sleep(1 * time.Second)
return
}
func main() {
port := os.Getenv("RIJU_AGENT_PORT")
if port == "" {
port = "869"
}
host := os.Getenv("RIJU_AGENT_HOST")
if host == "" {
host = "0.0.0.0"
}
fmt.Printf("Listening on http://%s:%s\n", host, port)
mux := http.NewServeMux()
mux.HandleFunc("/exec", handler)
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
})
err := http.ListenAndServe(fmt.Sprintf("%s:%s", host, port), mux)
if err != nil {
logError(err)
os.Exit(1)
}
}
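
For reference, here is a minimal client sketch for the agent above, written against gorilla/websocket. The /exec endpoint, the cmdline query parameter, and the message schema come from the handler in this file; the host, port, and command are illustrative assumptions, and the agent is assumed to have been started with RIJU_AGENT_COMMAND_PREFIX=0 so the command runs unprefixed.

```
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Illustrative: agent on localhost:869 (the default port above),
	// running "head -n1" so that one line of stdin produces one line
	// of stdout followed by an exit event.
	url := "ws://localhost:869/exec?cmdline=head&cmdline=-n1"
	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Send one line of stdin; encoding/json marshals []byte as base64.
	if err := conn.WriteJSON(map[string]interface{}{
		"event": "stdin",
		"data":  []byte("hello\n"),
	}); err != nil {
		log.Fatal(err)
	}

	// Relay server messages until the subprocess exits.
	for {
		var msg struct {
			Event      string `json:"event"`
			Data       []byte `json:"data"`
			Text       string `json:"text"`
			ExitStatus *int   `json:"exitStatus"`
		}
		if err := conn.ReadJSON(&msg); err != nil {
			log.Fatal(err)
		}
		switch msg.Event {
		case "stdout", "stderr":
			fmt.Printf("%s: %s", msg.Event, msg.Data)
		case "warn", "fatal":
			fmt.Println("agent:", msg.Text)
		case "exit":
			fmt.Println("exit status:", *msg.ExitStatus)
			return
		}
	}
}
```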

164
agent/process.go Normal file

@ -0,0 +1,164 @@
package main
import (
"fmt"
"os"
"syscall"
"time"
)
type managedProcess struct {
proc *os.Process
stdinRead *os.File
stdinWrite *os.File
stdoutRead *os.File
stdoutWrite *os.File
stderrRead *os.File
stderrWrite *os.File
internalExitChan chan struct{}
StdinChan chan []byte
StdoutChan chan []byte
StderrChan chan []byte
ExitChan chan *os.ProcessState
CloseChan chan struct{}
}
func NewManagedProcess(name string, argv []string, attr *os.ProcAttr) (*managedProcess, error) {
mp := &managedProcess{
internalExitChan: make(chan struct{}, 16),
StdinChan: make(chan []byte, 16),
StdoutChan: make(chan []byte, 16),
StderrChan: make(chan []byte, 16),
ExitChan: make(chan *os.ProcessState, 16),
CloseChan: make(chan struct{}, 16),
}
done := false
go mp.handleClose()
defer func() {
if !done {
mp.CloseChan <- struct{}{}
}
}()
var err error
mp.stdinRead, mp.stdinWrite, err = os.Pipe()
if err != nil {
return mp, fmt.Errorf("creating stdin pipe: %w", err)
}
mp.stdoutRead, mp.stdoutWrite, err = os.Pipe()
if err != nil {
return mp, fmt.Errorf("creating stdout pipe: %w", err)
}
mp.stderrRead, mp.stderrWrite, err = os.Pipe()
if err != nil {
return mp, fmt.Errorf("creating stderr pipe: %w", err)
}
newAttr := &os.ProcAttr{}
if attr != nil {
*newAttr = *attr
}
if len(newAttr.Files) < 3 {
newAttr.Files = append(newAttr.Files, make([]*os.File, 3-len(newAttr.Files))...)
newAttr.Files[0] = mp.stdinRead
newAttr.Files[1] = mp.stdoutWrite
newAttr.Files[2] = mp.stderrWrite
}
mp.proc, err = os.StartProcess(name, argv, newAttr)
if err != nil {
return mp, fmt.Errorf("spawning process: %w", err)
}
go mp.handleWait()
go mp.handleInput(mp.StdinChan, mp.stdinWrite, "stdin")
go mp.handleOutput(mp.StdoutChan, mp.stdoutRead, "stdout")
go mp.handleOutput(mp.StderrChan, mp.stderrRead, "stderr")
done = true
return mp, nil
}
func (mp *managedProcess) handleInput(ch chan []byte, f *os.File, name string) {
for data := range ch {
_, err := f.Write(data)
if err != nil {
// Likely stdin closed by subprocess, this is normal
return
}
}
}
func (mp *managedProcess) handleOutput(ch chan []byte, f *os.File, name string) {
for {
buf := make([]byte, 1024)
nr, err := f.Read(buf)
if err != nil {
// Likely stdout/stderr closed by subprocess,
// this is normal
return
}
if nr == 0 {
continue
}
ch <- buf[:nr]
}
}
func (mp *managedProcess) handleWait() {
s, err := mp.proc.Wait()
if err != nil {
logErrorf("waiting on process: %w", err)
}
mp.internalExitChan <- struct{}{}
mp.ExitChan <- s
}
func (mp *managedProcess) killProc() {
// See if process has already exited or is about to
select {
case <-mp.internalExitChan:
return
case <-time.NewTimer(500 * time.Millisecond).C:
//
}
// Try killing the process by closing stdin
mp.stdinWrite.Close()
select {
case <-mp.internalExitChan:
return
case <-time.NewTimer(500 * time.Millisecond).C:
//
}
// Try killing the process with SIGTERM, SIGINT, then
// finally SIGKILL
for _, sig := range []os.Signal{syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL} {
err := mp.proc.Signal(sig)
if err != nil {
logErrorf("sending %s to child: %w", sig.String(), err)
}
select {
case <-mp.internalExitChan:
return
case <-time.NewTimer(500 * time.Millisecond).C:
//
}
}
// We are unable to kill the process
logErrorf("unable to kill child process (pid %d)", mp.proc.Pid)
}
func (mp *managedProcess) handleClose() {
<-mp.CloseChan
for _, p := range []*os.File{
mp.stdinRead, mp.stdinWrite,
mp.stdoutRead, mp.stdoutWrite,
mp.stderrRead, mp.stderrWrite,
} {
if p != nil {
p.Close()
}
}
if mp.proc != nil {
// Kill the subprocess if it has not already exited; otherwise
// killProc is never invoked and the child can outlive us.
mp.killProc()
}
}
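
A short usage sketch for the managedProcess API defined above, assuming it lives in the same package as this file (with the same imports). The binary path and argv are illustrative; note that argv includes the program name, following os.StartProcess conventions.

```
// Sketch: spawn /bin/echo and relay its output until it exits.
func runEchoExample() error {
	mp, err := NewManagedProcess("/bin/echo", []string{"echo", "hello"}, nil)
	if err != nil {
		return err
	}
	// Ensure the pipes get closed (and the process killed) eventually.
	defer func() { mp.CloseChan <- struct{}{} }()
	for {
		select {
		case data := <-mp.StdoutChan:
			os.Stdout.Write(data)
		case data := <-mp.StderrChan:
			os.Stderr.Write(data)
		case status := <-mp.ExitChan:
			fmt.Println("exit code:", status.ExitCode())
			return nil
		}
	}
}
```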

107
agent/websocket.go Normal file

@ -0,0 +1,107 @@
package main
import (
"time"
"github.com/gorilla/websocket"
)
type ManagedWebsocket struct {
Socket *websocket.Conn
MessageType int
PingPeriod time.Duration
ReadTimeout time.Duration
WriteTimeout time.Duration
IncomingChan chan []byte
OutgoingChan chan []byte
CloseChan chan struct{}
ClosedChan chan struct{}
}
func (m *ManagedWebsocket) handleIncoming() {
pongChan := make(chan struct{}, 16)
m.Socket.SetPongHandler(func(string) error {
pongChan <- struct{}{}
return nil
})
msgChan := make(chan []byte, 16)
go func() {
defer close(msgChan)
for {
msgtype, data, err := m.Socket.ReadMessage()
if err != nil {
m.Socket.Close()
return
}
if msgtype != m.MessageType {
logWarnf("ignoring message of unexpected type %d", msgtype)
continue
}
msgChan <- data
}
}()
for {
m.Socket.SetReadDeadline(time.Now().Add(m.ReadTimeout))
var msgtype int
var msgdata []byte
select {
case <-pongChan:
msgtype = websocket.PongMessage
case data, ok := <-msgChan:
if !ok {
// Reader goroutine exited (socket closed); stop forwarding
// instead of spinning on the closed channel.
return
}
msgtype = m.MessageType
msgdata = data
}
if msgtype != m.MessageType {
continue
}
m.IncomingChan <- msgdata
}
}
func (m *ManagedWebsocket) handleOutgoing() {
pingTicker := time.NewTicker(m.PingPeriod)
defer pingTicker.Stop()
defer func() {
m.ClosedChan <- struct{}{}
}()
for {
var msgtype int
var msgdata []byte
select {
case <-pingTicker.C:
msgtype = websocket.PingMessage
msgdata = []byte{}
case data := <-m.OutgoingChan:
msgtype = m.MessageType
msgdata = data
case <-m.CloseChan:
msgtype = websocket.CloseMessage
msgdata = websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
}
wd := time.Now().Add(m.WriteTimeout)
m.Socket.SetWriteDeadline(wd)
err := m.Socket.WriteMessage(msgtype, msgdata)
if err != nil {
m.Socket.Close()
return
}
if msgtype == websocket.CloseMessage {
time.Sleep(wd.Sub(time.Now()))
m.Socket.Close()
return
}
}
}
func (m *ManagedWebsocket) Init() {
m.IncomingChan = make(chan []byte, 16)
m.OutgoingChan = make(chan []byte, 16)
m.CloseChan = make(chan struct{}, 16)
m.ClosedChan = make(chan struct{}, 16)
go m.handleIncoming()
go m.handleOutgoing()
}
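
As a usage sketch, here is a stripped-down handler that echoes every incoming message back over a ManagedWebsocket, mirroring how main.go drives this type (same package and imports as main.go assumed):

```
// Sketch: upgrade an HTTP request, then echo incoming messages back
// until either side closes the connection.
func echoHandler(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	ms := &ManagedWebsocket{
		Socket:       ws,
		MessageType:  websocket.TextMessage,
		PingPeriod:   5 * time.Second,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	ms.Init() // starts the ping/pong, reader, and writer goroutines
	defer func() { ms.CloseChan <- struct{}{} }()
	for {
		select {
		case data := <-ms.IncomingChan:
			ms.OutgoingChan <- data
		case <-ms.ClosedChan:
			return
		}
	}
}
```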

209
backend/k8s.js Normal file

@ -0,0 +1,209 @@
import * as k8sClient from "@kubernetes/client-node";
import lodash from "lodash";
const kubeconfig = new k8sClient.KubeConfig();
kubeconfig.loadFromDefault();
const k8s = kubeconfig.makeApiClient(k8sClient.CoreV1Api);
export function watchPods() {
const callbacks = {};
const pods = {};
// https://github.com/kubernetes-client/javascript/blob/1f76ee10c54e33a998abb4686488ccff4285366a/examples/typescript/informer/informer.ts
//
// The watch functionality seems to be wholly undocumented. Copy,
// paste, and pray.
const informer = k8sClient.makeInformer(
kubeconfig,
"/api/v1/namespaces/riju-user/pods",
() => k8s.listNamespacedPod("riju-user")
);
for (const event of ["add", "update", "delete"]) {
informer.on(event, (pod) => {
if (pod.metadata.name in callbacks) {
callbacks[pod.metadata.name](event, pod);
}
pods[pod.metadata.name] = pod;
if (event == "delete") {
delete callbacks[pod.metadata.name];
delete pods[pod.metadata.name];
}
});
}
informer.on("error", (err) => {
console.error(err);
setTimeout(() => informer.start(), 5000);
});
informer.start();
return {
setCallback: (podName, callback) => {
callbacks[podName] = callback;
if (podName in pods) {
callback("add", pods[podName]);
}
},
};
}
export async function listUserSessions() {
return (await k8s.listNamespacedPod("riju-user")).body.items.map((pod) => ({
podName: pod.metadata.name,
sessionID: pod.metadata.labels["riju.codes/user-session-id"],
}));
}
export async function createUserSession({
watcher,
sessionID,
langConfig,
revisions,
}) {
const pod = (
await k8s.createNamespacedPod("riju-user", {
metadata: {
name: `riju-user-session-${sessionID}`,
labels: {
"riju.codes/user-session-id": sessionID,
},
},
spec: {
volumes: [
{
name: "minio-config",
secret: {
secretName: "minio-user-login",
},
},
{
name: "riju-bin",
emptyDir: {},
},
],
imagePullSecrets: [
{
name: "registry-user-login",
},
],
initContainers: [
{
name: "download",
image: "minio/mc:RELEASE.2022-12-13T00-23-28Z",
resources: {},
command: ["sh", "-c"],
args: [
`mkdir -p /root/.mc && cp -LT /mc/config.json /root/.mc/config.json &&` +
`mc cp riju/agent/${revisions.agent} /riju-bin/agent && chmod +x /riju-bin/agent &&` +
`mc cp riju/ptyify/${revisions.ptyify} /riju-bin/ptyify && chmod +x /riju-bin/ptyify`,
],
volumeMounts: [
{
name: "minio-config",
mountPath: "/mc",
readOnly: true,
},
{
name: "riju-bin",
mountPath: "/riju-bin",
},
],
},
],
containers: [
{
name: "session",
image: `localhost:30999/riju-lang:${langConfig.id}-${revisions.langImage}`,
resources: {
requests: {},
limits: {
cpu: "1000m",
memory: "4Gi",
},
},
command: ["/riju-bin/agent"],
env: [
{
name: "RIJU_AGENT_COMMAND_PREFIX",
value: "runuser -u riju --",
},
],
securityContext: {
runAsUser: 0,
},
startupProbe: {
httpGet: {
path: "/health",
port: 869,
scheme: "HTTP",
},
failureThreshold: 30,
initialDelaySeconds: 0,
periodSeconds: 1,
successThreshold: 1,
timeoutSeconds: 2,
},
readinessProbe: {
httpGet: {
path: "/health",
port: 869,
scheme: "HTTP",
},
failureThreshold: 1,
initialDelaySeconds: 2,
periodSeconds: 10,
successThreshold: 1,
timeoutSeconds: 2,
},
livenessProbe: {
httpGet: {
path: "/health",
port: 869,
scheme: "HTTP",
},
failureThreshold: 3,
initialDelaySeconds: 2,
periodSeconds: 10,
successThreshold: 1,
timeoutSeconds: 2,
},
volumeMounts: [
{
name: "riju-bin",
mountPath: "/riju-bin",
readOnly: true,
},
],
},
],
restartPolicy: "Never",
},
})
).body;
const podIP = await new Promise((resolve, reject) => {
setTimeout(() => reject("timed out"), 5 * 60 * 1000);
watcher.setCallback(pod.metadata.name, (event, pod) => {
if (event == "delete") {
reject(new Error("pod was deleted"));
} else if (pod.status.phase === "Failed") {
reject(new Error("pod status became Failed"));
} else if (
pod.status.podIP &&
lodash.every(pod.status.containerStatuses, (status) => status.ready)
) {
resolve(pod.status.podIP);
} else {
console.log(event, JSON.stringify(pod.status, null, 2));
}
});
});
return podIP;
}
export async function deleteUserSessions(sessionsToDelete) {
for (const { podName } of sessionsToDelete) {
await k8s.deleteNamespacedPod(podName, "riju-user");
}
}
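
All three probes in the pod spec above poll the agent's /health endpoint, which agent/main.go serves on port 869. For reference, a client-side wait equivalent to the startupProbe might look like this sketch in Go (the pod IP is illustrative; in the backend it comes from the watcher):

```
package main

import (
	"fmt"
	"net/http"
	"time"
)

// Sketch: poll /health the way the startupProbe above does
// (periodSeconds: 1, failureThreshold: 30).
func waitHealthy(podIP string) error {
	url := fmt.Sprintf("http://%s:869/health", podIP)
	for i := 0; i < 30; i++ {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil // agent is up
			}
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("agent at %s never became healthy", podIP)
}

func main() {
	// Illustrative pod IP.
	if err := waitHealthy("10.0.0.1"); err != nil {
		fmt.Println(err)
	}
}
```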


@ -1,5 +1,4 @@
import fsOrig, { promises as fs } from "fs";
import path from "path";
import fsOrig from "fs";
import debounce from "debounce";

80
backend/sandbox-k8s.js Normal file

@ -0,0 +1,80 @@
import { spawn } from "child_process";
import { promises as fs } from "fs";
import process from "process";
import { readLangConfig } from "../lib/yaml.js";
import * as k8s from "./k8s.js";
import { getUUID, quote } from "./util.js";
function die(msg) {
console.error(msg);
process.exit(1);
}
async function main() {
const sandboxScript = await fs.readFile("backend/sandbox.bash", "utf-8");
const lang = process.env.L;
if (!lang) {
die("environment variable unset: $L");
}
const langConfig = await readLangConfig(lang);
console.log(`Checking for existing sessions`);
const existingSessions = await k8s.listUserSessions();
if (existingSessions.length > 0) {
console.log(`Killing ${existingSessions.length} existing session(s)`);
await k8s.deleteUserSessions(existingSessions);
}
const sessionID = getUUID();
console.log(`Starting session with UUID ${sessionID}`);
const watcher = k8s.watchPods();
await k8s.createUserSession({
watcher,
sessionID,
langConfig,
revisions: {
agent: "20221229-002450-semantic-moccasin-albatross",
ptyify: "20221228-023645-clean-white-gorilla",
langImage: "20221227-195753-forward-harlequin-wolverine",
},
});
// let buffer = "";
// await new Promise((resolve) => {
// session.stdout.on("data", (data) => {
// buffer += data.toString();
// let idx;
// while ((idx = buffer.indexOf("\n")) !== -1) {
// const line = buffer.slice(0, idx);
// buffer = buffer.slice(idx + 1);
// if (line === "riju: container ready") {
// resolve();
// } else {
// console.error(line);
// }
// }
// });
// });
// const args = [].concat.apply(
// ["riju-pty", "-f"],
// privilegedPty(
// { uuid },
// bash(
// `env L='${lang}' LANG_CONFIG=${quote(
// JSON.stringify(langConfig)
// )} bash --rcfile <(cat <<< ${quote(sandboxScript)})`
// )
// )
// );
// const proc = spawn(args[0], args.slice(1), {
// stdio: "inherit",
// });
// try {
// await new Promise((resolve, reject) => {
// proc.on("error", reject);
// proc.on("close", resolve);
// });
// } finally {
// session.kill();
// }
}
main().catch(die);


@ -1,17 +1,18 @@
#!/usr/bin/env bash
# This script is sourced by Bash within 'make sandbox'.
if [[ -z "$L" ]]; then
echo 'environment variable unset: $L' >&2
echo "environment variable unset: \$L" >&2
exit 1
fi
if [[ -z "$LANG_CONFIG" ]]; then
echo 'environment variable unset: $LANG_CONFIG' >&2
echo "environment variable unset: \$LANG_CONFIG" >&2
exit 1
fi
function get {
jq -r ".$1" <<< "${LANG_CONFIG}"
jq -r ".$1" <<<"${LANG_CONFIG}"
}
function has {
@ -24,21 +25,21 @@ function riju-exec {
function daemon {
if has daemon; then
echo "$(get daemon)"
get daemon
riju-exec "$(get daemon)"
fi
}
function setup {
if has setup; then
echo "$(get setup)"
get setup
riju-exec "$(get setup)"
fi
}
function repl {
if has repl; then
echo "$(get repl)"
get repl
riju-exec "$(get repl)"
fi
}
@ -47,22 +48,22 @@ function main {
if get main | grep -q /; then
mkdir -p "$(dirname "$(get main)")"
fi
: > "$(get main)"
has prefix && get prefix >> "$(get main)"
get template >> "$(get main)"
has suffix && get suffix >> "$(get main)"
: >"$(get main)"
has prefix && get prefix >>"$(get main)"
get template >>"$(get main)"
has suffix && get suffix >>"$(get main)"
}
function compile {
if has compile; then
echo "$(get compile)"
get compile
riju-exec "$(get compile)"
fi
}
function run-only {
if has run; then
echo "$(get run)"
get run
riju-exec "$(get run)"
fi
}
@ -73,18 +74,18 @@ function run {
function format {
if has format; then
echo "$(get format.run)"
get format.run
riju-exec "( $(get format.run) ) < $(get main)"
fi
}
function lsp {
if has lsp.setup; then
echo "$(get lsp.setup)"
get lsp.setup
riju-exec "$(get lsp.setup)"
fi
if has lsp; then
echo "$(get lsp.start)"
get lsp.start
riju-exec "$(get lsp.start)"
fi
}


@ -6,11 +6,9 @@ import { readLangConfig } from "../lib/yaml.js";
import {
bash,
getUUID,
privilegedExec,
privilegedPty,
privilegedSession,
quote,
run,
} from "./util.js";
function die(msg) {
@ -18,10 +16,6 @@ function die(msg) {
process.exit(1);
}
function log(msg) {
console.log(msg);
}
async function main() {
const sandboxScript = await fs.readFile("backend/sandbox.bash", "utf-8");
const lang = process.env.L;


@ -6,6 +6,7 @@ import cors from "cors"
import express from "express";
import ws from "express-ws";
import _ from "lodash";
import * as promClient from "prom-client";
import * as api from "./api.js";
import { aliases, langsPromise } from "./langs.js";
@ -15,12 +16,21 @@ import { log, privilegedTeardown } from "./util.js";
const host = process.env.HOST || "localhost";
const port = parseInt(process.env.PORT || "") || 6119;
const tlsPort = parseInt(process.env.TLS_PORT || "") || 6120;
const metricsPort = parseInt(process.env.METRICS_PORT || "") || 6121;
const useTLS = process.env.TLS ? true : false;
const analyticsTag = (process.env.ANALYTICS_TAG || "").replace(
/^'(.+)'$/,
"$1"
);
promClient.collectDefaultMetrics();
const metricsApp = express();
metricsApp.get("/metrics", async (_, res) => {
res.contentType("text/plain; version=0.0.4");
res.send(await promClient.register.metrics());
});
const langs = await langsPromise;
const app = express();
@ -142,3 +152,7 @@ if (useTLS) {
console.log(`Listening on http://${host}:${port}`)
);
}
metricsApp.listen(metricsPort, host, () =>
console.log(`Listening on http://${host}:${metricsPort}/metrics`)
);
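
A quick smoke test for the new metrics endpoint might look like this Go sketch; localhost and the default metrics port (6121) from the code above are assumptions.

```
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

// Sketch: fetch the Prometheus endpoint added above and print the
// content type (text/plain; version=0.0.4) and the payload.
func main() {
	resp, err := http.Get("http://localhost:6121/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Header.Get("Content-Type"))
	fmt.Println(string(body))
}
```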


@ -10,7 +10,7 @@ import { getTestHash } from "../lib/hash-test.js";
import * as api from "./api.js";
import { langsPromise } from "./langs.js";
import { shutdown } from "./shutdown.js";
import { getUUID, run } from "./util.js";
import { run } from "./util.js";
let langs = {};
@ -525,7 +525,7 @@ const testTypes = {
ensure: {
pred: ({ ensure }) => (ensure ? true : false),
},
run: { pred: (config) => true },
run: { pred: (_config) => true },
repl: {
pred: ({ repl }) => (repl ? true : false),
},


@ -1,5 +1,4 @@
import { spawn } from "child_process";
import os from "os";
import process from "process";
import * as Sentry from "@sentry/node";
@ -89,6 +88,14 @@ export async function run(args, log, options) {
});
}
export function privilegedList() {
return [rijuSystemPrivileged, "list"];
}
export function privilegedPull({ repo, tag }) {
return [rijuSystemPrivileged, "pull", repo, tag];
}
export function privilegedSession({ uuid, lang }) {
const cmdline = [rijuSystemPrivileged, "session", uuid, lang];
if (imageHashes[lang]) {


@ -1,4 +1,4 @@
module github.com/raxod502/riju/cli
module github.com/radian-software/riju/cli
go 1.16


@ -13,7 +13,7 @@ hesitate to open an issue!
Clone locally:
```
$ git clone https://github.com/raxod502/riju.git
$ git clone https://github.com/radian-software/riju.git
$ cd riju
```


@ -111,8 +111,8 @@ API. Generate one randomly with `pwgen -s 30 1`.
## Build web AMI
You'll want to run `set -a; . .env` to load in the new variables from
`.env`. Now run `make packer-web`. This will take up to 10 minutes to
build a timestamped AMI with a name like `riju-web-20210711223158`.
`.env`. Now run `make packer`. This will take up to 10 minutes to
build a timestamped AMI with a name like `riju-20210711223158`.
## Create local configuration (part 2 of 3)
@ -120,15 +120,16 @@ Add to `.env` the following contents:
```
# Terraform
AMI_NAME=riju-web-20210711223158
AMI_NAME=riju-20210711223158
AWS_REGION=us-west-1
DOMAIN=your.domain
S3_BUCKET=yourname-riju
S3_CONFIG_PATH=config.json
```
### AMI\_NAME
This is the AMI name from `make packer-web`.
This is the AMI name from `make packer`.
### AWS\_REGION
@ -168,7 +169,7 @@ infrastructure.
follow these steps:*
1. Update `.env` and make sure it is sourced (`set -a; . .env`).
2. Run `make packer-web` and get the name of the new AMI.
2. Run `make packer` and get the name of the new AMI.
3. Update it in `.env` under `AMI_NAME` and make sure the update is
sourced (`set -a; . .env`).
4. Run `terraform apply`.
@ -231,7 +232,7 @@ from the load balancer).
to upload the finished build artifacts to ECR, which amount to about
40 GB of data transfer. If you don't have a symmetric Internet plan at
home, you may need to do this on an EC2 instance instead. You can
provision one manually with at least 256 GB of disk space, install
provision one manually with at least 128 GB of disk space, install
Docker, clone down Riju, copy over your `.env` file, and proceed as if
you were running locally.)*


@ -49,9 +49,9 @@ requirements:
because it only runs on macOS, and [Docker](https://www.docker.com/)
is out because it can't be run inside Docker (without the
`--privileged` flag, which has unacceptable security drawbacks; see
[#29](https://github.com/raxod502/riju/issues/29)). Note, however,
that many Windows-based languages can be used successfully via
[Mono](https://www.mono-project.com/) or
[#29](https://github.com/radian-software/riju/issues/29)). Note,
however, that many Windows-based languages can be used successfully
via [Mono](https://www.mono-project.com/) or
[Wine](https://www.winehq.org/), such as
[Cmd](https://en.wikipedia.org/wiki/Cmd.exe),
[C#](https://en.wikipedia.org/wiki/C_Sharp_(programming_language)),
@ -60,7 +60,8 @@ requirements:
Here are some explicit *non-requirements*:
* *Language must be well-known.* Nope, I'll be happy to add your pet
project; after all, [Kalyn](https://github.com/raxod502/kalyn) and
project; after all,
[Kalyn](https://github.com/radian-software/kalyn) and
[Ink](https://github.com/thesephist/ink) are already supported.
* *Language must be useful.* I have no objection to adding everything
on the esolangs wiki, if there are interpreters/compilers available.


@ -2,7 +2,12 @@
set -euxo pipefail
pushd /tmp
latest_release() {
curl -sSL "https://api.github.com/repos/$1/releases/latest" | jq -r .tag_name
}
mkdir /tmp/riju-work
pushd /tmp/riju-work
export DEBIAN_FRONTEND=noninteractive
@ -39,6 +44,7 @@ dctrl-tools
docker-ce-cli
file
g++
gettext
git
golang
htop
@ -57,8 +63,9 @@ skopeo
ssh
strace
sudo
tmux
terraform
tmux
tree
unzip
uuid-runtime
vim
@ -77,7 +84,11 @@ npm install -g prettier
wget -nv https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -O awscli.zip
unzip -q awscli.zip
./aws/install
rm -rf aws awscli.zip
ver="$(latest_release grafana/cortex-tools | sed 's/^v//')"
wget -nv "https://github.com/grafana/cortex-tools/releases/download/v${ver}/cortextool_${ver}_linux_amd64.tar.gz" -O cortextool.tar.gz
tar -xf cortextool.tar.gz
cp cortextool /usr/local/bin/
rm -rf /var/lib/apt/lists/*
@ -86,5 +97,6 @@ tee /etc/sudoers.d/90-riju >/dev/null <<"EOF"
EOF
popd
rm -rf /tmp/riju-work
rm "$0"


@ -1,4 +1,5 @@
FROM riju:ubuntu AS build
# EOL: April 2027
FROM ubuntu:22.04 AS build
COPY docker/app/install-build.bash /tmp/
RUN /tmp/install-build.bash


@ -15,7 +15,7 @@ curl -sSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
ubuntu_ver="$(lsb_release -rs)"
ubuntu_name="$(lsb_release -cs)"
node_repo="$(curl -sS https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
node_repo="$(curl -sS https://deb.nodesource.com/setup_16.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
deb https://deb.nodesource.com/${node_repo} ${ubuntu_name} main


@ -1,4 +1,5 @@
FROM riju:ubuntu
# EOL: April 2027
FROM ubuntu:22.04
COPY docker/base/install.bash /tmp/
RUN /tmp/install.bash


@ -1,4 +1,4 @@
FROM ubuntu:rolling
FROM ubuntu:21.04
COPY docker/ci/install.bash /tmp/
RUN /tmp/install.bash


@ -1,4 +1,5 @@
FROM riju:ubuntu
# EOL: April 2027
FROM ubuntu:22.04
COPY docker/packaging/install.bash /tmp/
RUN /tmp/install.bash


@ -1,4 +1,5 @@
FROM riju:ubuntu
# EOL: April 2027
FROM ubuntu:22.04
COPY docker/runtime/install.bash /tmp/
RUN /tmp/install.bash
@ -10,4 +11,5 @@ WORKDIR /src
CMD ["bash"]
EXPOSE 6119
EXPOSE 6120
EXPOSE 6121
ENV HOST=0.0.0.0


@ -2,8 +2,8 @@
set -euxo pipefail
latest_release() {
curl -sSL "https://api.github.com/repos/$1/releases/latest" | jq -r .tag_name
latest_watchexec_release() {
curl -sSL "https://api.github.com/repos/$1/releases" | jq -c -r '[.[] | select(.tag_name | test("^cli-v"))] | first | .tag_name'
}
mkdir /tmp/riju-work
@ -23,7 +23,7 @@ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
ubuntu_name="$(lsb_release -cs)"
node_repo="$(curl -sS https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
node_repo="$(curl -sS https://deb.nodesource.com/setup_16.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
deb [arch=amd64] https://deb.nodesource.com/${node_repo} ${ubuntu_name} main
@ -72,7 +72,7 @@ apt-get install -y $(sed 's/#.*//' <<< "${packages}")
pip3 install poetry
ver="$(latest_release watchexec/watchexec | sed 's/^cli-v//')"
ver="$(latest_watchexec_release watchexec/watchexec | sed 's/^cli-v//')"
wget "https://github.com/watchexec/watchexec/releases/download/cli-v${ver}/watchexec-${ver}-x86_64-unknown-linux-gnu.deb"
apt-get install -y ./watchexec-*.deb

33
env.yaml.bash Executable file

@ -0,0 +1,33 @@
#!/usr/bin/env bash
set -euo pipefail
cd "$(dirname "$0")"
registry_password="$(pwgen -s 20 1)"
proxy_password="$(pwgen -s 20 1)"
cat <<EOF
networking:
  domain: riju.example.com # FIXME
  ip: x.y.z.w # FIXME
contact:
  letsEncryptEmail: ops@example.com # FIXME
letsEncryptProductionEnabled: false
metallb:
  secretkey: "$(pwgen -s 256 1)"
registry:
  password: "${registry_password}"
  htpasswd: "$(htpasswd -nbB admin "${registry_password}")"
minio:
  accessKey: "$(head -c16 /dev/urandom | xxd -p)"
  secretKey: "$(head -c16 /dev/urandom | xxd -p)"
proxy:
  password: "${proxy_password}"
  htpasswd: "$(htpasswd -nbB admin "${proxy_password}")"
EOF


@ -1,61 +0,0 @@
Riju :: $169.46
  CloudWatch :: $34.80
  EC2 :: $107.01
    Data Transfer :: $0.68
    EBS Snapshot :: $5.45
    EBS Volume :: $46.40
      EBS Volume :: $46.40
        gp2 :: $11.61
        gp3 :: $34.78
    Instance :: $54.48
      t2.small :: $0.04
      t3 :: $0.08
      t3.2xlarge :: $29.80
      t3.medium :: $14.77
      t3.small :: $9.78
  ECR :: $7.31
    Data Transfer :: $3.29
    Storage :: $4.02
  ELB :: $20.05
    Data Transfer :: $0.31
    LCUs :: $0.06
    Load Balancer :: $19.68
  S3 :: $0.29
COMMENTARY: This month was a disaster because AWS makes it really hard
to understand what exactly is going to run up your bill.
The most egregious thing here is CloudWatch. It turns out that if you
follow the official documentation for how to set up a CloudWatch alarm
on disk space for your EC2 instance, the default configuration has SSM
Agent creating a metric for *every* filesystem mounted on your
instance (in practice, one or more per Docker container), so I had
tens of thousands of metrics being shipped to CloudWatch, which is
expensive. I fixed this for August, bringing CloudWatch costs down to
effectively zero.

We have some charges for a t3.medium; these are from before I scaled
the server down to t3.small. The charges for that instance are also
higher than you'd expect because I was originally running two of them,
before scaling down to a singleton once I realized I was out of my
depth.

We had a couple of gp2 volumes (more expensive) before I migrated
everything to gp3. EBS costs are generally quite high here because not
only did I previously have two instances serving traffic, but I also
had a dev server. Each of those three instances had to have the full
256 GB data volume to store language images, which was ridiculously
expensive. I'm planning on keeping Riju as a singleton for a while
because of this issue, and relying on vertical scaling until that is
no longer feasible. The persistent dev server will be replaced by a
transient CI instance that can be spun up to do large rebuild
operations, mitigating EBS costs.

The t3.2xlarge is the dev server; this is mostly just tough luck,
since I did need to spend a lot of time building and rebuilding
language images, and those hours add up. Hopefully that won't be as
much of an issue going forward, now that the infrastructure is more
stable and we can get away without a dev server in general. But
fundamentally you can't do builds on your local laptop without a
symmetric Internet plan, because you need to upload around 100 GB for
a full rebuild.


@ -1,25 +0,0 @@
Riju :: $58.75
  EC2 :: $32.26
    Data Transfer :: $0.04
    EBS Snapshot :: $1.67
    EBS Volume :: $18.46
      EBS Volume :: $18.46
        gp2 :: $0.69
        gp3 :: $17.77
    Instance :: $12.09
      t3.small :: $12.09
  ECR :: $6.42
    Data Transfer :: $1.38
    Storage :: $5.05
  ELB :: $19.93
    Data Transfer :: $0.18
    LCUs :: $0.06
    Load Balancer :: $19.68
  S3 :: $0.13
COMMENTARY: I think we could save on ELB costs by migrating to Lambda;
see https://github.com/raxod502/riju/issues/93 for that. Otherwise,
the main thing to note about this month is that I had part of the
infrastructure spun down for a significant portion of it, as per
https://riju.statuspage.io/ (Aug 1 through Aug 16). So costs are
liable to increase next month now that we are in normal operation.


@ -1,27 +0,0 @@
Riju :: $81.55
  EC2 :: $57.02
    Data Transfer :: $0.02
    EBS Snapshot :: $1.97
    EBS Volume :: $26.82
      EBS Volume :: $26.82
        gp2 :: $1.01
        gp3 :: $25.81
    Instance :: $28.21
      t3.medium :: $19.01
      t3.small :: $9.21
  ECR :: $5.09
    Storage :: $5.09
  ELB :: $19.32
    Data Transfer :: $0.22
    LCUs :: $0.06
    Load Balancer :: $19.04
  S3 :: $0.12
COMMENTARY: We're starting to look pretty stable from month to month.
Naturally the costs are higher because we were operating the
infrastructure for the entire month this time, instead of being down
for half of it, but I think this cost is about what we should expect
to see going forward until changes are made.
I did realize, by the way, that we can't use Lambda to replace the
ELB, because that wouldn't support websockets. Oh well.


@ -1,18 +0,0 @@
Riju :: $106.77
  EC2 :: $81.38
    Data Transfer :: $0.03
    EBS Snapshot :: $2.36
    EBS Volume :: $28.57
      EBS Volume :: $28.57
        gp2 :: $1.07
        gp3 :: $27.49
    Instance :: $50.43
      t3.large :: $23.05
      t3.medium :: $27.38
  ECR :: $5.14
    Storage :: $5.14
  ELB :: $20.14
    Data Transfer :: $0.38
    LCUs :: $0.07
    Load Balancer :: $19.68
  S3 :: $0.11


@ -1,14 +1,4 @@
# Riju financials
This directory has a Python script that can download and analyze
billing data from AWS to determine how much Riju actually costs. This
information is then made publicly available in per-month
subdirectories here; for some months with unusual charges I've added
commentary to explain what was going on.
This information is then imported into [Riju's master budgeting
spreadsheet](https://docs.google.com/spreadsheets/d/15Us9KLXaJ6B1lNhrM6GV6JmmeKqNc8NNeTnaWiAhozw/edit?usp=sharing)
which compares spending to donations in order to determine whether we
are making a profit (we are not...). Once we start making a profit we
can start donating to the EFF as promised, or scale up Riju's
infrastructure to support more users for free.
This data has all moved to
[radian-software/financials](https://github.com/radian-software/financials).


@ -1,310 +0,0 @@
#!/usr/bin/env python3
import argparse
import collections
import csv
import decimal
import gzip
import io
import json
import logging
import os
import pathlib
import re
import sys
from urllib.parse import urlparse
import boto3
logging.basicConfig(level=logging.INFO)
ROOT = pathlib.Path(__file__).parent
def die(msg):
raise AssertionError(msg)
def get_csv(year, month, force_download=False):
target_dir = ROOT / f"{year}-{month:02d}"
logging.info(f"Using base directory {target_dir}")
target_dir.mkdir(exist_ok=True)
latest_csv = target_dir / "latest.csv"
if force_download or not latest_csv.exists():
try:
latest_csv.unlink()
except FileNotFoundError:
pass
s3 = boto3.client("s3")
o = urlparse(os.environ["BILLING_REPORTS_URL"], allow_fragments=False)
assert o.scheme == "s3"
bucket = o.netloc
base_prefix = o.path.strip("/") + "/"
report_name = base_prefix.rstrip("/").split("/")[-1]
logging.info(f"List s3://{bucket}/{base_prefix}")
month_prefixes = [
elt["Prefix"]
for elt in s3.list_objects_v2(
Bucket=bucket, Prefix=f"{base_prefix}", Delimiter="/"
)["CommonPrefixes"]
]
if not month_prefixes:
die("no report prefixes found")
expected_month_prefix = f"{base_prefix}{year}{month:02d}"
matching_month_prefixes = [
p for p in month_prefixes if p.startswith(expected_month_prefix)
]
if not matching_month_prefixes:
die(f"no report prefix for the specified month ({expected_month_prefix})")
if len(matching_month_prefixes) > 1:
die(f"multiple matching report prefixes: {repr(matching_month_prefixes)}")
(month_prefix,) = matching_month_prefixes
stream = io.BytesIO()
manifest_path = f"{month_prefix}{report_name}-Manifest.json"
logging.info(f"Download s3://{bucket}/{manifest_path} in-memory")
s3.download_fileobj(bucket, manifest_path, stream)
manifest = json.loads(stream.getvalue())
(report_path,) = manifest["reportKeys"]
if not report_path.endswith(".csv.gz"):
die(f"unexpected report extension in {report_path}")
logging.info(f"Get metadata for s3://{bucket}/{report_path}")
basename = s3.head_object(Bucket=bucket, Key=report_path)[
"LastModified"
].strftime("%Y-%m-%d")
logging.info(
f"Download s3://{bucket}/{report_path} to {target_dir.relative_to(ROOT)}/{basename}.csv.gz"
)
s3.download_file(bucket, report_path, f"{target_dir}/{basename}.csv.gz")
logging.info(f"Decompress {basename}.csv.gz")
with gzip.open(f"{target_dir}/{basename}.csv.gz") as f_read:
with open(f"{target_dir}/{basename}.csv", "wb") as f_write:
while chunk := f_read.read(1024):
f_write.write(chunk)
latest_csv.symlink_to(f"{basename}.csv")
return latest_csv
def read_csv(csv_path):
rows = []
with open(csv_path) as f:
reader = csv.reader(f)
header = next(reader)
for row in reader:
rows.append(dict((key, val) for (key, val) in zip(header, row) if val))
return rows
def get_tax_key(item):
service = item["lineItem/ProductCode"]
usage_type = item["lineItem/UsageType"]
if "DataTransfer" in usage_type:
service = "AWSDataTransfer"
return (service, usage_type)
def embed_taxes(items):
tax_items = collections.defaultdict(list)
usage_items = collections.defaultdict(list)
for item in items:
item_type = item["lineItem/LineItemType"]
if item_type == "Tax":
tax_items[get_tax_key(item)].append(item)
elif item_type == "Usage":
usage_items[get_tax_key(item)].append(item)
else:
die(f"unexpected line item type {repr(item_type)}")
for key in tax_items:
if key not in usage_items:
die(f"tax for {repr(key)} but no usage for that key")
tax_cost = sum(item["lineItem/UnblendedCost"] for item in tax_items[key])
usage_cost = sum(item["lineItem/UnblendedCost"] for item in usage_items[key])
tax_multiplier = (tax_cost + usage_cost) / usage_cost
for item in usage_items[key]:
item["lineItem/UnblendedCost"] *= tax_multiplier
return [item for group in usage_items.values() for item in group]
def classify_line_item(item, billing_month=None, full=False):
service = item["lineItem/ProductCode"]
usage_type = item["lineItem/UsageType"]
operation = item.get("lineItem/Operation")
resource = item.get("lineItem/ResourceId")
project = item.get("resourceTags/user:BillingCategory")
# In 2021-07, the first month that I was using AWS resources for
# Riju in a nontrivial capacity, I had subpar billing
# observability, so a lot of the resources aren't tagged
# correctly. So for that month specifically, I'm hacking in a
# couple of heuristics to tag the resources after the fact based
# on what I know about my usage of AWS.
if billing_month == "2021-07":
if resource and "riju" in resource.lower():
project = "Riju"
elif resource and "shallan" in resource.lower():
project = "Shallan"
elif resource and "veidt" in resource.lower():
project = "Veidt"
elif service == "AmazonCloudWatch":
project = "Riju"
elif (
service == "AmazonEC2"
and resource != "i-077884b74aba86bac"
and "ElasticIP:IdleAddress" not in usage_type
and "EBS:SnapshotUsage" not in usage_type
):
project = "Riju"
# AWS does not let you put tags on a public ECR repository,
# yippee.
if service == "AmazonECRPublic" and resource.endswith("repository/riju"):
project = "Riju"
category = [
"Uncategorized",
service,
usage_type,
operation or "(no operation)",
resource or "(no resource)",
]
if not full:
if service == "AmazonS3":
category = ["S3"]
elif service == "AmazonSNS":
category = ["SNS"]
elif service in ("AmazonECR", "AmazonECRPublic"):
category = ["ECR"]
if "DataTransfer" in usage_type:
category.append("Data Transfer")
elif "TimedStorage" in usage_type:
category.append("Storage")
else:
category.extend(
[
"Uncategorized",
usage_type,
operation or "(no operation)",
resource or "(no resource)",
]
)
elif service == "AmazonEC2":
category = ["EC2"]
if "ElasticIP:IdleAddress" in usage_type:
category.append("EIP")
# Apparently tags on EIPs are ignored for billing
# purposes, so we just have to know what we were using
# them for. (Leaving them uncategorized for 2021-07
# though.)
if billing_month != "2021-07":
project = "Corona"
elif "EBS:VolumeUsage" in usage_type:
category.append("EBS Volume")
category.extend(["EBS Volume", re.sub(r"^.+\.", "", usage_type)])
elif "EBS:SnapshotUsage" in usage_type:
category.append("EBS Snapshot")
elif (
"DataTransfer" in usage_type
or "In-Bytes" in usage_type
or "Out-Bytes" in usage_type
):
category.append("Data Transfer")
elif "BoxUsage" in usage_type or "CPUCredits" in usage_type:
category.extend(["Instance", re.sub(r"^.+:", "", usage_type)])
else:
category.extend(
[
"Uncategorized",
usage_type,
operation or "(no operation)",
resource or "(no resource)",
]
)
elif service == "AWSELB":
category = ["ELB"]
if "DataTransfer" in usage_type:
category.append("Data Transfer")
elif "LCUUsage" in usage_type:
category.append("LCUs")
elif "LoadBalancerUsage":
category.append("Load Balancer")
else:
category.extend(
[
"Uncategorized",
usage_type,
operation or "(no operation)",
resource or "(no resource)",
]
)
elif service == "AmazonCloudWatch":
category = ["CloudWatch"]
elif service == "awskms":
category = ["KMS"]
if not project:
category.extend(
[
usage_type,
operation or "(no operation)",
resource or "(no resource)",
]
)
return [project or "Uncategorized", *category]
def add_to_taxonomy(taxonomy, category, item):
if category:
categories = taxonomy.setdefault("categories", {})
add_to_taxonomy(categories.setdefault(category[0], {}), category[1:], item)
else:
taxonomy.setdefault("items", []).append(item)
taxonomy.setdefault("cost", 0)
taxonomy["cost"] += float(item["lineItem/UnblendedCost"])
def uncategorized_last(key):
return (key == "Uncategorized", key)
def print_taxonomy(taxonomy, indent="", file=sys.stdout):
cost = taxonomy["cost"]
categories = taxonomy.get("categories", {})
for category in sorted(categories, key=uncategorized_last):
subtaxonomy = categories[category]
cost = subtaxonomy["cost"]
if cost < 0.01:
continue
print(f"{indent}{category} :: ${cost:.2f}", file=file)
print_taxonomy(subtaxonomy, indent=indent + " ", file=file)
def classify_costs(csv_path, **kwargs):
all_items = [item for item in read_csv(csv_path)]
items = []
for item in all_items:
cost = item["lineItem/UnblendedCost"]
if cost and float(cost):
items.append({**item, "lineItem/UnblendedCost": float(cost)})
taxonomy = {}
for item in embed_taxes(items):
add_to_taxonomy(taxonomy, ["AWS", *classify_line_item(item, **kwargs)], item)
return taxonomy
def main():
parser = argparse.ArgumentParser()
parser.add_argument("date")
parser.add_argument("-f", "--force-download", action="store_true")
parser.add_argument("-w", "--write", action="store_true")
args = parser.parse_args()
year, month = map(int, args.date.split("-"))
billing_month = f"{year}-{month:02d}"
csv_path = get_csv(year, month, force_download=args.force_download)
taxonomy = classify_costs(csv_path, billing_month=billing_month)
print_taxonomy(taxonomy)
if args.write:
riju_taxonomy = taxonomy["categories"]["AWS"]
riju_taxonomy["categories"] = {"Riju": riju_taxonomy["categories"]["Riju"]}
target_dir = ROOT / f"{year}-{month:02d}"
with open(target_dir / "breakdown.txt", "w") as f:
print_taxonomy(riju_taxonomy, file=f)
if __name__ == "__main__":
main()
sys.exit(0)

135
financials/poetry.lock generated

@ -1,135 +0,0 @@
[[package]]
name = "boto3"
version = "1.18.23"
description = "The AWS SDK for Python"
category = "main"
optional = false
python-versions = ">= 3.6"
[package.dependencies]
botocore = ">=1.21.23,<1.22.0"
jmespath = ">=0.7.1,<1.0.0"
s3transfer = ">=0.5.0,<0.6.0"
[package.extras]
crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
[[package]]
name = "botocore"
version = "1.21.23"
description = "Low-level, data-driven core of boto 3."
category = "main"
optional = false
python-versions = ">= 3.6"
[package.dependencies]
jmespath = ">=0.7.1,<1.0.0"
python-dateutil = ">=2.1,<3.0.0"
urllib3 = ">=1.25.4,<1.27"
[package.extras]
crt = ["awscrt (==0.11.24)"]
[[package]]
name = "jmespath"
version = "0.10.0"
description = "JSON Matching Expressions"
category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
[[package]]
name = "python-dateutil"
version = "2.8.2"
description = "Extensions to the standard Python datetime module"
category = "main"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
[package.dependencies]
six = ">=1.5"
[[package]]
name = "python-dotenv"
version = "0.19.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
category = "main"
optional = false
python-versions = ">=3.5"
[package.extras]
cli = ["click (>=5.0)"]
[[package]]
name = "s3transfer"
version = "0.5.0"
description = "An Amazon S3 Transfer Manager"
category = "main"
optional = false
python-versions = ">= 3.6"
[package.dependencies]
botocore = ">=1.12.36,<2.0a.0"
[package.extras]
crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"]
[[package]]
name = "six"
version = "1.16.0"
description = "Python 2 and 3 compatibility utilities"
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
[[package]]
name = "urllib3"
version = "1.26.6"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
[package.extras]
brotli = ["brotlipy (>=0.6.0)"]
secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata]
lock-version = "1.1"
python-versions = "^3.9"
content-hash = "170b0bcf9f0ae12c4c9e1daa195ecdb39585494414b88e53e3da72916eb52c51"
[metadata.files]
boto3 = [
{file = "boto3-1.18.23-py3-none-any.whl", hash = "sha256:1b08ace99e7b92965780e5ce759430ad62b7b7e037560bc772f9a8789f4f36d2"},
{file = "boto3-1.18.23.tar.gz", hash = "sha256:31cc69e665f773390c4c17ce340d2420e45fbac51d46d945cc4a58d483ec5da6"},
]
botocore = [
{file = "botocore-1.21.23-py3-none-any.whl", hash = "sha256:3877d69e0b718b786f1696cd04ddbdb3a57aef6adb0239a29aa88754489849a4"},
{file = "botocore-1.21.23.tar.gz", hash = "sha256:d0146d31dbc475942b578b47dd5bcf94d18fbce8c6d2ce5f12195e005de9b754"},
]
jmespath = [
{file = "jmespath-0.10.0-py2.py3-none-any.whl", hash = "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f"},
{file = "jmespath-0.10.0.tar.gz", hash = "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9"},
]
python-dateutil = [
{file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
{file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
]
python-dotenv = [
{file = "python-dotenv-0.19.0.tar.gz", hash = "sha256:f521bc2ac9a8e03c736f62911605c5d83970021e3fa95b37d769e2bbbe9b6172"},
{file = "python_dotenv-0.19.0-py2.py3-none-any.whl", hash = "sha256:aae25dc1ebe97c420f50b81fb0e5c949659af713f31fdb63c749ca68748f34b1"},
]
s3transfer = [
{file = "s3transfer-0.5.0-py3-none-any.whl", hash = "sha256:9c1dc369814391a6bda20ebbf4b70a0f34630592c9aa520856bf384916af2803"},
{file = "s3transfer-0.5.0.tar.gz", hash = "sha256:50ed823e1dc5868ad40c8dc92072f757aa0e653a192845c94a3b676f4a62da4c"},
]
six = [
{file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]
urllib3 = [
{file = "urllib3-1.26.6-py2.py3-none-any.whl", hash = "sha256:39fb8672126159acb139a7718dd10806104dec1e2f0f6c88aab05d17df10c8d4"},
{file = "urllib3-1.26.6.tar.gz", hash = "sha256:f57b4c16c62fa2760b7e3d97c35b255512fb6b59a259730f36ba32ce9f8e342f"},
]

View File

@ -1,16 +0,0 @@
[tool.poetry]
name = "riju-financials"
version = "0.1.0"
description = "Financial data for Riju hosting"
authors = ["Radon Rosborough <radon.neon@gmail.com>"]
[tool.poetry.dependencies]
python = "^3.9"
boto3 = "^1.18.23"
python-dotenv = "^0.19.0"
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

View File

@ -25,9 +25,11 @@
<p>
<i>
Created by
<a href="https://github.com/raxod502">Radon Rosborough</a>.
<a href="https://github.com/raxod502">Radon Rosborough</a>
and maintained by
<a href="https://radian.codes">Radian LLC</a>.
Check out the project
<a href="https://github.com/raxod502/riju">on GitHub</a>.
<a href="https://github.com/radian-software/riju">on GitHub</a>.
</i>
</p>
<% } else { %>

View File

@ -0,0 +1,6 @@
receivers:
- name: pagerduty
pagerduty_configs:
- routing_key: "$PAGERDUTY_INTEGRATION_KEY"
route:
receiver: pagerduty

28
grafana/alerts.yaml Normal file
View File

@ -0,0 +1,28 @@
namespace: riju
groups:
- name: riju
rules:
- alert: NodeCPUHigh
annotations:
message: "Instance {{ $labels.node }} is running close to max CPU"
expr: |
sum(1 - rate(node_cpu_seconds_total{mode="idle"}[1m])) by (node) / count(sum(node_cpu_seconds_total{mode="idle"}) by (node, cpu)) by (node) * 100 >= 80
for: 30m
- alert: NodeMemoryHigh
annotations:
message: "Instance {{ $labels.node }} is running close to max memory"
expr: |
sum(1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) by (node) * 100 >= 80
for: 30m
- alert: RootVolumeFilling
annotations:
message: "Root volume on instance {{ $labels.node }} is close to full"
expr: |
(1 - sum (node_filesystem_free_bytes{mountpoint="/"}) by (node) / sum (node_filesystem_size_bytes{mountpoint="/"}) by (node)) * 100 >= 80
for: 30m
- alert: DataVolumeFilling
annotations:
message: "Data volume on instance {{ $labels.node }} is close to full"
expr: |
(1 - sum (node_filesystem_free_bytes{mountpoint="/mnt/riju"}) by (node) / sum (node_filesystem_size_bytes{mountpoint="/mnt/riju"}) by (node)) * 100 >= 80
for: 30m

902
grafana/dashboard.json Normal file
View File

@ -0,0 +1,902 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"target": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
},
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 4,
"iteration": 1644689175462,
"links": [],
"liveNow": false,
"panels": [
{
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 15,
"title": "Server metrics",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 1
},
"id": 16,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum(rate(process_cpu_seconds_total{node=~\"$node\",job=\"server\"}[1m])) by (node) / count(sum(node_cpu_seconds_total{node=~\"$node\",mode=\"idle\"}) by (node, cpu)) by (node) * 100",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "B"
}
],
"title": "CPU Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 1
},
"id": 17,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum(process_resident_memory_bytes{node=~\"$node\",job=\"server\"} / node_memory_MemTotal_bytes{node=~\"$node\"}) by (node) * 100",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Memory Utilization",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 9
},
"id": 6,
"panels": [],
"title": "Instance metrics",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 10
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum(1 - rate(node_cpu_seconds_total{node=~\"$node\",mode=\"idle\"}[1m])) by (node) / count(sum(node_cpu_seconds_total{node=~\"$node\",mode=\"idle\"}) by (node, cpu)) by (node) * 100",
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "CPU Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 10
},
"id": 9,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum(1 - node_memory_MemAvailable_bytes{node=~\"$node\"} / node_memory_MemTotal_bytes{node=~\"$node\"}) by (node) * 100",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "B"
}
],
"title": "Memory Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "MBs"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 18
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum (rate(node_network_receive_bytes_total{node=~\"$node\"}[1m])) by (node) / 1e6",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Network Traffic Received",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "KBs"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 18
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "sum (rate(node_network_transmit_bytes_total{node=~\"$node\"}[1m])) by (node) / 1e3",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Network Traffic Sent",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 26
},
"id": 12,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "(1 - sum (node_filesystem_free_bytes{node=~\"$node\",mountpoint=\"/\"}) by (node) / sum (node_filesystem_size_bytes{node=~\"$node\",mountpoint=\"/\"}) by (node)) * 100",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Root Volume Disk Utilization",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"max": 100,
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 26
},
"id": 13,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "grafanacloud-prom"
},
"exemplar": true,
"expr": "(1 - sum (node_filesystem_free_bytes{node=~\"$node\",mountpoint=\"/mnt/riju\"}) by (node) / sum (node_filesystem_size_bytes{node=~\"$node\",mountpoint=\"/mnt/riju\"}) by (node)) * 100",
"hide": false,
"interval": "",
"legendFormat": "",
"refId": "A"
}
],
"title": "Data Volume Disk Utilization",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 34
},
"id": 2,
"panels": [],
"title": "Logs",
"type": "row"
},
{
"datasource": {
"type": "loki",
"uid": "grafanacloud-logs"
},
"gridPos": {
"h": 12,
"w": 24,
"x": 0,
"y": 35
},
"id": 4,
"options": {
"dedupStrategy": "none",
"enableLogDetails": true,
"prettifyLogMessage": false,
"showCommonLabels": false,
"showLabels": false,
"showTime": true,
"sortOrder": "Ascending",
"wrapLogMessage": false
},
"targets": [
{
"datasource": {
"type": "loki",
"uid": "grafanacloud-logs"
},
"expr": "{source=~\"$log_source\",node=~\"$node\"} | regexp \"(?P<log>.+)\" | line_format \"{{ .node }} {{ .log }}\"",
"maxLines": 50,
"queryType": "randomWalk",
"refId": "A"
}
],
"title": "Logs",
"type": "logs"
}
],
"refresh": "5s",
"schemaVersion": 34,
"style": "dark",
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": ["All"],
"value": ["$__all"]
},
"datasource": {
"type": "loki",
"uid": "grafanacloud-logs"
},
"definition": "label_values(node)",
"hide": 0,
"includeAll": true,
"label": "",
"multi": true,
"name": "node",
"options": [],
"query": "label_values(node)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
},
{
"current": {
"selected": false,
"text": ["All"],
"value": ["$__all"]
},
"datasource": {
"type": "loki",
"uid": "grafanacloud-logs"
},
"definition": "label_values(source)",
"hide": 0,
"includeAll": true,
"multi": true,
"name": "log_source",
"options": [],
"query": "label_values(source)",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 1,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "Riju",
"uid": "mx3ZlzMnk",
"version": 30,
"weekStart": ""
}

105
k8s/cluster.yaml Normal file
View File

@ -0,0 +1,105 @@
apiVersion: k0s.k0sproject.io/v1beta1
kind: ClusterConfig
metadata:
creationTimestamp: null
name: k0s
spec:
api:
address: 192.168.0.216
k0sApiPort: 9443
port: 6443
sans:
- 192.168.0.216
- 192.168.122.1
- 172.21.0.1
- 172.17.0.1
- 172.23.0.1
- 10.88.0.1
- 10.244.0.1
- 2601:646:4000:3060::be49
- 2601:646:4000:3060:8b13:5b76:2703:28f
- 2601:646:4000:3060:683c:7a51:eee8:2eb
- fe80::1fbd:2949:a12e:cedf
- fe80::42:d5ff:fe58:a84f
- fe80::844c:59ff:fe46:20dc
- fe80::20e5:9dff:fe7a:4698
- fe80::f86c:22ff:feb0:59ac
- fe80::b0d0:51ff:fe45:ce31
- fe80::3cd5:9eff:fed1:5f72
tunneledNetworkingMode: false
controllerManager: {}
extensions:
helm:
charts: null
repositories: null
storage:
type: openebs_local_storage
images:
calico:
cni:
image: docker.io/calico/cni
version: v3.24.5
kubecontrollers:
image: docker.io/calico/kube-controllers
version: v3.24.5
node:
image: docker.io/calico/node
version: v3.24.5
coredns:
image: docker.io/coredns/coredns
version: 1.9.4
default_pull_policy: IfNotPresent
konnectivity:
image: quay.io/k0sproject/apiserver-network-proxy-agent
version: 0.0.32-k0s1
kubeproxy:
image: registry.k8s.io/kube-proxy
version: v1.25.4
kuberouter:
cni:
image: docker.io/cloudnativelabs/kube-router
version: v1.5.1
cniInstaller:
image: quay.io/k0sproject/cni-node
version: 1.1.1-k0s.0
metricsserver:
image: registry.k8s.io/metrics-server/metrics-server
version: v0.6.1
pushgateway:
image: quay.io/k0sproject/pushgateway-ttl
version: edge@sha256:7031f6bf6c957e2fdb496161fe3bea0a5bde3de800deeba7b2155187196ecbd9
installConfig:
users:
etcdUser: etcd
kineUser: kube-apiserver
konnectivityUser: konnectivity-server
kubeAPIserverUser: kube-apiserver
kubeSchedulerUser: kube-scheduler
konnectivity:
adminPort: 8133
agentPort: 8132
network:
calico: null
clusterDomain: cluster.local
dualStack: {}
kubeProxy:
mode: iptables
kuberouter:
autoMTU: true
hairpinMode: false
metricsPort: 8080
mtu: 0
peerRouterASNs: ""
peerRouterIPs: ""
podCIDR: 10.244.0.0/16
provider: kuberouter
serviceCIDR: 10.96.0.0/12
scheduler: {}
storage:
etcd:
externalCluster: null
peerAddress: 192.168.0.216
type: etcd
telemetry:
enabled: true
status: {}

View File

@ -0,0 +1,9 @@
---
kind: IPAddressPool
apiVersion: metallb.io/v1beta1
metadata:
namespace: metallb
name: self
spec:
addresses:
- "{{ .networking.ip }}/32"

1108
k8s/metallb-crds.yaml Normal file

File diff suppressed because it is too large Load Diff

352
k8s/metallb-rbac.yaml Normal file
View File

@ -0,0 +1,352 @@
# Based on bitnami/metallb helm chart 4.1.12 for metallb 0.13.7
---
kind: ServiceAccount
apiVersion: v1
metadata:
namespace: metallb
name: metallb-controller
automountServiceAccountToken: true
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: metallb-controller
rules:
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- services/status
verbs:
- update
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- metallb-controller
resources:
- podsecuritypolicies
verbs:
- use
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-controller
subjects:
- kind: ServiceAccount
namespace: metallb
name: metallb-controller
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: metallb-controller
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: metallb
name: metallb-controller
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
resourceNames:
- metallb-memberlist
verbs:
- list
- apiGroups:
- apps
resources:
- deployments
resourceNames:
- metallb-controller
verbs:
- get
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- metallb.io
resources:
- addresspools
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- ipaddresspools
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- bgppeers
verbs:
- get
- list
- apiGroups:
- metallb.io
resources:
- bgpadvertisements
verbs:
- get
- list
- apiGroups:
- metallb.io
resources:
- l2advertisements
verbs:
- get
- list
- apiGroups:
- metallb.io
resources:
- communities
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- bfdprofiles
verbs:
- get
- list
- watch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: metallb
name: metallb-controller
subjects:
- kind: ServiceAccount
namespace: metallb
name: metallb-controller
roleRef:
kind: Role
apiGroup: rbac.authorization.k8s.io
name: metallb-controller
---
kind: ServiceAccount
apiVersion: v1
metadata:
namespace: metallb
name: metallb-speaker
automountServiceAccountToken: true
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: metallb-speaker
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- policy
resourceNames:
- metallb-speaker
resources:
- podsecuritypolicies
verbs:
- use
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metallb-speaker
subjects:
- kind: ServiceAccount
namespace: metallb
name: metallb-speaker
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: metallb-speaker
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: metallb
name: metallb-pod-lister
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- addresspools
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- bfdprofiles
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- bgppeers
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- l2advertisements
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- bgpadvertisements
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- ipaddresspools
verbs:
- get
- list
- watch
- apiGroups:
- metallb.io
resources:
- communities
verbs:
- get
- list
- watch
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: metallb
name: metallb-pod-lister
roleRef:
kind: Role
apiGroup: rbac.authorization.k8s.io
name: metallb-pod-lister
subjects:
- kind: ServiceAccount
name: metallb-speaker

326
k8s/metallb.yaml Normal file
View File

@ -0,0 +1,326 @@
# Based on bitnami/metallb helm chart 4.1.12 for metallb 0.13.7
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
namespace: metallb
name: metallb-speaker
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
app: metallb-speaker
template:
metadata:
labels:
app: metallb-speaker
spec:
serviceAccountName: metallb-speaker
hostNetwork: true
securityContext:
fsGroup: 0
terminationGracePeriodSeconds: 2
containers:
- name: metallb-speaker
image: "docker.io/bitnami/metallb-speaker:0.13.7-debian-11-r8"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
drop:
- ALL
readOnlyRootFilesystem: true
runAsUser: 0
args:
- "--port=7472"
env:
- name: METALLB_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: METALLB_HOST
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: METALLB_ML_BIND_ADDR
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: METALLB_ML_LABELS
value: app=metallb-speaker
- name: METALLB_ML_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: METALLB_ML_SECRET_KEY
valueFrom:
secretKeyRef:
name: metallb-memberlist
key: secretkey
ports:
- name: metrics
containerPort: 7472
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /metrics
port: metrics
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /metrics
port: metrics
resources: {}
---
kind: Secret
apiVersion: v1
metadata:
namespace: metallb
name: webhook-server-cert
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: metallb
name: metallb-controller
labels:
app.kubernetes.io/name: metallb
spec:
replicas: 1
strategy:
type: RollingUpdate
revisionHistoryLimit: 3
selector:
matchLabels:
app: metallb-controller
template:
metadata:
labels:
app: metallb-controller
spec:
serviceAccountName: metallb-controller
securityContext:
fsGroup: 1001
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
containers:
- name: metallb-controller
image: "docker.io/bitnami/metallb-controller:0.13.7-debian-11-r9"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1001
args:
- --port=7472
- --cert-service-name=metallb-webhook-service
ports:
- name: webhook-server
containerPort: 9443
- name: metrics
containerPort: 7472
volumeMounts:
- name: cert
mountPath: /tmp/k8s-webhook-server/serving-certs
readOnly: true
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /metrics
port: metrics
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /metrics
port: metrics
resources: {}
---
kind: Service
apiVersion: v1
metadata:
namespace: metallb
name: metallb-webhook-service
spec:
ports:
- port: 443
targetPort: 9443
selector:
app: metallb-controller
---
kind: ValidatingWebhookConfiguration
apiVersion: admissionregistration.k8s.io/v1
metadata:
name: metallb-webhook-configuration
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-addresspool
failurePolicy: Fail
name: addresspoolvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- addresspools
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta2-bgppeer
failurePolicy: Fail
name: bgppeervalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta2
operations:
- CREATE
- UPDATE
resources:
- bgppeers
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-ipaddresspool
failurePolicy: Fail
name: ipaddresspoolvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- ipaddresspools
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-bgpadvertisement
failurePolicy: Fail
name: bgpadvertisementvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- bgpadvertisements
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-community
failurePolicy: Fail
name: communityvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- communities
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-bfdprofile
failurePolicy: Fail
name: bfdprofileyvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- DELETE
resources:
- bfdprofiles
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
namespace: metallb
name: metallb-webhook-service
path: /validate-metallb-io-v1beta1-l2advertisement
failurePolicy: Fail
name: l2advertisementvalidationwebhook.metallb.io
rules:
- apiGroups:
- metallb.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- l2advertisements
sideEffects: None

23
k8s/namespaces.yaml Normal file
View File

@ -0,0 +1,23 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: traefik
---
kind: Namespace
apiVersion: v1
metadata:
name: metallb
---
kind: Namespace
apiVersion: v1
metadata:
name: riju
---
kind: Namespace
apiVersion: v1
metadata:
name: riju-user

25
k8s/provisioning.md Normal file
View File

@ -0,0 +1,25 @@
```bash
curl -sSLf https://get.k0s.sh | sudo sh
sudo mkdir -p /etc/k0s
k0s config create | sudo tee /etc/k0s/k0s.yaml >/dev/null
```
Edit `/etc/k0s/k0s.yaml` so that it contains this config:
```yaml
spec:
extensions:
storage:
type: openebs_local_storage
```
```bash
sudo k0s install controller --single
sudo k0s start
```
Then, from the client machine, fetch the kubeconfig:
```bash
ssh riju-k8s sudo -S k0s kubeconfig admin > ~/.kube/config
```
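To sanity-check the cluster from the client (assuming `kubectl` is installed there), something like the following should show the single controller node and the system pods:

```bash
# The node should report Ready once k0s finishes bootstrapping.
kubectl get nodes
# Core components (coredns, kube-router, metrics-server, ...) run in kube-system.
kubectl get pods -n kube-system
```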

View File

@ -0,0 +1,102 @@
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
namespace: riju
name: docker-registry
spec:
replicas: 1
serviceName: docker-registry
selector:
matchLabels:
app: docker-registry
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ReadWriteOnce]
resources:
requests:
storage: 128Gi
storageClassName: openebs-hostpath
template:
metadata:
labels:
app: docker-registry
spec:
volumes:
- name: auth
secret:
secretName: registry-auth
containers:
- name: registry
image: "registry:2"
resources: {}
readinessProbe:
httpGet:
path: /
port: 5000
scheme: HTTP
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /
port: 5000
scheme: HTTP
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
env:
- name: REGISTRY_AUTH
value: htpasswd
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: "Registry Realm"
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: /var/run/registry/auth/htpasswd
ports:
- name: api
containerPort: 5000
volumeMounts:
- name: auth
mountPath: /var/run/registry/auth
- name: data
mountPath: /var/lib/registry
---
kind: Service
apiVersion: v1
metadata:
namespace: riju
name: docker-registry
spec:
selector:
app: docker-registry
type: NodePort
ports:
- name: api
port: 80
nodePort: 30999
targetPort: 5000
---
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
metadata:
namespace: riju
name: docker-registry
spec:
entryPoints:
- docker
routes:
- kind: Rule
match: "PathPrefix(`/`)"
services:
- namespace: riju
name: docker-registry
port: 80

101
k8s/riju-minio.yaml Normal file
View File

@ -0,0 +1,101 @@
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
namespace: riju
name: minio
spec:
replicas: 1
serviceName: minio
selector:
matchLabels:
app: minio
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ReadWriteOnce]
resources:
requests:
storage: 16Gi
storageClassName: openebs-hostpath
template:
metadata:
labels:
app: minio
spec:
containers:
- name: minio
image: "minio/minio:RELEASE.2022-12-12T19-27-27Z"
resources: {}
readinessProbe:
httpGet:
path: /minio/health/live
port: 9000
scheme: HTTP
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /minio/health/live
port: 9000
scheme: HTTP
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
args:
- "server"
- "/data"
env:
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: minio-keys
key: access-key
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: minio-keys
key: secret-key
ports:
- name: api
containerPort: 9000
volumeMounts:
- name: data
mountPath: /data
---
kind: Service
apiVersion: v1
metadata:
namespace: riju
name: minio
spec:
selector:
app: minio
ports:
- name: api
port: 80
targetPort: 9000
---
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
metadata:
namespace: riju
name: minio
spec:
entryPoints:
- minio
routes:
- kind: Rule
match: "PathPrefix(`/`)"
services:
- namespace: riju
name: minio
port: 80

117
k8s/riju-proxy.yaml Normal file
View File

@ -0,0 +1,117 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
namespace: riju
name: riju-proxy-config
data:
default.conf: |
underscores_in_headers on;
server {
resolver kube-dns.kube-system.svc.cluster.local;
listen 1869 default_server;
auth_basic "Riju administrative proxy";
auth_basic_user_file /etc/nginx/passwd;
location ~ /(10\.[0-9]+\.[0-9]+\.[0-9]+)/health {
proxy_pass http://$1:869/health;
}
location ~ /(10\.[0-9]+\.[0-9]+\.[0-9]+)/exec {
proxy_pass http://$1:869/exec$is_args$args;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
}
location / {
return 404;
}
}
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: riju
name: riju-proxy
spec:
replicas: 1
selector:
matchLabels:
app: riju-proxy
template:
metadata:
labels:
app: riju-proxy
spec:
volumes:
- name: config
configMap:
name: riju-proxy-config
- name: auth
secret:
secretName: riju-proxy-auth
containers:
- name: nginx
image: "nginx:1.23"
resources: {}
readinessProbe:
tcpSocket:
port: 1869
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
tcpSocket:
port: 1869
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
ports:
- name: http
containerPort: 1869
volumeMounts:
- name: config
mountPath: /etc/nginx/conf.d
- name: auth
mountPath: /etc/nginx/passwd
subPath: htpasswd
---
kind: Service
apiVersion: v1
metadata:
namespace: riju
name: riju-proxy
spec:
selector:
app: riju-proxy
ports:
- name: http
port: 1869
targetPort: 1869
---
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
metadata:
namespace: riju
name: riju-proxy
spec:
entryPoints:
- proxy
routes:
- kind: Rule
match: "PathPrefix(`/`)"
services:
- namespace: riju
name: riju-proxy
port: 1869

89
k8s/riju-server.yaml Normal file
View File

@ -0,0 +1,89 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: riju
name: riju-server
spec:
replicas: 1
selector:
matchLabels:
app: riju-server
template:
metadata:
labels:
app: riju-server
spec:
volumes:
- name: cache
hostPath:
path: /var/cache/riju
- name: docker
hostPath:
path: /var/run/docker.sock
imagePullSecrets:
- name: registry-login
containers:
- name: server
image: "localhost:30999/riju:app"
resources: {}
readinessProbe:
httpGet:
path: /
port: 6119
scheme: HTTP
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /
port: 6119
scheme: HTTP
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
ports:
- name: http
containerPort: 6119
volumeMounts:
- name: cache
mountPath: /var/cache/riju
- name: docker
mountPath: /var/run/docker.sock
readOnly: true
---
kind: Service
apiVersion: v1
metadata:
namespace: riju
name: riju-server
spec:
selector:
app: riju-server
ports:
- name: http
port: 80
targetPort: 6119
---
kind: IngressRoute
apiVersion: traefik.containo.us/v1alpha1
metadata:
namespace: riju
name: riju-server
spec:
entryPoints:
- https
routes:
- kind: Rule
match: "PathPrefix(`/`)"
services:
- namespace: riju
name: riju-server
port: 80

95
k8s/secrets.in.yaml Normal file
View File

@ -0,0 +1,95 @@
---
kind: Secret
apiVersion: v1
metadata:
namespace: metallb
name: metallb-memberlist
data:
secretkey: "{{ .metallb.secretkey | b64enc }}"
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju
name: registry-auth
data:
htpasswd: "{{ .registry.htpasswd | println | b64enc }}"
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju
name: registry-login
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: |
{
"auths": {
"localhost:30999": {
"username": "admin",
"password": "{{ .registry.password }}",
"auth": "{{ .registry.password | printf "admin:%s" | b64enc }}"
}
}
}
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju-user
name: registry-user-login
type: kubernetes.io/dockerconfigjson
stringData:
.dockerconfigjson: |
{
"auths": {
"localhost:30999": {
"username": "admin",
"password": "{{ .registry.password }}",
"auth": "{{ .registry.password | printf "admin:%s" | b64enc }}"
}
}
}
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju
name: minio-keys
stringData:
access-key: "{{ .minio.accessKey }}"
secret-key: "{{ .minio.secretKey }}"
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju-user
name: minio-user-login
stringData:
config.json: |
{
"version": "10",
"aliases": {
"riju": {
"url": "http://minio.riju.svc",
"accessKey": "{{ .minio.accessKey }}",
"secretKey": "{{ .minio.secretKey }}",
"api": "s3v4",
"path": "auto"
}
}
}
---
kind: Secret
apiVersion: v1
metadata:
namespace: riju
name: riju-proxy-auth
data:
htpasswd: "{{ .proxy.htpasswd | println | b64enc }}"

View File

@ -0,0 +1,60 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
namespace: traefik
name: traefik-config
data:
traefik.yaml: |
entryPoints:
proxy:
address: ":1869"
http:
tls:
certResolver: riju
domains:
- main: k8s.riju.codes
http:
address: ":8000"
https:
address: ":8443"
http:
tls:
certResolver: riju
domains:
- main: k8s.riju.codes
healthcheck:
address: ":9000"
metrics:
address: ":9100"
docker:
address: ":31000"
http:
tls:
certResolver: riju
domains:
- main: k8s.riju.codes
minio:
address: ":32000"
http:
tls:
certResolver: riju
domains:
- main: k8s.riju.codes
ping:
entryPoint: "healthcheck"
metrics:
prometheus:
entryPoint: "metrics"
providers:
kubernetesCRD: {}
certificatesResolvers:
riju:
acme:
{{- if not .contact.letsEncryptProductionEnabled }}
caServer: https://acme-staging-v02.api.letsencrypt.org/directory
{{- end }}
email: "{{ .contact.letsEncryptEmail }}"
storage: /data/acme.json
httpChallenge:
entryPoint: http

2288
k8s/traefik-crds.yaml Normal file

File diff suppressed because it is too large Load Diff

72
k8s/traefik-rbac.yaml Normal file
View File

@ -0,0 +1,72 @@
# Based on traefik/traefik helm chart 20.8.0 for traefik v2.9.6
---
kind: ServiceAccount
apiVersion: v1
metadata:
namespace: traefik
name: traefik
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
rules:
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingressclasses
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.containo.us
resources:
- ingressroutes
- ingressroutetcps
- ingressrouteudps
- middlewares
- middlewaretcps
- tlsoptions
- tlsstores
- traefikservices
- serverstransports
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik
roleRef:
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
name: traefik
subjects:
- namespace: traefik
kind: ServiceAccount
name: traefik

150
k8s/traefik.yaml Normal file
View File

@ -0,0 +1,150 @@
# Based on traefik/traefik helm chart 20.8.0 for traefik v2.9.6
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
namespace: traefik
name: traefik-data
spec:
accessModes: [ReadWriteOnce]
resources:
requests:
storage: 128Mi
storageClassName: openebs-hostpath
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: traefik
name: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
minReadySeconds: 0
template:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9100"
labels:
app: traefik
spec:
serviceAccountName: traefik
terminationGracePeriodSeconds: 60
hostNetwork: false
initContainers:
- name: volume-permissions
image: busybox:1.35
command:
- "sh"
- "-c"
- "touch /data/acme.json && chmod -Rv 600 /data/* && chown 65532:65532 /data/acme.json"
volumeMounts:
- name: data
mountPath: /data
containers:
- image: traefik:v2.9.6
imagePullPolicy: IfNotPresent
name: traefik
resources: {}
readinessProbe:
httpGet:
path: /ping
port: 9000
scheme: HTTP
failureThreshold: 1
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
httpGet:
path: /ping
port: 9000
scheme: HTTP
failureThreshold: 3
initialDelaySeconds: 2
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
ports:
- name: http
containerPort: 8000
- name: https
containerPort: 8443
- name: ping
containerPort: 9000
- name: docker
containerPort: 31000
securityContext:
capabilities:
drop:
- ALL
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
volumeMounts:
- name: config
mountPath: /etc/traefik
- name: data
mountPath: /data
- name: tmp
mountPath: /tmp
volumes:
- name: config
configMap:
name: traefik-config
- name: data
persistentVolumeClaim:
claimName: traefik-data
- name: tmp
emptyDir: {}
securityContext:
fsGroup: 65532
---
kind: Service
apiVersion: v1
metadata:
namespace: traefik
name: traefik
annotations:
metallb.universe.tf/allow-shared-ip: main
spec:
type: LoadBalancer
selector:
app: traefik
ports:
- port: 80
name: http
targetPort: 8000
- port: 443
name: https
targetPort: 8443
- port: 1869
name: proxy
- port: 31000
name: docker
- port: 32000
name: minio
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller

44
langs/ante.yaml Normal file
View File

@ -0,0 +1,44 @@
id: "ante"
aliases:
- "an"
name: "Ante"
install:
prepare:
apt:
- cargo
- cmake
- libssl-dev
- pkg-config
- python3-distutils
manual: |
export PATH="$HOME/.cargo/bin:$PATH"
cargo install llvmenv
llvmenv init
# If compiler is not explicitly set to LLVM, then we get an
# error: unrecognized command-line option '-Wnewline-eof'.
CC=/usr/bin/clang CXX=/usr/bin/clang++ llvmenv build-entry -G Makefile -j$(nproc) 10.0.1
llvmenv global 10.0.1
manual: |
git clone https://github.com/jfecher/ante.git -n
pushd ante
git checkout ba940f3b492fb448a6a73b139403eefa7a0daedc
LLVM_SYS_100_PREFIX="$(llvmenv prefix)" cargo build --release
install -d "${pkg}/opt/ante"
install -d "${pkg}/usr/local/bin"
cp target/release/ante "${pkg}/usr/local/bin/"
cp -R stdlib "${pkg}/opt/ante/"
popd
setup: |
mkdir -p "$HOME/.config/ante"
cp -R /opt/ante/stdlib "$HOME/.config/ante/"
main: "main.an"
template: |
print "Hello, world!"
compile: |
ante main.an
run: |
./main

107
langs/claro.yaml Normal file
View File

@ -0,0 +1,107 @@
id: "claro"
name: "Claro"
info:
impl: "Claro"
year: 2021
desc: "High-level toy programming language providing standardized Software Engineering best practices out of the box"
ext:
- claro
web:
home: "https://clarolang.com/"
source: "https://github.com/JasonSteving99/claro-lang"
category: general
mode:
- compiled
- interpreted
platform: []
syntax:
- c
typing: static
paradigm:
- functional
- imperative
usage: personal
install:
apt:
- default-jdk
manual: |
install -d "${pkg}/opt/claro/programs"
# Pull resources from the latest Claro repo release.
ver="$(latest_release JasonSteving99/claro-lang | sed 's/^v//')"
# Pull the tarball of the built Bazel repo's bazel-bin instead of just the sources.
wget "https://github.com/JasonSteving99/claro-lang/releases/download/v${ver}/claro-lang-bazel-bin.tar.gz"
tar -xf claro-lang-bazel-bin.tar.gz
#####################################################################################################
# We don't want to depend on Bazel at all for rebuilding, it's just one file changing. Rebuild it using
# the packaged jar files and then update the jar holding that recompiled file and run the Bazel gen'd
# run script which points at all the correct jar runfiles. First though, we need to slightly modify
# the Bazel gen'd runscript to rebuild using the packaged jars for us (without explicitly rerunning Bazel
# itself since this is super slow and involves starting up a new Bazel server...).
pushd claro_programs
read -r -d '' MY_SCRIPT <<"EOF" ||:
REBUILD_CLASSPATH="${CLASSPATH}"
# For the purpose of rebuilding, we need lombok and autovalue on the classpath.
REBUILD_CLASSPATH+=":lombok-1.18.20.jar"
REBUILD_CLASSPATH+=":auto-value-1.5.3.jar"
javac -classpath $REBUILD_CLASSPATH Conditions.java
# There's an assumption that the dir ./com/claro/ was made in the tarball before this.
mv Conditions*.class com/claro
jar -uf "${RUNPATH}src/java/com/claro/claro_programs/conditions_compiled_claro_image.jar" com/claro/Conditions*.class
java -classpath $CLASSPATH "${ARGS[@]}"
EOF
# Insert MY_SCRIPT into the Bazel run script just before java gets executed. We're reusing Bazel's run
# script basically just to get a conveniently curated CLASSPATH variable generated to point to all the
# randomly scattered jar files that Bazel places throughout bazel-bin/.
sed -i "s|exec \$JAVABIN.*|${MY_SCRIPT//$'\n'/\\n}|" conditions_compiled_claro_image
chmod -R u+rw *
popd
#####################################################################################################
cp -R claro_programs/. "${pkg}/opt/claro/programs/"
wget "https://github.com/JasonSteving99/claro-lang/releases/download/v${ver}/claro_compiler_binary_deploy.jar"
cp claro_compiler_binary_deploy.jar "${pkg}/opt/claro/"
setup: |
cp -R /opt/claro/programs "./"
main: "programs/Conditions.claro"
template: |
# Thanks for trying out Claro during its early development stages!
# To learn Claro by example, check out:
# https://clarolang.com/tree/main/src/java/com/claro/claro_programs
print("Hello, world!");
repl: |
java -jar /opt/claro/claro_compiler_binary_deploy.jar --repl --silent
# Skip rebuilding the entire compiler all over again and instead just
# use the pre-built Claro compiler jar.
compile: |
java -jar /opt/claro/claro_compiler_binary_deploy.jar \
--java_source --silent \
--classname=Conditions --package=com.claro \
< programs/Conditions.claro \
> programs/Conditions.java
run: |
set -e
cd programs
./conditions_compiled_claro_image ||:
java -jar /opt/claro/claro_compiler_binary_deploy.jar --repl --silent
input: |
print(123 * 234);
timeoutFactor: 2

22
langs/groovy.yaml Normal file
View File

@ -0,0 +1,22 @@
id: "groovy"
name: "Groovy"
install:
apt:
- groovy
repl: |
JAVA_OPTS="-Djava.util.prefs.systemRoot=$PWD/.java -Djava.util.prefs.userRoot=$PWD/.java/.userPrefs" groovysh
main: "main.groovy"
template: |
print "Hello, world!";
run: |
JAVA_OPTS="-Djava.util.prefs.systemRoot=$PWD/.java -Djava.util.prefs.userRoot=$PWD/.java/.userPrefs" groovysh main.groovy
scope:
code: |
x = 123 * 234;
timeoutFactor: 4

30
langs/ioke.yaml Normal file
View File

@ -0,0 +1,30 @@
id: "ioke"
aliases:
- "ik"
name: "Ioke"
install:
prepare:
cert:
- "https://letsencrypt.org/certs/lets-encrypt-r3.pem"
apt:
- default-jdk
manual: |
install -d "${pkg}/opt/ioke"
install -d "${pkg}/usr/local/bin"
wget https://ioke.org/dist/ioke-ikj-latest.tar.gz -O ioke.tar.gz
tar -xf ioke.tar.gz -C "${pkg}/opt/ioke" --strip-components=1
ln -s /opt/ioke/bin/ioke "${pkg}/usr/local/bin/ioke"
repl: |
JAVA_OPTS="-Duser.home=$PWD" ioke
main: "main.ik"
template: |
"Hello, world!" println
run: |
JAVA_OPTS="-Duser.home=$PWD" ioke main.ik; JAVA_OPTS="-Duser.home=$PWD" ioke
timeoutFactor: 4

29
langs/kalyn.yaml Normal file
View File

@ -0,0 +1,29 @@
id: "kalyn"
name: "Kalyn"
install:
prepare:
apt:
- haskell-stack
manual: |
install -d "${pkg}/opt/kalyn"
install -d "${pkg}/usr/local/bin"
git clone https://github.com/radian-software/kalyn.git
pushd kalyn
stack build kalyn
cp "$(stack exec which kalyn)" "${pkg}/usr/local/bin/"
cp -R src-kalyn/Stdlib src-kalyn/Stdlib.kalyn "${pkg}/opt/kalyn/"
popd
main: "src-kalyn/Main.kalyn"
template: |
(import "/opt/kalyn/Stdlib.kalyn")
(public def main (IO Empty)
(print "Hello, world!\n"))
compile: |
kalyn
run: |
out-kalyn/Main

40
langs/kotlin.yaml Normal file
View File

@ -0,0 +1,40 @@
id: "kotlin"
aliases:
- "kts"
- "kotlinc"
name: "Kotlin"
monacoLang: kotlin
install:
apt:
- default-jre
manual: |
install -d "${pkg}/opt"
install -d "${pkg}/usr/local/bin"
install -d "${pkg}/usr/local/lib"
ver="$(latest_release JetBrains/kotlin)"
wget "https://github.com/JetBrains/kotlin/releases/download/${ver}/kotlin-compiler-$(sed 's/^v//' <<< "$ver").zip" -O kotlin.zip
unzip kotlin.zip
cp -RT kotlinc "${pkg}/opt/kotlin"
ls "${pkg}/opt/kotlin/bin" | while read name; do
ln -s "/opt/kotlin/bin/${name}" "${pkg}/usr/local/bin/"
done
ls "${pkg}/opt/kotlin/lib" | while read name; do
ln -s "/opt/kotlin/lib/${name}" "${pkg}/usr/local/lib/"
done
repl: |
JAVA_OPTS="-Duser.home=$PWD" kotlinc
main: "main.kts"
template: |
println("Hello, world!")
run: |
JAVA_OPTS="-Duser.home=$PWD" kotlinc -script main.kts
kotlinc
timeoutFactor: 4

View File

@ -41,15 +41,8 @@ install:
- python3
- python3-pip
- black
manual: |
install -d "${pkg}/opt/mspyls"
install -d "${pkg}/usr/local/bin"
url="$(curl -fsSL "https://pvsc.blob.core.windows.net/python-language-server-stable?restype=container&comp=list&prefix=Python-Language-Server-linux-x64" | grep -Eo 'https://[^<]+\.nupkg' | tail -n1)"
wget "${url}"
unzip -d "${pkg}/opt/mspyls" Python-Language-Server-linux-x64.*.nupkg
chmod +x "${pkg}/opt/mspyls/Microsoft.Python.LanguageServer"
ln -s "/opt/mspyls/Microsoft.Python.LanguageServer" "${pkg}/usr/local/bin/Microsoft.Python.LanguageServer"
npm:
- pyright
repl: |
python3 -u
@ -83,10 +76,13 @@ pkg:
lsp:
start: |
Microsoft.Python.LanguageServer
init:
interpreter:
properties:
InterpreterPath: /usr/bin/python3
pyright-langserver --stdio
code: "import func"
item: "functools"
<<<<<<< HEAD
item: "functools"
=======
item: "functools"
skip:
- lsp
>>>>>>> d370c5fbc85bf9f479adca7c532ec13c2b54199f

64
langs/qsharp.yaml Normal file
View File

@ -0,0 +1,64 @@
id: "qsharp"
aliases:
- "q"
- "qs"
name: "Q#"
install:
# Apparently, the Q# project template is hardcoded to use version
# 3.x of the .NET SDK. Not sure why.
prepare: &install-dotnet
preface: |
wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
sudo --preserve-env=DEBIAN_FRONTEND apt-get install ./packages-microsoft-prod.deb
sudo --preserve-env=DEBIAN_FRONTEND apt-get update
apt:
- $(grep-aptavail -wF Package "dotnet-sdk-3\.[0-9.]+" -s Package -n | sort -Vr | head -n1)
<<: *install-dotnet
# We should cache the .dotnet directory to avoid a .NET banner being
# printed, and we should cache the main directory because there is a
# generated main.csproj file that is needed by .NET. Finally we
# should cache the .nuget directory as well as the build artifacts
# inside main to avoid a 30s initial compile time.
#
# We could optimize further but I don't feel like it right now.
manual: |
install -d "${pkg}/opt/qsharp/skel-home"
install -d "${pkg}/opt/qsharp/skel-src"
dotnet new -i Microsoft.Quantum.ProjectTemplates
dotnet new console -lang Q# -o main
dotnet run --project main
shopt -s dotglob
cp -R main "${pkg}/opt/qsharp/skel-src/"
cp -R "${HOME}/.dotnet" "${HOME}/.nuget" "${pkg}/opt/qsharp/skel-home/"
rm "${pkg}/opt/qsharp/skel-src/main/Program.qs"
chmod -R a=u,go-w "${pkg}/opt/qsharp"
manualInstall: |
wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
sudo --preserve-env=DEBIAN_FRONTEND apt-get update
sudo --preserve-env=DEBIAN_FRONTEND apt-get install ./packages-microsoft-prod.deb
setup: |
shopt -s dotglob
cp -R /opt/qsharp/skel-src/* ./
cp -R /opt/qsharp/skel-home/* "${HOME}/"
main: "main/Main.qs"
template: |
namespace main {
open Microsoft.Quantum.Canon;
open Microsoft.Quantum.Intrinsic;
@EntryPoint()
operation Main() : Unit {
Message("Hello, world!");
}
}
run: |
dotnet run --project main
timeoutFactor: 4

36
langs/red.yaml Normal file
View File

@ -0,0 +1,36 @@
id: "red"
name: "Red"
install:
apt:
- libcurl4:i386
manual: |
install -d "${pkg}/opt/red/skel"
install -d "${pkg}/usr/local/bin"
path="$(curl -fsSL https://static.red-lang.org/download.html | grep -Eo '/dl/linux/[^"]+' | head -n1)"
wget "https://static.red-lang.org${path}" -O red
chmod +x red
cp red "${pkg}/usr/local/bin/"
./red <<< quit
cp -R "$HOME/.red" "${pkg}/opt/red/skel/"
setup: |
shopt -s dotglob; cp -R /opt/red/skel/* "${HOME}/"
# https://github.com/red/red/issues/543#issuecomment-25404212
repl: |
"$(which red)"
input: |
DELAY: 5
123 * 234
main: "main.red"
template: |
Red [Title: "Main"]
print "Hello, world!"
run: |
"$(which red)" main.red; "$(which red)"

25
langs/scala.yaml Normal file
View File

@ -0,0 +1,25 @@
id: "scala"
name: "Scala"
install:
apt:
- scala
repl: |
scala
input: |
DELAY: 5
123 * 234
main: "main.scala"
template: |
println("Hello, world!")
run: |
scala -i main.scala
scope:
code: |
val x = 123 * 234
timeoutFactor: 8

73
langs/unison.yaml Normal file
View File

@ -0,0 +1,73 @@
id: "unison"
aliases:
- "ucm"
name: "Unison"
install:
prepare:
apt:
- haskell-stack
manual: |
mkdir -p "${pkg}/opt/unison/skel"
install -d "${pkg}/usr/local/bin"
git clone https://github.com/unisonweb/unison.git
pushd unison
stack build
cp "$(stack exec which unison)" "${pkg}/usr/local/bin/"
popd
pushd "${pkg}/opt/unison/skel"
"${pkg}/usr/local/bin/unison" -codebase . init
LESS="+q" "${pkg}/usr/local/bin/unison" -codebase . <<< 'pull https://github.com/unisonweb/base:.trunk .base'
popd
setup: |
shopt -s dotglob
cp -R /opt/unison/skel/* ./
repl: |
unison -codebase .
input: |
DELAY: 10
find : [a] -> [a]
output: |
base.List.reverse
# runProg implementation courtesy of Robert Offner from Unison Slack!
main: "main.u"
template: |
use io
runProg: '{IO, Exception} a -> '{IO} ()
runProg f = 'let
printErr err = match err with
Failure _ errMsg _ -> handle putBytes (stdHandle StdErr) (toUtf8 errMsg) with cases
{raise _ -> _} -> ()
{_} -> ()
match catch f with
Left err -> printErr err
Right _ -> ()
main: '{IO} ()
main = runProg 'let
printLine "Hello, world!"
createEmpty: ""
run: |
unison -codebase . run.file main.u main
echo "Type 'load main.u' at the repl prompt to bring variables into scope."
unison -codebase .
scope:
code: |
x = 123 * 234
input: |
DELAY: 15
load main.u
DELAY: 5
add x
DELAY: 5
display x
timeoutFactor: 4

60
langs/v.yaml Normal file
View File

@ -0,0 +1,60 @@
id: "v"
aliases:
- "vlang"
name: "V"
info:
year: 2019
desc: "Simple, statically-typed compiled programming language designed for building maintainable software"
ext:
- v
web:
home: "https://vlang.io/"
source: "https://github.com/vlang/v"
category: general
mode: compiled
platform: []
syntax:
- c
typing:
- static
paradigm:
- functional
- imperative
usage: personal
install:
manual: |
install -d "${pkg}/opt"
install -d "${pkg}/usr/local/bin"
git clone https://github.com/vlang/v.git "${pkg}/opt/v"
pushd "${pkg}/opt/v"
make
ln -s /opt/v/v "${pkg}/usr/local/bin/"
# Force vfmt to get compiled ahead of time, otherwise this will
# happen at first invocation and fail due to lack of write
# permissions on /opt/v.
./v fmt < /dev/null
popd
main: "main.v"
template: |
fn main() {
println('Hello, world!')
}
run: |
v run main.v
format:
run: |
v fmt main.v
input: |
fn main()
{
println("Hello, world!")
}

24
langs/verilog.yaml Normal file
View File

@ -0,0 +1,24 @@
id: "verilog"
aliases:
- "systemverilog"
- "iverilog"
name: "Verilog"
install:
apt:
- iverilog
main: "main.v"
template: |
module main;
initial begin
$display("Hello, world!");
end
endmodule
compile: |
iverilog main.v -o main
run: |
./main
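
The langs/*.yaml files added above all follow one schema: install describes how the language's Debian package is built, template holds the hello-world program, and repl/compile/run give the shell commands Riju executes. A minimal sketch of consuming such a config with the repo's "yaml" dependency; the real entry point is readLangConfig in lib/yaml.js, whose exact shape is not shown in this diff, so treat this as an approximation:

import { promises as fs } from "fs";
import YAML from "yaml";

// Hypothetical loader, for illustration; readLangConfig may differ.
async function loadLangConfig(lang) {
  const raw = await fs.readFile(`langs/${lang}.yaml`, "utf-8");
  return YAML.parse(raw); // e.g. { id: "verilog", main: "main.v", ... }
}

loadLangConfig("verilog").then((cfg) => console.log(cfg.name)); // "Verilog"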


@ -9,6 +9,7 @@
"@babel/parser": "^7.13.11",
"@babel/preset-env": "^7.12.11",
"@balena/dockerignore": "^1.0.2",
"@kubernetes/client-node": "^0.18.0",
"@sentry/node": "^6.11.0",
"async-lock": "^1.2.6",
"babel-loader": "^8.2.2",
@ -19,7 +20,7 @@
"css-loader": "^5.0.1",
"debounce": "^1.2.0",
"docker-file-parser": "^1.0.5",
"ejs": "^3.1.5",
"ejs": "^3.1.7",
"express": "^4.17.1",
"express-ws": "^4.0.0",
"file-loader": "^6.2.0",
@ -32,9 +33,12 @@
"p-queue": "^6.6.2",
"parse-passwd": "^1.0.0",
"prettier": "^2.3.1",
"prom-client": "^14.0.1",
"regenerator-runtime": "^0.13.7",
"semaphore": "^1.1.0",
"strip-ansi": "^6.0.0",
"style-loader": "^2.0.0",
"unique-names-generator": "^4.7.1",
"uuid": "^8.3.2",
"vscode-languageserver-protocol": "3.15.3",
"webpack": "^4.44.2",
@ -42,5 +46,9 @@
"xterm": "^4.9.0",
"xterm-addon-fit": "^0.4.0",
"yaml": "^1.10.0"
}
},
"$comments": [
"limiter version pinned due to https://github.com/jhurliman/node-rate-limiter/issues/80",
"monaco-languageclient, monaco-editor, vscode-languageserver-protocol pinned because their APIs changed a bunch and Riju hasn't been updated yet"
]
}


@ -1,48 +0,0 @@
data "amazon-ami" "ubuntu" {
filters = {
name = "ubuntu/images/hvm-ssd/ubuntu-*-21.04-amd64-server-*"
root-device-type = "ebs"
virtualization-type = "hvm"
}
most_recent = true
owners = ["099720109477"]
}
locals {
timestamp = regex_replace(timestamp(), "[- TZ:]", "")
}
source "amazon-ebs" "ubuntu" {
ami_name = "riju-ci-${local.timestamp}"
instance_type = "t3.micro"
source_ami = "${data.amazon-ami.ubuntu.id}"
ssh_username = "ubuntu"
tag {
key = "BillingCategory"
value = "Riju"
}
tag {
key = "BillingSubcategory"
value = "Riju:AMI"
}
tag {
key = "Name"
value = "riju-ci-${local.timestamp}"
}
}
build {
sources = ["source.amazon-ebs.ubuntu"]
provisioner "file" {
destination = "/tmp/riju-init-volume"
source = "riju-init-volume"
}
provisioner "shell" {
script = "provision-ci.bash"
}
}


@ -1,41 +0,0 @@
{
"agent": {
"metrics_collection_interval": 60,
"run_as_user": "root"
},
"metrics": {
"append_dimensions": {
"ImageId": "${aws:ImageId}",
"InstanceId": "${aws:InstanceId}",
"InstanceType": "${aws:InstanceType}"
},
"aggregation_dimensions": [
["RijuInstanceGroup"],
["RijuInstanceGroup", "path"]
],
"metrics_collected": {
"cpu": {
"append_dimensions": {
"RijuInstanceGroup": "Webserver"
},
"measurement": ["usage_active"],
"metrics_collection_interval": 60
},
"disk": {
"append_dimensions": {
"RijuInstanceGroup": "Webserver"
},
"measurement": ["used_percent"],
"metrics_collection_interval": 60,
"resources": ["/", "/mnt/riju"]
},
"mem": {
"append_dimensions": {
"RijuInstanceGroup": "Webserver"
},
"measurement": ["mem_used_percent"],
"metrics_collection_interval": 60
}
}
}
}


@ -0,0 +1,13 @@
[Unit]
Description=Prometheus node exporter
StartLimitBurst=5
StartLimitIntervalSec=300
[Service]
Type=exec
ExecStart=node_exporter
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

packer/prometheus.service Normal file

@ -0,0 +1,13 @@
[Unit]
Description=Prometheus
StartLimitBurst=5
StartLimitIntervalSec=300
[Service]
Type=exec
ExecStart=bash -c 'EC2_INSTANCE_ID="$(curl -fsSL http://169.254.169.254/latest/meta-data/instance-id)" prometheus --config.file /etc/prometheus/config.yaml --enable-feature=expand-external-labels'
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target

packer/prometheus.yaml Normal file

@ -0,0 +1,21 @@
global:
scrape_interval: 15s
external_labels:
node: "${EC2_INSTANCE_ID}"
scrape_configs:
- job_name: server
static_configs:
- targets: ["localhost:6121"]
- job_name: node
static_configs:
- targets: ["localhost:9100"]
- job_name: prometheus
static_configs:
- targets: ["localhost:9090"]
remote_write:
- url: "$GRAFANA_PROMETHEUS_HOSTNAME"
basic_auth:
username: "$GRAFANA_PROMETHEUS_USERNAME"
password: "$GRAFANA_API_KEY"
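
Two substitution mechanisms meet in this file: the $GRAFANA_* placeholders are rewritten by sed in provision.bash before the service ever starts, while ${EC2_INSTANCE_ID} survives until runtime and is expanded by Prometheus itself, because prometheus.service exports the variable and passes --enable-feature=expand-external-labels. Roughly what that runtime expansion does, as an illustrative sketch only (Prometheus implements it internally, in Go):

// Illustrative only; not part of the repo.
const expandExternalLabel = (value, env = process.env) =>
  value.replace(/\$\{(\w+)\}/g, (_, name) => env[name] ?? "");

console.log(expandExternalLabel("${EC2_INSTANCE_ID}", { EC2_INSTANCE_ID: "i-0abc123" }));
// => "i-0abc123"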


@ -5,7 +5,7 @@ StartLimitIntervalSec=300
[Service]
Type=exec
ExecStart=bash -c 'promtail -config.file /etc/promtail/config.yaml -client.external-labels instance="$(curl -fsSL http://169.254.169.254/latest/meta-data/instance-id)"'
ExecStart=bash -c 'promtail -config.file /etc/promtail/config.yaml -client.external-labels node="$(curl -fsSL http://169.254.169.254/latest/meta-data/instance-id)"'
Restart=always
RestartSec=5


@ -7,7 +7,7 @@ positions:
filename: /tmp/positions.yaml
client:
url: https://72217:$GRAFANA_API_KEY@logs-prod-us-central1.grafana.net/api/prom/push
url: https://$GRAFANA_LOKI_USERNAME:$GRAFANA_API_KEY@$GRAFANA_LOKI_HOSTNAME/api/prom/push
scrape_configs:
- job_name: kernel
@ -39,6 +39,16 @@ scrape_configs:
regex: "riju\\.service"
target_label: source
replacement: "supervisor"
- source_labels:
- __journal__systemd_unit
regex: "prometheus\\.service"
target_label: source
replacement: "prometheus"
- source_labels:
- __journal__systemd_unit
regex: "node-exporter\\.service"
target_label: source
replacement: "node-exporter"
- source_labels:
- source
regex: "systemd"


@ -1,35 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
# I think there is a race condition related to Ubuntu wanting to do an
# automated system upgrade at boot, which causes 'apt-get update' to
# sometimes fail with an obscure error message.
sleep 5
mkdir /tmp/riju-work
pushd /tmp/riju-work
export DEBIAN_FRONTEND=noninteractive
sudo -E apt-get update
sudo -E apt-get dist-upgrade -y
sudo -E apt-get install -y curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo -E apt-key add -
ubuntu_name="$(lsb_release -cs)"
sudo tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
deb [arch=amd64] https://download.docker.com/linux/ubuntu ${ubuntu_name} stable
EOF
sudo -E apt-get update
sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io make
sudo chown root:root /tmp/riju-init-volume
sudo mv /tmp/riju-init-volume /usr/local/bin/
popd
rm -rf /tmp/riju-work


@ -5,6 +5,7 @@ set -euo pipefail
: ${ADMIN_PASSWORD}
: ${AWS_REGION}
: ${S3_BUCKET}
: ${S3_CONFIG_PATH}
: ${SUPERVISOR_ACCESS_TOKEN}
latest_release() {
@ -43,16 +44,12 @@ sudo ./aws/install
wget -nv https://s3.us-west-1.amazonaws.com/amazon-ssm-us-west-1/latest/debian_amd64/amazon-ssm-agent.deb
wget -nv https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb
sudo apt-get install -y ./amazon-cloudwatch-agent.deb
sudo chown root:root \
/tmp/cloudwatch.json /tmp/docker.json /tmp/riju.service \
/tmp/docker.json /tmp/riju.service \
/tmp/riju.slice /tmp/riju-init-volume /tmp/riju-supervisor
sudo mv /tmp/docker.json /etc/docker/daemon.json
sudo mv /tmp/riju.service /tmp/riju.slice /etc/systemd/system/
sudo mv /tmp/cloudwatch.json /opt/aws/amazon-cloudwatch-agent/bin/config.json
sudo mv /tmp/riju-init-volume /tmp/riju-supervisor /usr/local/bin/
sudo sed -Ei 's|^#?PermitRootLogin .*|PermitRootLogin no|' /etc/ssh/sshd_config
@ -61,13 +58,13 @@ sudo sed -Ei 's|^#?PermitEmptyPasswords .*|PermitEmptyPasswords no|' /etc/ssh/ss
sudo sed -Ei "s|\\\$AWS_REGION|${AWS_REGION}|" /etc/systemd/system/riju.service
sudo sed -Ei "s|\\\$ANALYTICS_TAG|${ANALYTICS_TAG:-}|" /etc/systemd/system/riju.service
sudo sed -Ei "s|\\\$S3_BUCKET|${S3_BUCKET}|" /etc/systemd/system/riju.service
sudo sed -Ei "s|\\\$S3_CONFIG_PATH|${S3_CONFIG_PATH}|" /etc/systemd/system/riju.service
sudo sed -Ei "s|\\\$SENTRY_DSN|${SENTRY_DSN:-}|" /etc/systemd/system/riju.service
sudo sed -Ei "s|\\\$SUPERVISOR_ACCESS_TOKEN|${SUPERVISOR_ACCESS_TOKEN}|" /etc/systemd/system/riju.service
sudo passwd -l root
sudo useradd admin -g admin -G sudo -s /usr/bin/bash -p "$(echo "${ADMIN_PASSWORD}" | mkpasswd -s)" -m
sudo amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -s -c file:/opt/aws/amazon-cloudwatch-agent/bin/config.json
sudo systemctl enable riju
if [[ -n "${GRAFANA_API_KEY:-}" ]]; then
@ -77,16 +74,42 @@ if [[ -n "${GRAFANA_API_KEY:-}" ]]; then
unzip promtail-linux-amd64.zip
sudo cp promtail-linux-amd64 /usr/local/bin/promtail
sudo chown root:root /tmp/promtail.service /tmp/promtail.yaml
ver="$(latest_release prometheus/node_exporter | sed 's/^v//')"
sudo mkdir /etc/promtail
wget -nv "https://github.com/prometheus/node_exporter/releases/download/v${ver}/node_exporter-${ver}.linux-amd64.tar.gz" -O node_exporter.tar.gz
tar -xf node_exporter.tar.gz --strip-components=1
sudo cp node_exporter /usr/local/bin/
ver="$(latest_release prometheus/prometheus | sed 's/^v//')"
wget -nv "https://github.com/prometheus/prometheus/releases/download/v${ver}/prometheus-${ver}.linux-amd64.tar.gz" -O prometheus.tar.gz
tar -xf prometheus.tar.gz --strip-components=1
sudo cp prometheus /usr/local/bin/
sudo chown root:root \
/tmp/node-exporter.service /tmp/prometheus.service \
/tmp/prometheus.yaml /tmp/promtail.service /tmp/promtail.yaml
sudo mkdir /etc/prometheus /etc/promtail
sudo mv /tmp/prometheus.yaml /etc/prometheus/config.yaml
sudo mv /tmp/promtail.yaml /etc/promtail/config.yaml
sudo mv /tmp/promtail.service /etc/systemd/system/
sudo sed -Ei "s/\\\$GRAFANA_API_KEY/${GRAFANA_API_KEY}/" /etc/promtail/config.yaml
sudo mv /tmp/prometheus.service /tmp/promtail.service /tmp/node-exporter.service \
/etc/systemd/system/
sudo systemctl enable promtail
sudo sed -Ei "s/\\\$GRAFANA_API_KEY/${GRAFANA_API_KEY}/" \
/etc/prometheus/config.yaml /etc/promtail/config.yaml
sudo sed -Ei "s/\\\$GRAFANA_LOKI_HOSTNAME/${GRAFANA_LOKI_HOSTNAME}/" \
/etc/promtail/config.yaml
sudo sed -Ei "s/\\\$GRAFANA_LOKI_USERNAME/${GRAFANA_LOKI_USERNAME}/" \
/etc/promtail/config.yaml
sudo sed -Ei "s/\\\$GRAFANA_PROMETHEUS_HOSTNAME/${GRAFANA_PROMETHEUS_HOSTNAME}/" \
/etc/prometheus/config.yaml
sudo sed -Ei "s/\\\$GRAFANA_PROMETHEUS_USERNAME/${GRAFANA_PROMETHEUS_USERNAME}/" \
/etc/prometheus/config.yaml
sudo systemctl enable node-exporter prometheus promtail
else
sudo rm /tmp/promtail.yaml /tmp/promtail.service
sudo rm /tmp/node-exporter.service /tmp/promtail.yaml /tmp/promtail.service
fi
sudo userdel ubuntu -f
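
The script's latest_release helper is declared near the top, but its body is elided from this diff. Judging only from how it is used (latest_release prometheus/node_exporter | sed 's/^v//'), it resolves a GitHub repo to its newest release tag; a hypothetical Node equivalent, assuming Node 18+ for the built-in fetch:

// Hypothetical; the real helper is a bash function not shown here.
async function latestRelease(repo) {
  const res = await fetch(
    `https://api.github.com/repos/${repo}/releases/latest`,
    { headers: { "User-Agent": "riju-provision" } }
  );
  const { tag_name } = await res.json();
  return tag_name; // e.g. "v1.6.1"; the caller strips the leading "v"
}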


@ -13,6 +13,7 @@ RestartSec=5
Environment=AWS_REGION=$AWS_REGION
Environment=ANALYTICS_TAG=$ANALYTICS_TAG
Environment=S3_BUCKET=$S3_BUCKET
Environment=S3_CONFIG_PATH=$S3_CONFIG_PATH
Environment=SENTRY_DSN=$SENTRY_DSN
Environment=SUPERVISOR_ACCESS_TOKEN=$SUPERVISOR_ACCESS_TOKEN


@ -21,8 +21,3 @@ MemorySwapMax=0
# this space to user code.
TasksAccounting=true
TasksMax=400000
# Attempt to deny access to EC2 Instance Metadata service from user
# code.
IPAccounting=true
IPAddressDeny=169.254.169.254


@ -13,6 +13,26 @@ variable "analytics_tag" {
default = "${env("ANALYTICS_TAG")}"
}
variable "grafana_loki_hostname" {
type = string
default = "${env("GRAFANA_LOKI_HOSTNAME")}"
}
variable "grafana_loki_username" {
type = string
default = "${env("GRAFANA_LOKI_USERNAME")}"
}
variable "grafana_prometheus_hostname" {
type = string
default = "${env("GRAFANA_PROMETHEUS_HOSTNAME")}"
}
variable "grafana_prometheus_username" {
type = string
default = "${env("GRAFANA_PROMETHEUS_USERNAME")}"
}
variable "grafana_api_key" {
type = string
default = "${env("GRAFANA_API_KEY")}"
@ -23,6 +43,11 @@ variable "s3_bucket" {
default = "${env("S3_BUCKET")}"
}
variable "s3_config_path" {
type = string
default = "${env("S3_CONFIG_PATH")}"
}
variable "sentry_dsn" {
type = string
default = "${env("SENTRY_DSN_PACKER")}"
@ -35,7 +60,8 @@ variable "supervisor_access_token" {
data "amazon-ami" "ubuntu" {
filters = {
name = "ubuntu/images/hvm-ssd/ubuntu-*-21.10-amd64-server-*"
// EOL: April 2027
name = "ubuntu/images/hvm-ssd/ubuntu-*-22.04-amd64-server-*"
root-device-type = "ebs"
virtualization-type = "hvm"
}
@ -48,7 +74,7 @@ locals {
}
source "amazon-ebs" "ubuntu" {
ami_name = "riju-web-${local.timestamp}"
ami_name = "riju-${local.timestamp}"
instance_type = "t3.small"
source_ami = "${data.amazon-ami.ubuntu.id}"
ssh_username = "ubuntu"
@ -65,7 +91,7 @@ source "amazon-ebs" "ubuntu" {
tag {
key = "Name"
value = "riju-web-${local.timestamp}"
value = "riju-${local.timestamp}"
}
}
@ -73,13 +99,23 @@ build {
sources = ["source.amazon-ebs.ubuntu"]
provisioner "file" {
destination = "/tmp/cloudwatch.json"
source = "cloudwatch.json"
destination = "/tmp/docker.json"
source = "docker.json"
}
provisioner "file" {
destination = "/tmp/docker.json"
source = "docker.json"
destination = "/tmp/node-exporter.service"
source = "node-exporter.service"
}
provisioner "file" {
destination = "/tmp/prometheus.service"
source = "prometheus.service"
}
provisioner "file" {
destination = "/tmp/prometheus.yaml"
source = "prometheus.yaml"
}
provisioner "file" {
@ -117,11 +153,16 @@ build {
"ADMIN_PASSWORD=${var.admin_password}",
"AWS_REGION=${var.aws_region}",
"ANALYTICS_TAG=${var.analytics_tag}",
"GRAFANA_LOKI_HOSTNAME=${var.grafana_loki_hostname}",
"GRAFANA_LOKI_USERNAME=${var.grafana_loki_username}",
"GRAFANA_PROMETHEUS_HOSTNAME=${var.grafana_prometheus_hostname}",
"GRAFANA_PROMETHEUS_USERNAME=${var.grafana_prometheus_username}",
"GRAFANA_API_KEY=${var.grafana_api_key}",
"S3_BUCKET=${var.s3_bucket}",
"S3_CONFIG_PATH=${var.s3_config_path}",
"SENTRY_DSN=${var.sentry_dsn}",
"SUPERVISOR_ACCESS_TOKEN=${var.supervisor_access_token}",
]
script = "provision-web.bash"
script = "provision.bash"
}
}


@ -1,4 +1,4 @@
module github.com/raxod502/riju/supervisor
module github.com/radian-software/riju/supervisor
go 1.16


@ -34,6 +34,9 @@ import (
const bluePort = 6229
const greenPort = 6230
const blueMetricsPort = 6231
const greenMetricsPort = 6232
const blueName = "riju-app-blue"
const greenName = "riju-app-green"
@ -43,8 +46,9 @@ type deploymentConfig struct {
}
type supervisorConfig struct {
AccessToken string `env:"SUPERVISOR_ACCESS_TOKEN,notEmpty"`
S3Bucket string `env:"S3_BUCKET,notEmpty"`
AccessToken string `env:"SUPERVISOR_ACCESS_TOKEN,notEmpty"`
S3Bucket string `env:"S3_BUCKET,notEmpty"`
S3ConfigPath string `env:"S3_CONFIG_PATH,notEmpty"`
}
type reloadJob struct {
@ -56,10 +60,12 @@ type reloadJob struct {
type supervisor struct {
config supervisorConfig
blueProxyHandler http.Handler
greenProxyHandler http.Handler
isGreen bool // blue-green deployment
deployConfigHash string
blueProxyHandler http.Handler
greenProxyHandler http.Handler
blueMetricsProxyHandler http.Handler
greenMetricsProxyHandler http.Handler
isGreen bool // blue-green deployment
deployConfigHash string
awsAccountNumber string
awsRegion string
@ -102,7 +108,15 @@ func (sv *supervisor) scheduleReload() string {
return uuid
}
func (sv *supervisor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
func (sv *supervisor) serveHTTP(w http.ResponseWriter, r *http.Request, metricsPort bool) {
if metricsPort {
if sv.isGreen {
sv.greenMetricsProxyHandler.ServeHTTP(w, r)
} else {
sv.blueMetricsProxyHandler.ServeHTTP(w, r)
}
return
}
if strings.HasPrefix(r.URL.Path, "/api/supervisor") {
authHeader := r.Header.Get("Authorization")
if authHeader == "" {
@ -264,12 +278,13 @@ func (sv *supervisor) reload() error {
buf := s3manager.NewWriteAtBuffer([]byte{})
if _, err := dl.Download(context.Background(), buf, &s3.GetObjectInput{
Bucket: &sv.config.S3Bucket,
Key: aws.String("config.json"),
Key: aws.String(sv.config.S3ConfigPath),
}); err != nil {
return err
}
deployCfgBytes := buf.Bytes()
deployCfg := deploymentConfig{}
if err := json.Unmarshal(buf.Bytes(), &deployCfg); err != nil {
if err := json.Unmarshal(deployCfgBytes, &deployCfg); err != nil {
return err
}
sv.status("listing locally available images")
@ -321,12 +336,8 @@ func (sv *supervisor) reload() error {
}
}
}
deployCfgStr, err := json.Marshal(&deployCfg)
if err != nil {
return err
}
h := sha1.New()
h.Write([]byte(deployCfgStr))
h.Write(deployCfgBytes)
deployCfgHash := fmt.Sprintf("%x", h.Sum(nil))
if deployCfgHash == sv.deployConfigHash {
sv.status(fmt.Sprintf("config hash remains at %s", deployCfgHash))
@ -338,14 +349,17 @@ func (sv *supervisor) reload() error {
))
}
var port int
var metricsPort int
var name string
var oldName string
if sv.isGreen {
port = bluePort
metricsPort = blueMetricsPort
name = blueName
oldName = greenName
} else {
port = greenPort
metricsPort = greenMetricsPort
name = greenName
oldName = blueName
}
@ -355,6 +369,7 @@ func (sv *supervisor) reload() error {
"-v", "/var/cache/riju:/var/cache/riju",
"-v", "/var/run/docker.sock:/var/run/docker.sock",
"-p", fmt.Sprintf("127.0.0.1:%d:6119", port),
"-p", fmt.Sprintf("127.0.0.1:%d:6121", metricsPort),
"-e", "ANALYTICS_TAG",
"-e", "RIJU_DEPLOY_CONFIG",
"-e", "SENTRY_DSN",
@ -366,13 +381,13 @@ func (sv *supervisor) reload() error {
)
dockerRun.Stdout = os.Stdout
dockerRun.Stderr = os.Stderr
dockerRun.Env = append(os.Environ(), fmt.Sprintf("RIJU_DEPLOY_CONFIG=%s", deployCfgStr))
dockerRun.Env = append(os.Environ(), fmt.Sprintf("RIJU_DEPLOY_CONFIG=%s", deployCfgBytes))
if err := dockerRun.Run(); err != nil {
return err
}
sv.status("waiting for container to start up")
time.Sleep(5 * time.Second)
sv.status("checking that container is healthy")
sv.status("checking that container responds to HTTP")
resp, err := http.Get(fmt.Sprintf("http://localhost:%d", port))
if err != nil {
return err
@ -383,7 +398,25 @@ func (sv *supervisor) reload() error {
return err
}
if !strings.Contains(string(body), "python") {
return errors.New("container did not appear to be healthy")
return errors.New("container did not respond successfully to HTTP")
}
sv.status("checking that container exposes metrics")
resp, err = http.Get(fmt.Sprintf("http://localhost:%d/metrics", metricsPort))
if err != nil {
return err
}
defer resp.Body.Close()
body, err = io.ReadAll(resp.Body)
if err != nil {
return err
}
if !strings.Contains(string(body), "process_cpu_seconds_total") {
return errors.New("container did not expose metrics properly")
}
if sv.isGreen {
sv.status("switching from green to blue")
} else {
sv.status("switching from blue to green")
}
sv.isGreen = !sv.isGreen
sv.status("stopping old container")
@ -451,8 +484,8 @@ func main() {
}
rijuInitVolume := exec.Command("riju-init-volume")
rijuInitVolume.Stdout = rijuInitVolume.Stdout
rijuInitVolume.Stderr = rijuInitVolume.Stderr
rijuInitVolume.Stdout = os.Stdout
rijuInitVolume.Stderr = os.Stderr
if err := rijuInitVolume.Run(); err != nil {
log.Fatalln(err)
}
@ -466,6 +499,15 @@ func main() {
log.Fatalln(err)
}
blueMetricsUrl, err := url.Parse(fmt.Sprintf("http://localhost:%d", blueMetricsPort))
if err != nil {
log.Fatalln(err)
}
greenMetricsUrl, err := url.Parse(fmt.Sprintf("http://localhost:%d", greenMetricsPort))
if err != nil {
log.Fatalln(err)
}
awsCfg, err := awsConfig.LoadDefaultConfig(context.Background())
if err != nil {
log.Fatalln(err)
@ -583,18 +625,38 @@ func main() {
}
sv := &supervisor{
config: supervisorCfg,
blueProxyHandler: httputil.NewSingleHostReverseProxy(blueUrl),
greenProxyHandler: httputil.NewSingleHostReverseProxy(greenUrl),
isGreen: isGreen,
deployConfigHash: deployCfgHash,
s3: s3.NewFromConfig(awsCfg),
ecr: ecr.NewFromConfig(awsCfg),
awsRegion: awsCfg.Region,
awsAccountNumber: *ident.Account,
reloadJobs: map[string]*reloadJob{},
config: supervisorCfg,
blueProxyHandler: httputil.NewSingleHostReverseProxy(blueUrl),
greenProxyHandler: httputil.NewSingleHostReverseProxy(greenUrl),
blueMetricsProxyHandler: httputil.NewSingleHostReverseProxy(blueMetricsUrl),
greenMetricsProxyHandler: httputil.NewSingleHostReverseProxy(greenMetricsUrl),
isGreen: isGreen,
deployConfigHash: deployCfgHash,
s3: s3.NewFromConfig(awsCfg),
ecr: ecr.NewFromConfig(awsCfg),
awsRegion: awsCfg.Region,
awsAccountNumber: *ident.Account,
reloadJobs: map[string]*reloadJob{},
}
go sv.scheduleReload()
go func() {
log.Println("listening on http://127.0.0.1:6121/metrics")
log.Fatalln(http.ListenAndServe(
"127.0.0.1:6121",
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
sv.serveHTTP(w, r, true)
},
),
))
}()
log.Println("listening on http://0.0.0.0:80")
log.Fatalln(http.ListenAndServe("0.0.0.0:80", sv))
log.Fatalln(http.ListenAndServe(
"0.0.0.0:80",
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
sv.serveHTTP(w, r, false)
},
),
))
}
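
The reload path above gates the blue-green switch on two probes: the new container must serve an HTTP page containing "python" (presumably the rendered language index) and must expose process_cpu_seconds_total on its metrics port. A compact sketch of the same gate in JS, purely for illustration; the supervisor itself is the Go code above:

import http from "http";

const get = (url) =>
  new Promise((resolve, reject) => {
    http.get(url, (res) => {
      let body = "";
      res.on("data", (chunk) => (body += chunk));
      res.on("end", () => resolve(body));
    }).on("error", reject);
  });

// Mirrors the two checks in supervisor.reload().
async function healthGate(port, metricsPort) {
  const page = await get(`http://localhost:${port}`);
  if (!page.includes("python"))
    throw new Error("container did not respond successfully to HTTP");
  const metrics = await get(`http://localhost:${metricsPort}/metrics`);
  if (!metrics.includes("process_cpu_seconds_total"))
    throw new Error("container did not expose metrics properly");
}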


@ -30,6 +30,8 @@ void init() { sentinel_bash[sentinel_bash_len - 1] = '\0'; }
void die_with_usage()
{
die("usage:\n"
" riju-system-privileged list\n"
" riju-system-privileged pull REPO TAG\n"
" riju-system-privileged session UUID LANG [IMAGE-HASH]\n"
" riju-system-privileged exec UUID CMDLINE...\n"
" riju-system-privileged pty UUID CMDLINE...\n"
@ -117,6 +119,28 @@ char *parseImageHash(char *imageHash)
return imageHash;
}
char *parseRepo(char *repo)
{
if (strnlen(repo, 501) > 500)
die("illegal repo name");
for (char *ptr = repo; *ptr; ++ptr)
if (!((*ptr >= 'a' && *ptr <= 'z') || (*ptr >= '0' && *ptr <= '9') ||
*ptr == '/' || *ptr == '.' || *ptr == '-' || *ptr == '_'))
die("illegal repo name");
return repo;
}
char *parseTag(char *tag)
{
if (strnlen(tag, 501) > 500)
die("illegal tag name");
for (char *ptr = tag; *ptr; ++ptr)
if (!((*ptr >= 'a' && *ptr <= 'z') || (*ptr >= '0' && *ptr <= '9') ||
*ptr == '.' || *ptr == '-' || *ptr == '_'))
die("illegal tag name");
return tag;
}
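
For reference, the two validators above accept at most 500 characters drawn from [a-z0-9/._-] for repos and [a-z0-9._-] for tags. The same check expressed as JS regexes, illustrative only (the enforcement lives in this setuid C binary, not in Node):

// Illustrative mirror of parseRepo/parseTag; not part of the repo.
const REPO_RE = /^[a-z0-9/._-]{0,500}$/;
const TAG_RE = /^[a-z0-9._-]{0,500}$/;

function checkRepo(repo) {
  if (!REPO_RE.test(repo)) throw new Error("illegal repo name");
  return repo;
}

function checkTag(tag) {
  if (!TAG_RE.test(tag)) throw new Error("illegal tag name");
  return tag;
}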
char *timeout_msg;
void sigalrm_die(int signum)
@ -132,7 +156,81 @@ void sigalrm_kill_parent(int signum)
exit(EXIT_FAILURE);
}
void session(char *uuid, char *lang, char *imageHash)
void cmd_list()
{
// This command prints a bunch of empty lines because there is no
// way to filter to a desired set of images. Caller is expected to
// remove empty lines because it's easier in JS than C.
char *argv[] = {
"docker",
"image",
"ls",
"--format",
"{{ if eq .Repository \"riju\" }}{{ .Tag }}{{ end }}",
NULL,
};
execvp(argv[0], argv);
die("execvp failed");
}
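
The empty-line cleanup that the comment in cmd_list defers to the caller could be as simple as the following sketch (function name hypothetical; the actual JS caller is elsewhere in the repo and not shown in this diff):

import { execFileSync } from "child_process";

// Run the privileged binary and drop the blank lines that the
// docker image ls --format template emits for non-riju images.
function listRijuTags() {
  const out = execFileSync("riju-system-privileged", ["list"], {
    encoding: "utf-8",
  });
  return out.split("\n").filter((line) => line.length > 0);
}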
void cmd_pull(char *repo, char *tag)
{
char *localImage, *remoteImage;
if (asprintf(&remoteImage, "%s:%s", repo, tag) < 0)
die("asprintf failed");
if (asprintf(&localImage, "riju:%s", tag) < 0)
die("asprintf failed");
pid_t orig_ppid = getpid();
pid_t pid = fork();
if (pid < 0)
die("fork failed");
else if (pid == 0) {
if (freopen("/dev/null", "w", stdout) == NULL)
die("freopen failed");
if (prctl(PR_SET_PDEATHSIG, SIGTERM) < 0)
die("prctl failed");
if (getppid() != orig_ppid)
exit(EXIT_FAILURE);
char *argv[] = {
"docker", "inspect", "--", localImage, NULL,
};
execvp(argv[0], argv);
die("execvp failed");
}
siginfo_t info;
if (waitid(P_PID, pid, &info, WEXITED) < 0)
die("waitid failed");
if (info.si_status == 0) {
// Image exists already, no need to pull. It is only appropriate
// to use cmd_pull with immutable images.
return;
}
orig_ppid = getpid();
pid = fork();
if (pid < 0)
die("fork failed");
else if (pid == 0) {
if (prctl(PR_SET_PDEATHSIG, SIGTERM) < 0)
die("prctl failed");
if (getppid() != orig_ppid)
exit(EXIT_FAILURE);
char *argv[] = {
"docker", "pull", "--", remoteImage, NULL,
};
execvp(argv[0], argv);
die("execvp failed");
}
if (waitid(P_PID, pid, &info, WEXITED) < 0)
die("waitid failed");
if (info.si_status != 0)
die("child process failed");
char *argv[] = {
"docker", "tag", "--", remoteImage, localImage,
};
execvp(argv[0], argv);
die("execvp failed");
}
void cmd_session(char *uuid, char *lang, char *imageHash)
{
if (setvbuf(stdout, NULL, _IONBF, 0) != 0)
die("setvbuf failed");
@ -240,6 +338,13 @@ void session(char *uuid, char *lang, char *imageHash)
"4000",
"--cgroup-parent",
"riju.slice",
// Deny access to outside networking for now in order to limit
// abuse, as we've received abuse reports from AWS. We should
// be able to remove this (and indeed we'll *want* to, in
// order to support package installation) by replacing it with
// a more fine-grained network control such as limiting
// outbound bandwidth.
"--network=none",
"--label",
"riju.category=user-session",
"--label",
@ -250,6 +355,7 @@ void session(char *uuid, char *lang, char *imageHash)
(char *)sentinel_bash,
NULL,
};
execvp(argv[0], argv);
die("execvp failed");
}
@ -287,7 +393,7 @@ void session(char *uuid, char *lang, char *imageHash)
}
}
void exec(char *uuid, int argc, char **cmdline, bool pty)
void cmd_exec(char *uuid, int argc, char **cmdline, bool pty)
{
if (setvbuf(stdout, NULL, _IONBF, 0) != 0)
die("setvbuf failed");
@ -455,7 +561,7 @@ void exec(char *uuid, int argc, char **cmdline, bool pty)
}
}
void teardown(char *uuid)
void cmd_teardown(char *uuid)
{
if (setuid(0) != 0)
die("setuid failed");
@ -483,31 +589,45 @@ int main(int argc, char **argv)
die("seteuid failed");
if (argc < 2)
die_with_usage();
if (!strcmp(argv[1], "list")) {
if (argc != 2)
die_with_usage();
cmd_list();
return 0;
}
if (!strcmp(argv[1], "pull")) {
if (argc != 4)
die_with_usage();
char *repo = parseRepo(argv[2]);
char *tag = parseTag(argv[3]);
cmd_pull(repo, tag);
return 0;
}
if (!strcmp(argv[1], "session")) {
if (argc < 4 || argc > 5)
die_with_usage();
char *uuid = parseUUID(argv[2]);
char *lang = parseLang(argv[3]);
char *imageHash = argc == 5 ? parseImageHash(argv[4]) : NULL;
session(uuid, lang, imageHash);
cmd_session(uuid, lang, imageHash);
return 0;
}
if (!strcmp(argv[1], "exec")) {
if (argc < 4)
die_with_usage();
exec(parseUUID(argv[2]), argc - 3, &argv[3], false);
cmd_exec(parseUUID(argv[2]), argc - 3, &argv[3], false);
return 0;
}
if (!strcmp(argv[1], "pty")) {
if (argc < 4)
die_with_usage();
exec(parseUUID(argv[2]), argc - 3, &argv[3], true);
cmd_exec(parseUUID(argv[2]), argc - 3, &argv[3], true);
return 0;
}
if (!strcmp(argv[1], "teardown")) {
if (argc < 2)
die_with_usage();
teardown(argc >= 3 ? parseUUID(argv[2]) : NULL);
cmd_teardown(argc >= 3 ? parseUUID(argv[2]) : NULL);
return 0;
}
die_with_usage();


@ -6,12 +6,3 @@ data "aws_ami" "server" {
values = [data.external.env.result.AMI_NAME]
}
}
# data "aws_ami" "ci" {
# owners = ["self"]
# filter {
# name = "name"
# values = [data.external.env.result.CI_AMI_NAME]
# }
# }


@ -1,174 +0,0 @@
resource "aws_cloudwatch_metric_alarm" "server_cpu" {
alarm_name = "riju-server-cpu-high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "30"
datapoints_to_alarm = "15"
metric_name = "cpu_usage_active"
namespace = "CWAgent"
period = "60"
statistic = "Average"
threshold = "70"
alarm_description = "Average CPU usage on Riju server is above 70% for 30 minutes"
ok_actions = [aws_sns_topic.riju.arn]
alarm_actions = [aws_sns_topic.riju.arn]
insufficient_data_actions = [aws_sns_topic.riju.arn]
dimensions = {
RijuInstanceGroup = "Webserver"
}
tags = {
BillingSubcategory = "Riju:CloudWatch:Alarm"
}
}
resource "aws_cloudwatch_metric_alarm" "server_memory" {
alarm_name = "riju-server-memory-high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "30"
datapoints_to_alarm = "15"
metric_name = "mem_used_percent"
namespace = "CWAgent"
period = "60"
statistic = "Average"
threshold = "70"
alarm_description = "Average memory usage on Riju server is above 70% for 30 minutes"
ok_actions = [aws_sns_topic.riju.arn]
alarm_actions = [aws_sns_topic.riju.arn]
insufficient_data_actions = [aws_sns_topic.riju.arn]
dimensions = {
RijuInstanceGroup = "Webserver"
}
tags = {
BillingSubcategory = "Riju:CloudWatch:Alarm"
}
}
resource "aws_cloudwatch_metric_alarm" "server_data_volume_disk_space" {
alarm_name = "riju-server-data-volume-disk-usage-high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "5"
datapoints_to_alarm = "5"
metric_name = "disk_used_percent"
namespace = "CWAgent"
period = "60"
statistic = "Average"
threshold = "70"
alarm_description = "Disk space usage for data volume on Riju server is above 70%"
ok_actions = [aws_sns_topic.riju.arn]
alarm_actions = [aws_sns_topic.riju.arn]
insufficient_data_actions = [aws_sns_topic.riju.arn]
dimensions = {
RijuInstanceGroup = "Webserver"
path = "/mnt/riju"
}
tags = {
BillingSubcategory = "Riju:CloudWatch:Alarm"
}
}
resource "aws_cloudwatch_metric_alarm" "server_root_volume_disk_space" {
alarm_name = "riju-server-root-volume-disk-usage-high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "5"
datapoints_to_alarm = "5"
metric_name = "disk_used_percent"
namespace = "CWAgent"
period = "60"
statistic = "Average"
threshold = "70"
alarm_description = "Disk space usage for root volume on Riju server is above 70%"
ok_actions = [aws_sns_topic.riju.arn]
alarm_actions = [aws_sns_topic.riju.arn]
insufficient_data_actions = [aws_sns_topic.riju.arn]
dimensions = {
RijuInstanceGroup = "Webserver"
path = "/"
}
tags = {
BillingSubcategory = "Riju:CloudWatch:Alarm"
}
}
resource "aws_cloudwatch_dashboard" "riju" {
dashboard_name = "Riju"
dashboard_body = <<EOF
{
"widgets": [
{
"type": "metric",
"x": 0,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"title": "CPU",
"annotations": {
"alarms": [
"${aws_cloudwatch_metric_alarm.server_cpu.arn}"
]
},
"view": "timeSeries",
"stacked": false
}
},
{
"type": "metric",
"x": 12,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"title": "Root volume disk space",
"annotations": {
"alarms": [
"${aws_cloudwatch_metric_alarm.server_root_volume_disk_space.arn}"
]
},
"view": "timeSeries",
"stacked": false,
"type": "chart"
}
},
{
"type": "metric",
"x": 18,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"title": "Data volume disk space",
"annotations": {
"alarms": [
"${aws_cloudwatch_metric_alarm.server_data_volume_disk_space.arn}"
]
},
"view": "timeSeries",
"stacked": false,
"type": "chart"
}
},
{
"type": "metric",
"x": 6,
"y": 0,
"width": 6,
"height": 6,
"properties": {
"title": "Memory",
"annotations": {
"alarms": [
"${aws_cloudwatch_metric_alarm.server_memory.arn}"
]
},
"view": "timeSeries",
"stacked": false,
"type": "chart"
}
}
]
}
EOF
}


@ -50,7 +50,7 @@ resource "aws_launch_template" "server" {
device_name = "/dev/sdh"
ebs {
volume_type = "gp3"
volume_size = 256
volume_size = 128
}
}


@ -1,7 +1,3 @@
data "aws_iam_policy" "cloudwatch" {
name = "CloudWatchAgentServerPolicy"
}
data "aws_iam_policy" "ssm" {
name = "AmazonSSMManagedInstanceCore"
}
@ -194,11 +190,6 @@ resource "aws_iam_role_policy_attachment" "server" {
policy_arn = aws_iam_policy.server.arn
}
resource "aws_iam_role_policy_attachment" "server_cloudwatch" {
role = aws_iam_role.server.name
policy_arn = data.aws_iam_policy.cloudwatch.arn
}
resource "aws_iam_role_policy_attachment" "server_ssm" {
role = aws_iam_role.server.name
policy_arn = data.aws_iam_policy.ssm.arn
@ -295,4 +286,4 @@ resource "aws_iam_user_policy_attachment" "grafana_cloudwatch" {
resource "aws_iam_access_key" "grafana" {
user = aws_iam_user.grafana.name
}
}


@ -10,12 +10,3 @@ output "deploy_aws_secret_access_key" {
value = aws_iam_access_key.deploy.secret
sensitive = true
}
output "grafana_aws_access_key_id" {
value = aws_iam_access_key.grafana.id
}
output "grafana_aws_secret_access_key" {
value = aws_iam_access_key.grafana.secret
sensitive = true
}


@ -1,31 +0,0 @@
resource "aws_ssm_parameter" "web_ami_id" {
name = "riju-web-ami-id"
type = "String"
value = data.aws_ami.server.id
data_type = "aws:ec2:image"
}
# resource "aws_ssm_parameter" "ci_ami_id" {
# name = "riju-ci-ami-id"
# type = "String"
# value = data.aws_ami.ci.id
# data_type = "aws:ec2:image"
# }
resource "aws_ssm_parameter" "docker_repo" {
name = "riju-docker-repo-host"
type = "String"
value = aws_ecr_repository.riju.repository_url
}
resource "aws_ssm_parameter" "public_docker_repo" {
name = "riju-public-docker-repo-host"
type = "String"
value = aws_ecrpublic_repository.riju.repository_uri
}
resource "aws_ssm_parameter" "s3_bucket" {
name = "riju-s3-bucket-name"
type = "String"
value = aws_s3_bucket.riju.bucket
}


@ -1,15 +1,10 @@
import crypto from "crypto";
import { promises as fs } from "fs";
import http from "http";
import url from "url";
import { Command } from "commander";
import express from "express";
import { getSharedDepsForLangConfig, readLangConfig } from "../lib/yaml.js";
import { getLocalImageLabel } from "./docker-util.js";
import { hashDockerfile } from "./hash-dockerfile.js";
import { getDebHash, runCommand } from "./util.js";
import { runCommand } from "./util.js";
// Get a Node.js http server object that will allow the Docker
// build to fetch files from outside the container, without them
@ -27,44 +22,6 @@ async function main() {
program.option("--debug", "interactive debugging");
program.parse(process.argv);
const { lang, debug } = program.opts();
const sharedDeps = await getSharedDepsForLangConfig(
await readLangConfig(lang)
);
const installContents = await fs.readFile(
`build/lang/${lang}/install.bash`,
"utf-8"
);
const sharedInstallContents = await Promise.all(
sharedDeps.map(async (name) =>
fs.readFile(`build/shared/${name}/install.bash`)
)
);
const allInstallContents = [].concat.apply(
[installContents],
sharedInstallContents
);
const hash = await hashDockerfile(
"lang",
{
"riju:base": await getLocalImageLabel("riju:base", "riju.image-hash"),
},
{
salt: {
langHash: await getDebHash(`build/lang/${lang}/riju-lang-${lang}.deb`),
sharedHashes: (
await Promise.all(
sharedDeps.map(
async (name) =>
await getDebHash(`build/shared/${name}/riju-shared-${name}.deb`)
)
)
).sort(),
installHash: allInstallContents
.map((c) => crypto.createHash("sha1").update(c).digest("hex"))
.join(""),
},
}
);
const server = getServer();
await new Promise((resolve) => server.listen(8487, "localhost", resolve));
try {
@ -76,11 +33,11 @@ async function main() {
await runCommand(
`docker build . -f docker/lang/Dockerfile ` +
`--build-arg LANG=${lang} -t riju:lang-${lang} ` +
`--network host --no-cache --label riju.image-hash=${hash}`
`--network host --no-cache`
);
}
} finally {
await server.close();
await new Promise((resolve) => server.close(resolve));
}
process.exit(0);
}


@ -1,10 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail
: ${AWS_ACCESS_KEY_ID}
: ${AWS_SECRET_ACCESS_KEY}
: ${DOCKER_REPO}
: ${S3_BUCKET}
make image shell I=ci CMD="tools/ci-run.bash" NI=1

Some files were not shown because too many files have changed in this diff.