commit 2242905eae
Merge branch 'fragment'
@@ -1,20 +0,0 @@
version: 2
workflows:
  version: 2
  ci:
    jobs:
      - build_and_deploy:
          filters:
            branches:
              only: master
            tags:
              ignore: /.*/
jobs:
  build_and_deploy:
    machine:
      image: ubuntu-2004:202010-01
    steps:
      - checkout
      - run:
          command: tools/ci-bootstrap.bash
          no_output_timeout: 2h
@@ -5,5 +5,6 @@
**/.lsp-repl-history
**/.terraform
**/build
**/build-docker
**/node_modules
**/out
@@ -0,0 +1,21 @@
name: Build and deploy
on:
  push:
    branches:
      - master
  pull_request:
    branches:
      - master
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build and deploy
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          DOCKER_REPO: 084011155226.dkr.ecr.us-west-1.amazonaws.com/riju
          S3_BUCKET: riju
        run: |
          make image shell I=ci CMD="tools/ci-bootstrap.bash"
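The job's single real step hands control to make, which builds the ci image and runs the bootstrap script inside it. Assuming the same variables are exported locally (the workflow takes them from repository secrets), the job can presumably be reproduced by hand:

    # Sketch: requires AWS credentials plus DOCKER_REPO and S3_BUCKET in the
    # environment, exactly as in the env block above.
    make image shell I=ci CMD="tools/ci-bootstrap.bash"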
@@ -4,5 +4,8 @@
.lsp-repl-history
.terraform
build
# Separate directory for things that are ignored by Git but not by
# Docker.
build-docker
node_modules
out
@@ -0,0 +1,11 @@
# Contributing guide

* [Criteria for language inclusion](doc/what-languages.md)
* [How to add your own language to Riju](doc/tutorial.md)
* [Deep dive on Riju build system](doc/build.md)
* [Deploying your own instance of Riju](doc/infrastructure.md)

If you'd like to request a new language, head to the [language support
meta-issue](https://github.com/raxod502/riju/issues/24) and add a
comment. Of course, if you actually want it to be added anytime soon,
you should submit a pull request :)
Makefile (177 lines changed)

@@ -1,27 +1,30 @@
SHELL := bash
.SHELLFLAGS := -o pipefail -euc

export PATH := bin:$(PATH)
export PATH := $(PWD)/bin:$(PATH)

-include .env
export

BUILD := build/$(T)/$(L)
DEB := riju-$(T)-$(L).deb
S3_DEBS := s3://$(S3_BUCKET)-debs
S3_DEB := $(S3_DEBS)/debs/$(DEB)
S3_HASH := $(S3_DEBS)/hashes/riju-$(T)-$(L)
S3 := s3://$(S3_BUCKET)
S3_DEB := $(S3)/debs/$(DEB)
S3_HASH := $(S3)/hashes/riju-$(T)-$(L)
S3_CONFIG := $(S3)/config.json

ifneq ($(CMD),)
C_CMD := -c '$(CMD)'
BASH_CMD := bash -c '$(CMD)'
else
C_CMD :=
BASH_CMD :=
endif

# Get rid of 'Entering directory' / 'Leaving directory' messages.
MAKE_QUIETLY := MAKELEVEL= make

.PHONY: all $(MAKECMDGOALS)
.PHONY: all $(MAKECMDGOALS) frontend system supervisor

all: help
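As a worked example of the consolidated S3 layout (values for illustration: S3_BUCKET=riju, T=lang, L=python), the new variables expand to:

    S3        = s3://riju
    S3_DEB    = s3://riju/debs/riju-lang-python.deb
    S3_HASH   = s3://riju/hashes/riju-lang-python
    S3_CONFIG = s3://riju/config.json

so everything now lives in one bucket rather than a separate ${S3_BUCKET}-debs bucket.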
@@ -36,10 +39,14 @@ endif
## Pass NC=1 to disable the Docker cache. Base images are not pulled;
## see 'make pull-base' for that.

image: # I=<image> [NC=1] : Build a Docker image
image: # I=<image> [L=<lang>] [NC=1] : Build a Docker image
    @: $${I}
ifeq ($(I),composite)
    node tools/build-composite-image.js
ifeq ($(I),lang)
    @: $${L}
    node tools/build-lang-image.js --lang $(L)
else ifeq ($(I),ubuntu)
    docker pull ubuntu:rolling
    hash="$$(docker inspect ubuntu:rolling | jq '.[0].Id' -r | sha1sum | awk '{ print $$1 }')"; echo "FROM ubuntu:rolling" | docker build --label riju.image-hash="$${hash}" -t riju:$(I) -
else ifneq (,$(filter $(I),admin ci))
    docker build . -f docker/$(I)/Dockerfile -t riju:$(I) $(NO_CACHE)
else
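Illustrative invocations of the reworked target (language name assumed):

    make image I=ubuntu           # pull ubuntu:rolling and stamp a riju.image-hash label
    make image I=lang L=python    # per-language image via tools/build-lang-image.js
    make image I=ci NC=1          # docker/ci/Dockerfile, bypassing the Docker cache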
@@ -51,7 +58,9 @@ VOLUME_MOUNT ?= $(PWD)
P1 ?= 6119
P2 ?= 6120

ifneq (,$(E))
ifneq (,$(EE))
SHELL_PORTS := -p 0.0.0.0:$(P1):6119 -p 0.0.0.0:$(P2):6120
else ifneq (,$(E))
SHELL_PORTS := -p 127.0.0.1:$(P1):6119 -p 127.0.0.1:$(P2):6120
else
SHELL_PORTS :=
@@ -59,47 +68,44 @@ endif

SHELL_ENV := -e Z -e CI -e TEST_PATIENCE -e TEST_CONCURRENCY

shell: # I=<shell> [E=1] [P1|P2=<port>] : Launch Docker image with shell
    @: $${I}
ifneq (,$(filter $(I),admin ci))
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/run/docker.sock:/var/run/docker.sock -v $(HOME)/.aws:/var/riju/.aws -v $(HOME)/.docker:/var/riju/.docker -v $(HOME)/.ssh:/var/riju/.ssh -v $(HOME)/.terraform.d:/var/riju/.terraform.d -e AWS_REGION -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e DOCKER_USERNAME -e DOCKER_PASSWORD -e DEPLOY_SSH_PRIVATE_KEY -e DOCKER_REPO -e S3_BUCKET -e DOMAIN -e VOLUME_MOUNT=$(VOLUME_MOUNT) $(SHELL_PORTS) $(SHELL_ENV) --network host riju:$(I) $(BASH_CMD)
else ifneq (,$(filter $(I),compile app))
    docker run -it --rm --hostname $(I) $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
else ifneq (,$(filter $(I),runtime composite))
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src --label riju-install-target=yes $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
ifeq ($(I),lang)
LANG_TAG := lang-$(L)
else
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) riju:$(I) $(BASH_CMD)
LANG_TAG := $(I)
endif

## This is equivalent to 'make pkg' in a fresh packaging container
## followed by 'make install' in a persistent runtime container.
IMAGE_HASH := "$$(docker inspect riju:$(LANG_TAG) | jq '.[0].Config.Labels["riju.image-hash"]' -r)"
WITH_IMAGE_HASH := -e RIJU_IMAGE_HASH=$(IMAGE_HASH)

repkg: script # L=<lang> T=<type> : Build fresh .deb and install into live container
    @: $${L} $${T}
    $(MAKE_QUIETLY) shell I=packaging CMD="make pkg L=$(L) T=$(T)"
    ctr="$$(docker container ls -f label="riju-install-target=yes" -l -q)"; test "$${ctr}" || (echo "no valid container is live"; exit 1); docker exec "$${ctr}" make install L=$(L) T=$(T)
LANG_IMAGE_HASH := "$$(docker inspect riju:lang-$(L) | jq '.[0].Config.Labels["riju.image-hash"]' -r)"

## This is equivalent to 'make repkg T=lang', 'make repkg T=config'.
## For shared dependencies, use 'make repkg T=shared' directly.

repkgs: # L=<lang> : Build and install fresh lang and config .debs
shell: # I=<shell> [L=<lang>] [E[E]=1] [P1|P2=<port>] : Launch Docker image with shell
    @: $${I}
ifneq (,$(filter $(I),admin ci))
    @mkdir -p $(HOME)/.aws $(HOME)/.docker $(HOME)/.ssh $(HOME)/.terraform.d
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/run/riju:/var/run/riju -v /var/run/docker.sock:/var/run/docker.sock -v $(HOME)/.aws:/var/run/riju/.aws -v $(HOME)/.docker:/var/run/riju/.docker -v $(HOME)/.ssh:/var/run/riju/.ssh -v $(HOME)/.terraform.d:/var/run/riju/.terraform.d -e AWS_REGION -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -e DOCKER_USERNAME -e DOCKER_PASSWORD -e DEPLOY_SSH_PRIVATE_KEY -e DOCKER_REPO -e S3_BUCKET -e DOMAIN -e VOLUME_MOUNT=$(VOLUME_MOUNT) $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) --network host riju:$(I) $(BASH_CMD)
else ifeq ($(I),app)
    docker run -it --rm --hostname $(I) -v /var/run/riju:/var/run/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
else ifneq (,$(filter $(I),base lang))
ifeq ($(I),lang)
    @: $${L}
    node tools/make-foreach.js --types repkg L=$(L)
endif
    docker run -it --rm --hostname $(LANG_TAG) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(LANG_TAG) $(BASH_CMD)
else ifeq ($(I),runtime)
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src -v /var/run/riju:/var/run/riju -v /var/run/docker.sock:/var/run/docker.sock $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
else
    docker run -it --rm --hostname $(I) -v $(VOLUME_MOUNT):/src $(SHELL_PORTS) $(SHELL_ENV) $(WITH_IMAGE_HASH) riju:$(I) $(BASH_CMD)
endif

ecr: # Authenticate to ECR (temporary credentials)
    aws ecr get-login-password | docker login --username AWS --password-stdin $(subst /riju,,$(DOCKER_REPO))
    aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin $(subst /riju,,$(PUBLIC_DOCKER_REPO))

### Build packaging scripts

script: # L=<lang> T=<type> : Generate a packaging script
    @: $${L} $${T}
    mkdir -p $(BUILD)
    node tools/generate-build-script.js --lang $(L) --type $(T) > $(BUILD)/build.bash
    chmod +x $(BUILD)/build.bash

scripts: # L=<lang> : Generate both lang and config packaging scripts
    @: $${L}
    node tools/make-foreach.js --types script L=$(L)

## This is equivalent to 'make script T=lang', 'make script T=config'.
## For shared dependencies, use 'make script T=shared' directly.
    node tools/generate-build-script.js --lang $(L) --type $(T)

all-scripts: # Generate packaging scripts for all languages
    node tools/write-all-build-scripts.js
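How the new shell variants line up, for illustration (values assumed):

    make shell I=admin            # mounts ~/.aws, ~/.docker, ~/.ssh, ~/.terraform.d under /var/run/riju
    make shell I=runtime E=1      # binds ports 6119/6120 on 127.0.0.1
    make shell I=lang L=python    # runs riju:lang-python with RIJU_IMAGE_HASH set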
@@ -108,7 +114,7 @@ all-scripts: # Generate packaging scripts for all languages

pkg-clean: # L=<lang> T=<type> : Set up fresh packaging environment
    @: $${L} $${T}
    rm -rf $(BUILD)/src $(BUILD)/pkg
    sudo rm -rf $(BUILD)/src $(BUILD)/pkg
    mkdir -p $(BUILD)/src $(BUILD)/pkg

pkg-build: # L=<lang> T=<type> : Run packaging script in packaging environment
@@ -133,28 +139,6 @@ pkg-deb: # L=<lang> T=<type> [Z=gzip|xz] : Build .deb from packaging environment

pkg: pkg-clean pkg-build pkg-deb # L=<lang> T=<type> [Z=gzip|xz] : Build fresh .deb

## This is equivalent to 'make pkg T=lang', 'make pkg T=config'. For
## shared dependencies, use 'make pkg T=shared' directly.
#
## Z is the compression type to use; defaults to none. Higher
## compression levels (gzip is moderate, xz is high) take much longer
## but produce much smaller packages.

pkgs: # L=<lang> [Z=gzip|xz] : Build both lang and config .debs
    @: $${L}
    node tools/make-foreach.js --types pkg L=$(L)

### Install packages

install: # L=<lang> T=<type> : Install built .deb
    @: $${L} $${T}
    if [[ -z "$$(ls -A /var/lib/apt/lists)" ]]; then sudo apt update; fi
    DEBIAN_FRONTEND=noninteractive sudo -E apt reinstall -y ./$(BUILD)/$(DEB)

installs: # L=<lang> : Install both lang and config .debs
    @: $${L}
    node tools/make-foreach.js --types install L=$(L)

### Build and run application code

frontend: # Compile frontend assets for production
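For example, building both packages for one language with maximum compression (language name assumed):

    make pkgs L=python Z=xz    # lang and config .debs; xz is slowest but smallest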
@@ -169,25 +153,32 @@ system: # Compile setuid binary for production
system-dev: # Compile and watch setuid binary for development
    watchexec -w system/src -n -- ./system/compile.bash

supervisor: # Compile supervisor binary for production
    ./supervisor/compile.bash

supervisor-dev: # Compile and watch supervisor binary for development
    watchexec -w supervisor/src -n -- ./supervisor/compile.bash

server: # Run server for production
    node backend/server.js

server-dev: # Run and restart server for development
    watchexec -w backend -r -n -- node backend/server.js

build: frontend system # Compile all artifacts for production
build: frontend system supervisor # Compile all artifacts for production

dev: # Compile, run, and watch all artifacts and server for development
    $(MAKE_QUIETLY) -j3 frontend-dev system-dev server-dev
    $(MAKE_QUIETLY) -j4 frontend-dev system-dev supervisor-dev server-dev

### Application tools

## L can be a language identifier or a test type (run, repl, lsp,
## format, etc.). Multiple identifiers can be separated by spaces to
## form a conjunction (AND), or by commas to form a disjunction (OR).
## L is a language identifier or a comma-separated list of them, to
## filter tests by language. T is a test type (run, repl, lsp, format,
## etc.) or a set of them to filter tests that way. If both filters
## are provided, then only tests matching both are run.

test: # L=<filter> : Run test(s) for language or test category
    node backend/test-runner.js $(L)
test: # [L=<lang>[,...]] [T=<test>[,...]] : Run test(s) for language or test category
    RIJU_LANG_IMAGE_HASH=$(LANG_IMAGE_HASH) node backend/test-runner.js

## Functions such as 'repl', 'run', 'format', etc. are available in
## the sandbox, and initial setup has already been done (e.g. 'setup'
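Examples of the new filter semantics (language names illustrative):

    make test L=python               # every test for one language
    make test T=lsp                  # one test type across all languages
    make test L=python,julia T=run   # only tests matching both filters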
@@ -207,10 +198,7 @@ lsp: # L=<lang|cmd> : Run LSP REPL for language or custom command line

### Fetch artifacts from registries

pull-base: # Pull latest base image(s) from Docker Hub
    docker pull ubuntu:rolling

pull: # I=<image> : Pull last published Riju image from Docker Hub
pull: # I=<image> : Pull last published Riju image from Docker registry
    @: $${I} $${DOCKER_REPO}
    docker pull $(DOCKER_REPO):$(I)
    docker tag $(DOCKER_REPO):$(I) riju:$(I)
@@ -218,51 +206,62 @@ pull: # I=<image> : Pull last published Riju image from Docker Hub
download: # L=<lang> T=<type> : Download last published .deb from S3
    @: $${L} $${T} $${S3_BUCKET}
    mkdir -p $(BUILD)
    aws s3 cp $(S3_DEB) $(BUILD)/$(DEB) --no-sign-request
    aws s3 cp $(S3_DEB) $(BUILD)/$(DEB)

plan: # Display plan to pull/rebuild outdated or missing artifacts
    node tools/plan-publish.js

sync: # Pull/rebuild outdated or missing artifacts
    node tools/plan-publish.js --execute
undeploy: # Pull latest deployment config from S3
    mkdir -p $(BUILD)
    aws s3 cp $(S3_CONFIG) $(BUILD)/config.json

### Publish artifacts to registries

push: # I=<image> : Push Riju image to Docker Hub
push: # I=<image> : Push Riju image to Docker registry
    @: $${I} $${DOCKER_REPO}
    docker tag riju:$(I) $(DOCKER_REPO):$(I)-$(IMAGE_HASH)
    docker push $(DOCKER_REPO):$(I)-$(IMAGE_HASH)
ifeq ($(I),ubuntu)
    docker tag riju:$(I) $(PUBLIC_DOCKER_REPO):$(I)
    docker push $(PUBLIC_DOCKER_REPO):$(I)
endif
    docker tag riju:$(I) $(DOCKER_REPO):$(I)
    docker push $(DOCKER_REPO):$(I)

upload: # L=<lang> T=<type> : Upload .deb to S3
    @: $${L} $${T} $${S3_BUCKET}
    tools/ensure-deb-compressed.bash
    aws s3 rm --recursive $(S3_HASH)
    aws s3 cp $(BUILD)/$(DEB) $(S3_DEB)
    hash="$$(dpkg-deb -f $(BUILD)/$(DEB) Riju-Script-Hash | grep .)"; aws s3 cp - "$(S3_HASH)/$${hash}" < /dev/null

## You should probably only run this from CI.
deploy-config: # Generate deployment config file
    node tools/generate-deploy-config.js

publish: # Full synchronization and prod deployment
    tools/publish.bash
deploy: deploy-config # Upload deployment config to S3 and update ASG instances
    aws s3 cp $(BUILD)/config.json $(S3_CONFIG)

### Infrastructure

packer: supervisor # Build and publish a new AMI
    tools/packer-build.bash

### Miscellaneous

## Run this every time you update .gitignore.
## Run this every time you update .gitignore or .dockerignore.in.

dockerignore: # Update .dockerignore from .gitignore
dockerignore: # Update .dockerignore from .gitignore and .dockerignore.in
    echo "# This file is generated by 'make dockerignore', do not edit." > .dockerignore
    cat .gitignore | sed 's#^#**/#' >> .dockerignore
    cat .gitignore | sed 's/#.*//' | grep . | sed 's#^#**/#' >> .dockerignore

## You need to be inside a 'make env' shell whenever you are running
## manual commands (Docker, Terraform, Packer, etc.) directly, as
## opposed to through the Makefile.

env: # Run shell with .env file loaded and $PATH fixed
    exec bash --rcfile <(cat ~/.bashrc - <<< 'PS1="[.env] $$PS1"')
env: # [CMD=<target>] : Run shell with .env file loaded and $PATH fixed
    exec bash $(C_CMD)

tmux: # Start or attach to tmux session
    MAKELEVEL= tmux attach || MAKELEVEL= tmux new-session -s tmux

usage:
usage:
    @cat Makefile | \
        grep -E '^[^.:[:space:]]+:|[#]##' | \
        sed -E 's/:[^#]*#([^:]+)$$/: #:\1/' | \
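The added sed/grep stages matter because .gitignore now contains comments (see the .gitignore hunk above); only the .gitignore half of the pipeline is visible here, though the comment also names .dockerignore.in. On the visible entries the pipeline behaves like:

    $ sed 's/#.*//' .gitignore | grep . | sed 's#^#**/#'
    **/.lsp-repl-history
    **/.terraform
    **/build
    **/build-docker
    **/node_modules
    **/out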
README.md (44 lines changed)

@@ -2,25 +2,37 @@

Riju is a very fast online playground for every programming language.
In less than a second, you can start playing with a Python interpreter
or compiling INTERCAL code.
or compiling [INTERCAL](https://en.wikipedia.org/wiki/INTERCAL) code.

Check out the [live application](https://riju.codes/)!
Check it out at <https://riju.codes>!

**You should not write any sensitive code on Riju, as NO GUARANTEES
are made about the security or privacy of your data. (No warranty etc
etc.)**
## Is it free?

This project is a work in progress, and I don't intend on thoroughly
documenting it until it has reached feature-completeness.
Riju is free and always will be free for everyone.

## Documentation
However, if Riju gets popular enough, I won't be able to afford paying
for the hosting myself. To help me keep Riju online, you can donate
via Patreon. All donations are used solely to cover hosting costs, and
any surplus is donated to the [Electronic Frontier
Foundation](https://www.eff.org/).

* [Criteria for language inclusion](doc/what-languages.md)
* [How to add your own language to Riju](doc/tutorial.md)
* [Deep dive on Riju build system](doc/build.md)
* [Deploying your own instance of Riju](doc/infrastructure.md)
## Is it safe?

If you'd like to request a new language, head to the [language support
meta-issue](https://github.com/raxod502/riju/issues/24) and add a
comment. Of course, if you actually want it to be added anytime soon,
you should submit a pull request :)
Riju does not collect your personal information.

* Your code is deleted from the server as soon as you close Riju.
* Your terminal input and output is never saved or logged anywhere.
* Riju uses [Fathom Analytics](https://usefathom.com/) to measure
  traffic. Fathom collects very limited data and does not sell it to
  third parties, unlike Google Analytics.
* Riju does not serve advertisements.

All of the above notwithstanding, any service that allows people to
run code online is inherently risky. For this reason, I can't make any
guarantees about the security or privacy of your data.

Please see [Reporting a security issue](SECURITY.md).

## Can I help?

Please see [Contributing guide](CONTRIBUTING.md).
@@ -0,0 +1,11 @@
# Reporting a security issue

Please contact me at
[radon.neon@gmail.com](mailto:radon.neon@gmail.com) if you find any
way to:

* Take down Riju without using a large number of concurrent sessions.
* View or interfere with another user's session.
* Tamper with the Riju server.

I will do my best to correct the vulnerability as soon as possible.
backend/api.js (131 lines changed)

@@ -1,39 +1,30 @@
import { spawn } from "child_process";
import path from "path";
import process from "process";
import WebSocket from "ws";

import pty from "node-pty";
import pQueue from "p-queue";
const PQueue = pQueue.default;
import rpc from "vscode-jsonrpc";
import { v4 as getUUID } from "uuid";

import { langs } from "./langs.js";
import { borrowUser } from "./users.js";
import * as util from "./util.js";
import { bash } from "./util.js";
import { bash, getUUID } from "./util.js";

const allSessions = new Set();

export class Session {
  get homedir() {
    return `/tmp/riju/${this.uuid}`;
    return "/home/riju/src";
  }

  get config() {
    return langs[this.lang];
  }

  get uid() {
    return this.uidInfo.uid;
  }

  returnUser = async () => {
    this.uidInfo && (await this.uidInfo.returnUser());
  };

  get context() {
    return { uid: this.uid, uuid: this.uuid };
    return { uuid: this.uuid, lang: this.lang };
  }

  log = (msg) => this.logPrimitive(`[${this.uuid}] ${msg}`);
@@ -43,7 +34,7 @@ export class Session {
    this.uuid = getUUID();
    this.lang = lang;
    this.tearingDown = false;
    this.uidInfo = null;
    this.container = null;
    this.term = null;
    this.lsp = null;
    this.daemon = null;
@@ -57,24 +48,62 @@ export class Session {
    return await util.run(args, this.log, options);
  };

  privilegedSetup = () => util.privilegedSetup(this.context);
  privilegedSpawn = (args) => util.privilegedSpawn(this.context, args);
  privilegedUseradd = () => util.privilegedUseradd(this.uid);
  privilegedTeardown = () => util.privilegedTeardown(this.context);
  privilegedSession = () => util.privilegedSession(this.context);
  privilegedExec = (cmdline) =>
    util.privilegedExec(this.context, bash(cmdline));
  privilegedPty = (cmdline) =>
    util.privilegedPty(this.context, bash(cmdline, { stty: true }));

  setup = async () => {
    try {
      allSessions.add(this);
      const { uid, returnUser } = await borrowUser();
      this.uidInfo = { uid, returnUser };
      this.log(`Borrowed uid ${this.uid}`);
      await this.run(this.privilegedSetup());
      const containerArgs = this.privilegedSession();
      const containerPty = pty.spawn(containerArgs[0], containerArgs.slice(1), {
        name: "xterm-color",
      });
      this.container = {
        pty: containerPty,
      };
      containerPty.on("close", (code, signal) =>
        this.send({
          event: "serviceFailed",
          service: "container",
          error: `Exited with status ${signal || code}`,
        })
      );
      containerPty.on("error", (err) =>
        this.send({
          event: "serviceFailed",
          service: "container",
          error: `${err}`,
        })
      );
      let buffer = "";
      await new Promise((resolve) => {
        containerPty.on("data", (data) => {
          buffer += data;
          let idx;
          while ((idx = buffer.indexOf("\r\n")) !== -1) {
            const line = buffer.slice(0, idx);
            buffer = buffer.slice(idx + 2);
            if (line === "riju: container ready") {
              resolve();
            } else {
              this.send({
                event: "serviceLog",
                service: "container",
                output: line + "\n",
              })
            }
          }
        });
      });
      if (this.config.setup) {
        await this.run(this.privilegedSpawn(bash(this.config.setup)));
        await this.run(this.privilegedExec(this.config.setup));
      }
      await this.runCode();
      if (this.config.daemon) {
        const daemonArgs = this.privilegedSpawn(bash(this.config.daemon));
        const daemonArgs = this.privilegedExec(this.config.daemon);
        const daemonProc = spawn(daemonArgs[0], daemonArgs.slice(1));
        this.daemon = {
          proc: daemonProc,
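setup() blocks until the container's pty emits a fixed sentinel line, forwarding everything else as serviceLog events. Presumably the container entry point signals readiness along these lines (a sketch, not the actual entry point):

    # Inside the container, after initialization completes:
    echo 'riju: container ready'    # the backend resolves its Promise on this exact line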
@@ -105,9 +134,9 @@ export class Session {
      }
      if (this.config.lsp) {
        if (this.config.lsp.setup) {
          await this.run(this.privilegedSpawn(bash(this.config.lsp.setup)));
          await this.run(this.privilegedExec(this.config.lsp.setup));
        }
        const lspArgs = this.privilegedSpawn(bash(this.config.lsp.start));
        const lspArgs = this.privilegedExec(this.config.lsp.start);
        const lspProc = spawn(lspArgs[0], lspArgs.slice(1));
        this.lsp = {
          proc: lspProc,
@@ -251,22 +280,11 @@ export class Session {

  writeCode = async (code) => {
    if (this.config.main.includes("/")) {
      await this.run(
        this.privilegedSpawn([
          "mkdir",
          "-p",
          path.dirname(`${this.homedir}/${this.config.main}`),
        ])
      );
      const dir = path.dirname(`${this.homedir}/${this.config.main}`);
      await this.run(this.privilegedExec(`mkdir -p ${dir}`));
    }
    await this.run(
      this.privilegedSpawn([
        "sh",
        "-c",
        `cat > ${path.resolve(this.homedir, this.config.main)}`,
      ]),
      { input: code }
    );
    const file = path.resolve(this.homedir, this.config.main);
    await this.run(this.privilegedExec(`cat > ${file}`), { input: code });
  };

  runCode = async (code) => {
@@ -282,11 +300,11 @@ export class Session {
      template,
    } = this.config;
    if (this.term) {
      const pid = this.term.pty.pid;
      const args = this.privilegedSpawn(
        bash(`kill -SIGTERM ${pid}; sleep 1; kill -SIGKILL ${pid}`)
      );
      spawn(args[0], args.slice(1));
      try {
        process.kill(this.term.pty.pid);
      } catch (err) {
        // process might have already exited
      }
      // Signal to terminalOutput message generator using closure.
      this.term.live = false;
      this.term = null;
@@ -294,9 +312,9 @@ export class Session {
    this.send({ event: "terminalClear" });
    let cmdline;
    if (code) {
      cmdline = run;
      cmdline = `set +e; ${run}`;
      if (compile) {
        cmdline = `( ${compile} ) && ( set +e; ${run} )`;
        cmdline = `( ${compile} ) && ( ${run} )`;
      }
    } else if (repl) {
      cmdline = repl;
@@ -310,7 +328,7 @@ export class Session {
      code += suffix + "\n";
    }
    await this.writeCode(code);
    const termArgs = this.privilegedSpawn(bash(cmdline));
    const termArgs = this.privilegedPty(cmdline);
    const term = {
      pty: pty.spawn(termArgs[0], termArgs.slice(1), {
        name: "xterm-color",
@@ -349,14 +367,14 @@ export class Session {
    }
    if (this.formatter) {
      const pid = this.formatter.proc.pid;
      const args = this.privilegedSpawn(
        bash(`kill -SIGTERM ${pid}; sleep 1; kill -SIGKILL ${pid}`)
      const args = this.privilegedExec(
        `kill -SIGTERM ${pid}; sleep 1; kill -SIGKILL ${pid}`
      );
      spawn(args[0], args.slice(1));
      this.formatter.live = false;
      this.formatter = null;
    }
    const args = this.privilegedSpawn(bash(this.config.format.run));
    const args = this.privilegedExec(this.config.format.run);
    const formatter = {
      proc: spawn(args[0], args.slice(1)),
      live: true,
@@ -409,7 +427,7 @@ export class Session {
  };

  ensure = async (cmd) => {
    const code = await this.run(this.privilegedSpawn(bash(cmd)), {
    const code = await this.run(this.privilegedExec(cmd), {
      check: false,
    });
    this.send({ event: "ensured", code });
@@ -422,11 +440,10 @@ export class Session {
    }
    this.log(`Tearing down session`);
    this.tearingDown = true;
    allSessions.delete(this);
    if (this.uidInfo) {
      await this.run(this.privilegedTeardown());
      await this.returnUser();
    if (this.container) {
      this.container.pty.kill();
    }
    allSessions.delete(this);
    this.ws.terminate();
  } catch (err) {
    this.log(`Error during teardown`);
@@ -3,6 +3,7 @@ import path from "path";

import debounce from "debounce";

import { getLangs, readLangConfig } from "../lib/yaml.js";
import { log } from "./util.js";

// Map from language IDs to language configuration objects. This is
@@ -12,28 +13,17 @@ export let langs = {};
// Map from language aliases and IDs to canonical language IDs.
export let aliases = {};

// Read languages from JSON files in /opt/riju/langs, and update the
// global langs variable in this module. Never throw an error. If
// there is a problem then just leave the languages as they previously
// were.
async function readLangsFromDisk() {
// Read languages from YAML, and update the global langs variable in
// this module. Never throw an error. If there is a problem then just
// leave the languages as they previously were.
async function updateLangsFromDisk() {
  try {
    const newLangs = {};
    const newAliases = {};
    for (const filename of await fs.readdir("/opt/riju/langs")) {
      if (path.parse(filename).ext !== ".json") {
        continue;
      }
      const id = path.parse(filename).name;
      const langConfig = JSON.parse(
        await fs.readFile(`/opt/riju/langs/${filename}`, "utf-8")
      );
      if (langConfig.id !== id) {
        log.error(
          "Language config ${filename} has mismatched language ID ${id}, ignoring"
        );
        continue;
      }
    for (const langConfig of await Promise.all(
      (await getLangs()).map(readLangConfig)
    )) {
      const { id } = langConfig;
      newLangs[id] = langConfig;
      newAliases[id] = id;
      for (const alias of langConfig.aliases || []) {
@@ -52,6 +42,6 @@ async function readLangsFromDisk() {
  }
}

export const langsPromise = readLangsFromDisk().then(() => langs);
export const langsPromise = updateLangsFromDisk().then(() => langs);

fsOrig.watch("/opt/riju/langs", debounce(readLangsFromDisk, 200));
fsOrig.watch("langs", debounce(updateLangsFromDisk, 200));
@@ -2,10 +2,10 @@ import child_process from "child_process";
import process from "process";

import readline from "historic-readline";
import { quote } from "shell-quote";
import rpc from "vscode-jsonrpc";

import { langsPromise } from "./langs.js";
import { quote } from "./util.js";

const args = process.argv.slice(2);
@@ -32,7 +32,7 @@ if (args.length === 1 && langs[args[0]] && langs[args[0]].lsp) {
  cmdline = args;
}

console.error(quote(cmdline));
console.error(cmdline.map(quote).join(" "));
const proc = child_process.spawn(cmdline[0], cmdline.slice(1));

proc.stderr.on("data", (data) => process.stderr.write(data));
@@ -5,10 +5,13 @@ if [[ -z "$L" ]]; then
    exit 1
fi

cfg="$(< "/opt/riju/langs/$L.json")" || exit 1
if [[ -z "$LANG_CONFIG" ]]; then
    echo 'environment variable unset: $LANG_CONFIG' >&2
    exit 1
fi

function get {
    jq -r ".$1" <<< "${cfg}"
    jq -r ".$1" <<< "${LANG_CONFIG}"
}

function has {
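Under the new contract the supervisor passes the whole language configuration as JSON in $LANG_CONFIG, so the sandbox no longer reads /opt/riju/langs. A minimal sketch (the JSON shape is illustrative, not the full schema):

    export LANG_CONFIG='{"id":"python","repl":"python3 -u"}'
    get repl    # prints: python3 -u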
@@ -2,14 +2,16 @@ import { spawn } from "child_process";
import { promises as fs } from "fs";
import process from "process";

import { quote } from "shell-quote";
import { v4 as getUUID } from "uuid";
import pty from "node-pty";

import { borrowUser } from "./users.js";
import { readLangConfig } from "../lib/yaml.js";
import {
  privilegedSetup,
  privilegedSpawn,
  privilegedTeardown,
  bash,
  getUUID,
  privilegedExec,
  privilegedPty,
  privilegedSession,
  quote,
  run,
} from "./util.js";
@@ -28,23 +30,48 @@ async function main() {
  if (!lang) {
    die("environment variable unset: $L");
  }
  const langConfig = await readLangConfig(lang);
  const uuid = getUUID();
  const { uid, returnUser } = await borrowUser(log);
  await run(privilegedSetup({ uid, uuid }), log);
  const args = privilegedSpawn({ uid, uuid }, [
    "bash",
    "-c",
    `exec env L='${lang}' bash --rcfile <(cat <<< ${quote([sandboxScript])})`,
  ]);
  console.log(`Starting session with UUID ${uuid}`);
  const sessionArgs = privilegedSession({ uuid, lang });
  const session = pty.spawn(sessionArgs[0], sessionArgs.slice(1), {
    name: "xterm-color",
  });
  let buffer = "";
  await new Promise((resolve) => {
    session.on("data", (data) => {
      buffer += data;
      let idx;
      while ((idx = buffer.indexOf("\r\n")) !== -1) {
        const line = buffer.slice(0, idx);
        buffer = buffer.slice(idx + 2);
        if (line === "riju: container ready") {
          resolve();
        } else {
          console.error(line);
        }
      }
    });
  });
  const args = privilegedPty(
    { uuid },
    bash(
      `env L='${lang}' LANG_CONFIG=${quote(
        JSON.stringify(langConfig),
      )} bash --rcfile <(cat <<< ${quote(sandboxScript)})`
    )
  );
  const proc = spawn(args[0], args.slice(1), {
    stdio: "inherit",
  });
  await new Promise((resolve, reject) => {
    proc.on("error", reject);
    proc.on("close", resolve);
  });
  await run(privilegedTeardown({ uid, uuid }), log);
  await returnUser();
  try {
    await new Promise((resolve, reject) => {
      proc.on("error", reject);
      proc.on("close", resolve);
    });
  } finally {
    session.kill();
  }
}

main().catch(die);
@@ -52,6 +52,7 @@ app.get("/:lang", (req, res) => {
  const canonical = aliases[lang];
  if (!canonical) {
    res.status(404).send(`No such language: ${lang}\n`);
    return;
  } else if (canonical !== lang) {
    res.redirect(301, `/${canonical}`);
    return;
@@ -5,10 +5,11 @@ import _ from "lodash";
import pQueue from "p-queue";
const PQueue = pQueue.default;
import stripAnsi from "strip-ansi";
import { v4 as getUUID } from "uuid";

import { getTestHash } from "../lib/hash-test.js";
import * as api from "./api.js";
import { langsPromise } from "./langs.js";
import { getUUID } from "./util.js";

let langs = {};
@@ -622,16 +623,11 @@ async function writeLog(lang, type, result, log) {
async function main() {
  langs = await langsPromise;
  let tests = getTestList();
  const args = process.argv.slice(2);
  for (const arg of args) {
    tests = tests.filter(
      ({ lang, type }) =>
        arg
          .split(",")
          .filter((arg) =>
            [lang, type].concat(langs[lang].aliases || []).includes(arg)
          ).length > 0
    );
  if (process.env.L) {
    tests = tests.filter(({ lang }) => process.env.L.split(",").includes(lang));
  }
  if (process.env.T) {
    tests = tests.filter(({ type }) => process.env.T.split(",").includes(type));
  }
  if (tests.length === 0) {
    console.error("no tests selected");
@@ -732,6 +728,23 @@ async function main() {
      console.error(`  - ${lang}/${type} (${err})`)
    );
  }
  const langsValidated = {};
  passed.forEach((_, { lang }) => {
    langsValidated[lang] = true;
  });
  failed.forEach((_, { lang }) => {
    langsValidated[lang] = false;
  });
  for (const [lang, validated] of Object.entries(langsValidated)) {
    if (!validated) {
      continue;
    }
    await fs.mkdir(`build/test-hashes/lang`, { recursive: true });
    await fs.writeFile(
      `build/test-hashes/lang/${lang}`,
      await getTestHash(lang, process.env.RIJU_LANG_IMAGE_HASH)
    );
  }
  process.exit(failed.size > 0 ? 1 : 0);
}
backend/users.js (115 lines changed)

@@ -1,115 +0,0 @@
import { spawn } from "child_process";
import { promises as fs } from "fs";
import os from "os";

import AsyncLock from "async-lock";
import _ from "lodash";
import parsePasswd from "parse-passwd";

import { asBool, privilegedUseradd, run, uuidRegexp } from "./util.js";

// Keep in sync with system/src/riju-system-privileged.c
export const MIN_UID = 2000;
export const MAX_UID = 65000;

function validUID(uid) {
  return uid >= MIN_UID && uid < MAX_UID;
}

const CUR_UID = os.userInfo().uid;
const ASSUME_SINGLE_PROCESS = asBool(
  process.env.RIJU_ASSUME_SINGLE_PROCESS,
  false
);

let initialized = false;
let nextUserToCreate = null;
let locallyBorrowedUsers = new Set();
let availableUsers = new Set();
let lock = new AsyncLock();

async function getCreatedUsers() {
  return new Set(
    parsePasswd(await fs.readFile("/etc/passwd", "utf-8"))
      .map(({ uid }) => parseInt(uid))
      .filter((uid) => !isNaN(uid) && validUID(uid))
  );
}

async function getActiveUsers() {
  let dirents;
  try {
    dirents = await fs.readdir("/tmp/riju");
  } catch (err) {
    if (err.code === "ENOENT") {
      return new Set();
    }
    throw err;
  }
  return new Set(
    (
      await Promise.all(
        dirents
          .filter((name) => name.match(uuidRegexp))
          .map((name) => fs.stat(`/tmp/riju/${name}`))
      )
    )
      .map(({ uid }) => uid)
      .filter(validUID)
  );
}

async function createUser(log) {
  if (nextUserToCreate >= MAX_UID) {
    throw new Error("too many users");
  }
  const uid = nextUserToCreate;
  await run(privilegedUseradd(uid), log);
  nextUserToCreate += 1;
  return uid;
}

export async function borrowUser(log) {
  return await lock.acquire("key", async () => {
    if (!initialized || !ASSUME_SINGLE_PROCESS) {
      const createdUsers = await getCreatedUsers();
      const activeUsers = await getActiveUsers();
      if (createdUsers.size > 0) {
        nextUserToCreate = _.max([...createdUsers]) + 1;
      } else {
        nextUserToCreate = MIN_UID;
      }
      // If there are new users created, we want to make them
      // available (unless they are already active). Similarly, if
      // there are users that have become inactive, we want to make
      // them available (unless they are already borrowed locally).
      for (const user of createdUsers) {
        if (!activeUsers.has(user) && !locallyBorrowedUsers.has(user)) {
          availableUsers.add(user);
        }
      }
      // If there are users that have become active, we want to make
      // them unavailable.
      for (const user of activeUsers) {
        availableUsers.delete(user);
      }
      initialized = true;
    }
    if (availableUsers.size === 0) {
      availableUsers.add(await createUser(log));
    }
    // https://stackoverflow.com/a/32539929/3538165
    const user = availableUsers.values().next().value;
    locallyBorrowedUsers.add(user);
    availableUsers.delete(user);
    return {
      uid: user,
      returnUser: async () => {
        await lock.acquire("key", () => {
          locallyBorrowedUsers.delete(user);
          availableUsers.add(user);
        });
      },
    };
  });
}
backend/util.js (117 lines changed)

@@ -2,56 +2,39 @@ import { spawn, spawnSync } from "child_process";
import os from "os";
import process from "process";

import { quote } from "shell-quote";
import { v4 as getUUIDOrig } from "uuid";

import { MIN_UID, MAX_UID } from "./users.js";
function computeImageHashes() {
  let deployConfig = process.env.RIJU_DEPLOY_CONFIG;
  if (!deployConfig)
    return {};
  deployConfig = JSON.parse(deployConfig);
  const imageHashes = {};
  for (const [lang, tag] of Object.entries(deployConfig.langImageTags)) {
    const prefix = `lang-${lang}-`;
    if (!tag.startsWith(prefix)) {
      throw new Error(`malformed tag ${tag}`);
    }
    const imageHash = tag.slice(prefix.length);
    if (imageHash.length !== 40) {
      throw new Error(`malformed tag ${tag}`);
    }
    imageHashes[lang] = imageHash;
  }
  console.log(imageHashes);
  return imageHashes;
}

const imageHashes = computeImageHashes();

export function quote(str) {
  return "'" + str.replace(/'/g, `'"'"'`) + "'";
}

export const rijuSystemPrivileged = "system/out/riju-system-privileged";

const rubyVersion = (() => {
  try {
    return spawnSync("ruby", ["-e", "puts RUBY_VERSION"])
      .stdout.toString()
      .trim();
  } catch (err) {
    return null;
  }
})();

function getEnv({ uid, uuid }) {
  const cwd = `/tmp/riju/${uuid}`;
  const path = [
    rubyVersion && `${cwd}/.gem/ruby/${rubyVersion}/bin`,
    `${cwd}/.local/bin`,
    `${cwd}/node_modules/.bin`,
    `/usr/local/sbin`,
    `/usr/local/bin`,
    `/usr/sbin`,
    `/usr/bin`,
    `/bin`,
  ].filter((x) => x);
  const username =
    uid >= MIN_UID && uid < MAX_UID ? `riju${uid}` : os.userInfo().username;
  return {
    HOME: cwd,
    HOSTNAME: "riju",
    LANG: "C.UTF-8",
    LC_ALL: "C.UTF-8",
    LOGNAME: username,
    PATH: path.join(":"),
    PWD: cwd,
    SHELL: "/usr/bin/bash",
    TERM: "xterm-256color",
    TMPDIR: `${cwd}`,
    USER: username,
    USERNAME: username,
  };
}

function getEnvString(ctx) {
  return Object.entries(getEnv(ctx))
    .map(([key, val]) => `${key}=${quote([val])}`)
    .join(" ");
export function getUUID() {
  return getUUIDOrig().replace(/-/g, "");
}

export async function run(args, log, options) {
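The hand-rolled quote() wraps its argument in single quotes and escapes embedded single quotes with the classic '"'"' trick; a worked example:

    # quote("it's") returns:  'it'"'"'s'
    $ echo 'it'"'"'s'
    it's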
@@ -87,38 +70,35 @@ export async function run(args, log, options) {
  });
}

export function privilegedUseradd(uid) {
  return [rijuSystemPrivileged, "useradd", `${uid}`];
export function privilegedSession({ uuid, lang }) {
  const cmdline = [rijuSystemPrivileged, "session", uuid, lang];
  if (imageHashes[lang]) {
    cmdline.push(imageHashes[lang]);
  }
  return cmdline;
}

export function privilegedSetup({ uid, uuid }) {
  return [rijuSystemPrivileged, "setup", `${uid}`, uuid];
export function privilegedExec({ uuid }, args) {
  return [rijuSystemPrivileged, "exec", uuid].concat(args);
}

export function privilegedSpawn(ctx, args) {
  const { uid, uuid } = ctx;
  return [
    rijuSystemPrivileged,
    "spawn",
    `${uid}`,
    uuid,
    "sh",
    "-c",
    `exec env -i ${getEnvString(ctx)} "$@"`,
    "--",
  ].concat(args);
export function privilegedPty({ uuid }, args) {
  return [rijuSystemPrivileged, "pty", uuid].concat(args);
}

export function privilegedTeardown({ uid, uuid }) {
  return [rijuSystemPrivileged, "teardown", `${uid}`, uuid];
}

export function bash(cmdline) {
export function bash(cmdline, opts) {
  const stty = opts && opts.stty;
  if (!cmdline.match(/[;|&(){}=\n]/)) {
    // Reduce number of subshells we generate, if we're just running a
    // single command (no shell logic).
    cmdline = "exec " + cmdline;
  }
  if (stty) {
    // Workaround https://github.com/moby/moby/issues/25450 (the issue
    // thread claims the bug is resolved and released, but not in my
    // testing).
    cmdline = "stty cols 80 rows 24; " + cmdline;
  }
  return ["bash", "-c", `set -euo pipefail; ${cmdline}`];
}
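What bash() now emits, rendered as shell commands (input strings illustrative):

    bash("python3 main.py")
        # => bash -c 'set -euo pipefail; exec python3 main.py'
    bash("python3 main.py", { stty: true })
        # => bash -c 'set -euo pipefail; stty cols 80 rows 24; exec python3 main.py'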
@@ -130,9 +110,6 @@ export const log = {
  error: console.error,
};

// https://gist.github.com/bugventure/f71337e3927c34132b9a
export const uuidRegexp = /^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$/;

export function asBool(value, def) {
  if (def === undefined) {
    throw new Error("asBool needs an explicit default value");
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

set -euo pipefail

root="$(cd "$(dirname "$0")/.." && echo "${PWD}")"

set -a
. "${root}/.env"
set +a

exec node "${root}/tools/depgraph.js" "$@"
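The wrapper relies on bash's allexport mode, so every assignment sourced from .env is exported to the child process; a minimal demonstration:

    set -a                # all subsequent assignments are exported
    . ./.env              # a line like S3_BUCKET=riju becomes an exported variable
    set +a
    node -e 'console.log(process.env.S3_BUCKET)'    # prints: riju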
@@ -2,7 +2,13 @@

set -euo pipefail

export PATH="$(sed -E 's/:bin:/:/; s/(^|:)bin(:|$)//' <<< "${PATH}")"
root="$(cd "$(dirname "$0")/.." && echo "${PWD}")"

export PATH="$(sed -E "s_:${root}/bin:_:_; s_(^|:)${root}/bin(:|$)__" <<< "${PATH}")"

set -a
. "${root}/.env"
set +a

if [[ "${OSTYPE:-}" != darwin* ]] && [[ "${EUID}" != 0 ]]; then
    exec sudo -E docker "$@"
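The sed now strips the repo's own bin directory from PATH by absolute path before exec'ing the real binary, presumably so the wrapper cannot recurse into itself. A worked example with an assumed root of /home/user/riju:

    $ root=/home/user/riju
    $ PATH="$root/bin:/usr/bin:/bin"
    $ sed -E "s_:${root}/bin:_:_; s_(^|:)${root}/bin(:|$)__" <<< "$PATH"
    /usr/bin:/bin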
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

set -euo pipefail

root="$(cd "$(dirname "$0")/.." && echo "${PWD}")"

export PATH="$(sed -E "s_:${root}/bin:_:_; s_(^|:)${root}/bin(:|$)__" <<< "${PATH}")"

set -a
. "${root}/.env"
set +a

cd "${root}/packer"

exec packer "$@"
@@ -2,7 +2,13 @@

set -euo pipefail

export PATH="$(sed -E 's/:bin:/:/; s/(^|:)bin(:|$)//' <<< "${PATH}")"
root="$(cd "$(dirname "$0")/.." && echo "${PWD}")"

export PATH="$(sed -E "s_:${root}/bin:_:_; s_(^|:)${root}/bin(:|$)__" <<< "${PATH}")"

set -a
. "${root}/.env"
set +a

if [[ "${OSTYPE:-}" != darwin* ]] && [[ "${EUID}" != 0 ]]; then
    exec sudo -E skopeo "$@"
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

set -euo pipefail

root="$(cd "$(dirname "$0")/.." && echo "${PWD}")"

export PATH="$(sed -E "s_:${root}/bin:_:_; s_(^|:)${root}/bin(:|$)__" <<< "${PATH}")"

set -a
. "${root}/.env"
set +a

cd "${root}/tf"

exec terraform "$@"
@@ -6,5 +6,5 @@ RUN /tmp/install.bash
WORKDIR /src
COPY docker/shared/my_init /usr/local/sbin/
COPY docker/shared/admin-pid1.bash /usr/local/sbin/pid1.bash
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--", "/usr/local/sbin/pid1.bash"]
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--", "/usr/local/sbin/pid1.bash"]
CMD ["bash"]
@@ -37,11 +37,14 @@ dctrl-tools
docker-ce-cli
g++
git
golang
htop
httpie
jq
less
make
man
moreutils
nodejs
packer
psmisc
@@ -52,6 +55,7 @@ sudo
tmux
terraform
unzip
uuid-runtime
vim
wget
yarn
@@ -1,12 +1,32 @@
FROM riju:compile AS compile
FROM riju:composite
FROM riju:ubuntu AS build

ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--"]
RUN useradd -p '!' -m -l -s /usr/bin/bash riju
COPY docker/app/install-build.bash /tmp/
RUN /tmp/install-build.bash

WORKDIR /src
COPY Makefile ./

COPY --chown=riju:riju --from=compile /src ./
COPY system ./system/
RUN make system UNPRIVILEGED=1

COPY package.json yarn.lock ./
RUN yarn install

COPY webpack.config.cjs ./
COPY frontend/src ./frontend/src/
RUN make frontend

COPY frontend/pages ./frontend/pages/
COPY frontend/styles ./frontend/styles/
COPY lib ./lib/
COPY backend ./backend/
COPY langs ./langs/

FROM riju:runtime

ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--"]
RUN useradd -p '!' -m -l -s /usr/bin/bash riju
COPY --chown=riju:riju --from=build /src ./
RUN chown root:riju system/out/*-privileged && chmod a=,g=rx,u=rwxs system/out/*-privileged

USER riju
@@ -24,3 +24,7 @@ EOF

apt-get update
apt-get install -y clang g++ make nodejs sudo yarn

rm -rf /var/lib/apt/lists/*

rm "$0"
@@ -1,29 +0,0 @@
#!/usr/bin/env bash

set -euxo pipefail

export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get dist-upgrade -y
apt-get install -y curl gnupg lsb-release

curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -
curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -

ubuntu_ver="$(lsb_release -rs)"
ubuntu_name="$(lsb_release -cs)"

node_repo="$(curl -sS https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"

tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
deb [arch=amd64] https://deb.nodesource.com/${node_repo} ${ubuntu_name} main
deb [arch=amd64] https://dl.yarnpkg.com/debian/ stable main
EOF

apt-get update
apt-get install -y make nodejs yarn

rm -rf /var/lib/apt/lists/*

rm "$0"
@@ -0,0 +1,12 @@
FROM riju:ubuntu

COPY docker/base/install.bash /tmp/
RUN /tmp/install.bash

RUN useradd -p '!' -m -l -s /usr/bin/bash riju
RUN runuser -u riju -- mkdir /home/riju/src
WORKDIR /home/riju/src

COPY docker/shared/my_init /usr/local/sbin/
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--"]
CMD ["bash"]
@@ -0,0 +1,95 @@
#!/usr/bin/env bash

set -euxo pipefail

latest_release() {
    curl -sSL "https://api.github.com/repos/$1/releases/latest" | jq -r .tag_name
}

mkdir /tmp/riju-work
pushd /tmp/riju-work

export DEBIAN_FRONTEND=noninteractive

apt-get update
apt-get dist-upgrade -y
(yes || true) | unminimize

apt-get install -y curl gnupg lsb-release wget

ubuntu_name="$(lsb_release -cs)"

node_repo="$(curl -fsSL https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"

# Node.js
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -

# Yarn
curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -

tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
# Node.js
deb [arch=amd64] https://deb.nodesource.com/${node_repo} ${ubuntu_name} main

# Yarn
deb [arch=amd64] https://dl.yarnpkg.com/debian/ stable main
EOF

apt-get update
apt-get install -y dctrl-tools

libicu="$(grep-aptavail -wF Package 'libicu[0-9]+' -s Package -n | head -n1)"

packages="

# compilation tools
clang
g++
gcc
make

# base languages
nodejs
ocaml
perl
python3
ruby

# packaging tools
apt-file
dctrl-tools

# basic utilities
bind9-dnsutils
less
git
htop
jq
make
man
moreutils
psmisc
ripgrep
strace
sudo
tmux
tree
vim

# shared dependencies
${libicu}

"

apt-get install -y $(sed 's/#.*//' <<< "${packages}")

rm -rf /var/lib/apt/lists/*

tee /etc/sudoers.d/90-riju >/dev/null <<"EOF"
%sudo ALL=(ALL:ALL) NOPASSWD: ALL
EOF

popd
rm -rf /tmp/riju-work

rm "$0"
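The grep-aptavail line resolves whichever versioned libicu package the current Ubuntu release ships, so the version never has to be hard-coded; for example (version illustrative):

    $ grep-aptavail -wF Package 'libicu[0-9]+' -s Package -n | head -n1
    libicu66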
@@ -6,5 +6,5 @@ RUN /tmp/install.bash
WORKDIR /src
COPY docker/shared/my_init /usr/local/sbin/
COPY docker/shared/admin-pid1.bash /usr/local/sbin/pid1.bash
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--", "/usr/local/sbin/pid1.bash"]
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--", "/usr/local/sbin/pid1.bash"]
CMD ["bash"]
@@ -1,21 +0,0 @@
FROM ubuntu:rolling

COPY docker/compile/install.bash /tmp/
RUN /tmp/install.bash

WORKDIR /src
COPY Makefile ./

COPY system ./system/
RUN make system

COPY package.json yarn.lock ./
RUN yarn install

COPY webpack.config.cjs ./
COPY frontend/src ./frontend/src/
RUN make frontend

COPY frontend/pages ./frontend/pages/
COPY frontend/styles ./frontend/styles/
COPY backend ./backend/
@@ -1,18 +0,0 @@
FROM riju:runtime

COPY docker/composite/install.bash /tmp/

# The number of commands here must match NUM_SHARDS in
# build-composite-image.js.
RUN /tmp/install.bash 0
RUN /tmp/install.bash 1
RUN /tmp/install.bash 2
RUN /tmp/install.bash 3
RUN /tmp/install.bash 4
RUN /tmp/install.bash 5
RUN /tmp/install.bash 6
RUN /tmp/install.bash 7
RUN /tmp/install.bash 8
RUN /tmp/install.bash 9

RUN rm /tmp/install.bash
@@ -1,29 +0,0 @@
#!/usr/bin/env bash

set -euxo pipefail

shard="$1"

function riju-curl {
    curl -fsSL "localhost:8487$1"
}

function riju-apt-install {
    riju-curl "$1" > "$(basename "$1")"
    apt-get install -y "./$(basename "$1")"
}

pushd /tmp

export DEBIAN_FRONTEND=noninteractive

apt-get update

riju-curl "/shard/${shard}" | while read path; do
    riju-apt-install "/fs/${path}"
done

rm -rf *.deb
rm -rf /var/lib/apt/lists/*

popd
@@ -0,0 +1,8 @@
FROM riju:base

ARG LANG

COPY docker/lang/install.bash /tmp/
RUN /tmp/install.bash

USER riju
@@ -0,0 +1,54 @@
#!/usr/bin/env bash

set -euo pipefail

: "${LANG}"

mkdir /tmp/riju-work
pushd /tmp/riju-work

function riju-curl {
    echo >&2 "fetching ./$1"
    curl -fsSL "localhost:8487/fs/$1"
}

export DEBIAN_FRONTEND=noninteractive

riju-curl "build/lang/${LANG}/install.bash" > "install-lang-${LANG}.bash"
riju-curl "build/lang/${LANG}/riju-lang-${LANG}.deb" > "riju-lang-${LANG}.deb"
chmod +x "install-lang-${LANG}.bash"

(
    dpkg-deb -f "riju-lang-${LANG}.deb" -f Depends |
        (grep -Eo 'riju-shared-[^, ]+' || true) |
        sed 's/riju-shared-//'
) | while read name; do
    riju-curl "build/shared/${name}/install.bash" > "install-shared-${name}.bash"
    riju-curl "build/shared/${name}/riju-shared-${name}.deb" > "riju-shared-${name}.deb"
    chmod +x "install-shared-${name}.bash"
done

if compgen -G "./install-shared-*.bash"; then
    for file in ./install-shared-*.bash; do
        "${file}"
    done
fi

"./install-lang-${LANG}.bash"

if dpkg-deb -f "riju-lang-${LANG}.deb" -f Depends | grep .; then
    apt-get update
fi

if compgen -G "./riju-shared-*.deb"; then
    for file in ./riju-shared-*.deb; do
        apt-get install -y "${file}"
    done
fi

apt-get install -y "./riju-lang-${LANG}.deb"

popd
rm -rf /tmp/riju-work

rm "$0"
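The Depends-scraping subshell turns a .deb's dependency field into bare shared-package names; a worked example (package names hypothetical):

    $ dpkg-deb -f riju-lang-foo.deb Depends
    riju-shared-bar, libc6
    $ dpkg-deb -f riju-lang-foo.deb Depends | grep -Eo 'riju-shared-[^, ]+' | sed 's/riju-shared-//'
    bar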
@@ -1,9 +1,9 @@
FROM ubuntu:rolling
FROM riju:ubuntu

COPY docker/packaging/install.bash /tmp/
RUN /tmp/install.bash

WORKDIR /src
COPY docker/shared/my_init docker/packaging/pid1.bash /usr/local/sbin/
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--", "/usr/local/sbin/pid1.bash"]
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--", "/usr/local/sbin/pid1.bash"]
CMD ["bash"]
@@ -2,50 +2,32 @@
set -euxo pipefail
# See install.bash for the runtime image for much of the same, but
# with more comments.
# See install.bash for the base image for much of the same, but with
# more comments.
mkdir /tmp/riju-work
pushd /tmp/riju-work
export DEBIAN_FRONTEND=noninteractive
dpkg --add-architecture i386
apt-get update
apt-get dist-upgrade -y
(yes || true) | unminimize
apt-get install -y curl gnupg lsb-release wget
wget https://cacerts.digicert.com/DigiCertTLSRSASHA2562020CA1.crt.pem -O /usr/local/share/ca-certificates/DigiCertTLSRSASHA2562020CA1.crt
wget https://letsencrypt.org/certs/lets-encrypt-r3.pem -O /usr/local/share/ca-certificates/lets-encrypt-r3.crt
update-ca-certificates
ubuntu_ver="$(lsb_release -rs)"
ubuntu_name="$(lsb_release -cs)"
node_repo="$(curl -sS https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
apt-get install ./packages-microsoft-prod.deb
curl -fsSL https://downloads.ceylon-lang.org/apt/ceylon-debian-repo.gpg.key | apt-key add -
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -
curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
deb [arch=amd64] https://downloads.ceylon-lang.org/apt/ unstable main
deb [arch=amd64] https://deb.nodesource.com/${node_repo} ${ubuntu_name} main
deb [arch=amd64] https://dl.yarnpkg.com/debian/ stable main
EOF
tee -a /etc/apt/preferences.d/riju >/dev/null <<EOF
Package: *
Pin: origin packages.microsoft.com
Pin-Priority: 1
EOF
packages="
# compilation tools
@@ -1,11 +1,12 @@
FROM ubuntu:rolling
FROM riju:ubuntu
COPY docker/runtime/install.bash /tmp/
RUN /tmp/install.bash
WORKDIR /src
COPY docker/shared/my_init docker/runtime/pid1.bash /usr/local/sbin/
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--", "/usr/local/sbin/pid1.bash"]
ENTRYPOINT ["/usr/local/sbin/my_init", "--quiet", "--skip-runit", "--", "/usr/local/sbin/pid1.bash"]
WORKDIR /src
CMD ["bash"]
EXPOSE 6119
EXPOSE 6120
@@ -11,120 +11,33 @@ pushd /tmp/riju-work
export DEBIAN_FRONTEND=noninteractive
dpkg --add-architecture i386
apt-get update
apt-get dist-upgrade -y
(yes || true) | unminimize
apt-get install -y curl gnupg lsb-release wget
# Ceylon
wget https://cacerts.digicert.com/DigiCertTLSRSASHA2562020CA1.crt.pem -O /usr/local/share/ca-certificates/DigiCertTLSRSASHA2562020CA1.crt
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -
curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
# D
wget https://letsencrypt.org/certs/lets-encrypt-r3.pem -O /usr/local/share/ca-certificates/lets-encrypt-r3.crt
update-ca-certificates
ubuntu_ver="$(lsb_release -rs)"
ubuntu_name="$(lsb_release -cs)"
cran_repo="$(curl -fsSL https://cran.r-project.org/bin/linux/ubuntu/ | grep -Eo 'cran[0-9]+' | head -n1)"
node_repo="$(curl -fsSL https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
# .NET
wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
apt-get install ./packages-microsoft-prod.deb
# Ceylon
curl -fsSL https://downloads.ceylon-lang.org/apt/ceylon-debian-repo.gpg.key | apt-key add -
# Crystal
curl -fsSL https://keybase.io/crystal/pgp_keys.asc | apt-key add -
# Dart
curl -fsSL https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -
# Hack
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B4112585D386EB94
# MongoDB
curl -fsSL https://www.mongodb.org/static/pgp/server-4.4.asc | apt-key add -
# Node.js
curl -fsSL https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -
# R
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E298A3A825C0D65DFD57CBB651716619E084DAB9
# Yarn
curl -fsSL https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
node_repo="$(curl -sS https://deb.nodesource.com/setup_current.x | grep NODEREPO= | grep -Eo 'node_[0-9]+\.x' | head -n1)"
tee -a /etc/apt/sources.list.d/custom.list >/dev/null <<EOF
# Ceylon
deb [arch=amd64] https://downloads.ceylon-lang.org/apt/ unstable main
# Crystal
deb [arch=amd64] https://dist.crystal-lang.org/apt crystal main
# Dart
deb [arch=amd64] https://storage.googleapis.com/download.dartlang.org/linux/debian stable main
# Hack
deb [arch=amd64] https://dl.hhvm.com/ubuntu ${ubuntu_name} main
# MongoDB
deb [arch=amd64] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/4.4 multiverse
# Node.js
deb [arch=amd64] https://deb.nodesource.com/${node_repo} ${ubuntu_name} main
# R
deb [arch=amd64] https://cloud.r-project.org/bin/linux/ubuntu ${ubuntu_name}-${cran_repo}/
# Yarn
deb [arch=amd64] https://dl.yarnpkg.com/debian/ stable main
deb [arch=amd64] https://download.docker.com/linux/ubuntu ${ubuntu_name} stable
EOF
# Work around brutal packaging error courtesy of Microsoft.
# Unfortunately, the Microsoft repo includes a duplicate version of
# the libodbc1 package whose version is not in sync with the one
# shipped by the corresponding release of Ubuntu. If this one happens
# to be newer, then it'll cause a horrifyingly difficult to diagnose
# error later on while building the composite image because there's a
# conflict between the default-available versions of libodbc1 and
# libodbc1:i386, which surfaces as an inability to install
# dependencies for Erlang. Thanks Microsoft. Please don't. Anyway,
# solution is to pin this repository at a lower priority than the
# Ubuntu standard packages, so the correct version of libodbc1 gets
# installed by default.
tee -a /etc/apt/preferences.d/riju >/dev/null <<EOF
Package: *
Pin: origin packages.microsoft.com
Pin-Priority: 1
EOF
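
The effect of the pin can be checked after an apt-get update: with priority 1, the Ubuntu archive's libodbc1 wins the candidate selection. A sketch; the output shape is illustrative and the version numbers are hypothetical:

apt-get update
apt-cache policy libodbc1
#   libodbc1:
#     Candidate: 2.3.6-0.1build1          <- Ubuntu version wins
#        500 http://archive.ubuntu.com/ubuntu ...
#          1 https://packages.microsoft.com/ubuntu ...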
apt-get update
apt-get install -y dctrl-tools
libicu="$(grep-aptavail -wF Package 'libicu[0-9]+' -s Package -n | head -n1)"
packages="
# compilation tools
clang
g++
gcc
make
# base languages
nodejs
ocaml
perl
python3
ruby
# project tools
clang
docker-ce-cli
make
nodejs
yarn
# packaging tools
@@ -135,7 +48,9 @@ dctrl-tools
bind9-dnsutils
less
git
golang
htop
httpie
jq
make
man
@@ -146,19 +61,17 @@ strace
sudo
tmux
tree
uuid-runtime
vim
# shared dependencies
${libicu}
"
apt-get update
apt-get install -y $(sed 's/#.*//' <<< "${packages}")
ver="$(latest_release watchexec/watchexec)"
wget "https://github.com/watchexec/watchexec/releases/download/${ver}/watchexec-${ver}-x86_64-unknown-linux-gnu.deb"
ver="$(latest_release watchexec/watchexec | sed 's/^cli-v//')"
wget "https://github.com/watchexec/watchexec/releases/download/cli-v${ver}/watchexec-${ver}-x86_64-unknown-linux-gnu.deb"
apt-get install -y ./watchexec-*.deb
rm watchexec-*.deb
rm -rf /var/lib/apt/lists/*
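
The new sed exists because watchexec's release tags carry a cli-v prefix while the .deb filenames do not. A quick sketch of the mapping (the tag value is hypothetical):

tag="cli-v1.16.1"                       # e.g. output of latest_release
ver="$(sed 's/^cli-v//' <<< "${tag}")"  # -> 1.16.1
echo "watchexec-${ver}-x86_64-unknown-linux-gnu.deb"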
@ -166,9 +79,6 @@ tee /etc/sudoers.d/90-riju >/dev/null <<"EOF"
|
|||
%sudo ALL=(ALL:ALL) NOPASSWD: ALL
|
||||
EOF
|
||||
|
||||
mkdir -p /opt/riju/langs
|
||||
touch /opt/riju/langs/.keep
|
||||
|
||||
popd
|
||||
rm -rf /tmp/riju-work
|
||||
|
||||
|
|
|
@@ -7,10 +7,10 @@ tee -a /etc/hosts >/dev/null <<< "127.0.0.1 $(hostname)"
 groupadd -g "$(stat -c %g "$PWD")" -o -p '!' -r riju
 useradd -u "$(stat -c %u "$PWD")" -g "$(stat -c %g "$PWD")" -o -p '!' -m -N -l -s /usr/bin/bash -G sudo riju
-runuser -u riju -- ln -sT /var/riju/.aws /home/riju/.aws
-runuser -u riju -- ln -sT /var/riju/.docker /home/riju/.docker
-runuser -u riju -- ln -sT /var/riju/.ssh /home/riju/.ssh
-runuser -u riju -- ln -sT /var/riju/.terraform.d /home/riju/.terraform.d
+runuser -u riju -- ln -sT /var/run/riju/.aws /home/riju/.aws
+runuser -u riju -- ln -sT /var/run/riju/.docker /home/riju/.docker
+runuser -u riju -- ln -sT /var/run/riju/.ssh /home/riju/.ssh
+runuser -u riju -- ln -sT /var/run/riju/.terraform.d /home/riju/.terraform.d
 runuser -u riju -- touch /home/riju/.sudo_as_admin_successful
 runuser -u riju -- tee -a /home/riju/.bashrc >/dev/null <<"EOF"
@@ -20,6 +20,7 @@ install:
repl: |
  abc
input: |
  DELAY: 1
  WRITE 123 * 234
main: "main.abc"
@@ -29,13 +29,6 @@ info:
install:
  apt:
    - gnat
  manual: |
    wget https://dl.bintray.com/reznikmm/ada-language-server/linux-latest.tar.gz
    tar -xf linux-latest.tar.gz
    install -d "${pkg}/usr/local/bin"
    install -d "${pkg}/usr/local/lib/x86_64-linux-gnu"
    mv linux/ada_language_server "${pkg}/usr/local/bin/ada_language_server"
    mv linux/*.so* "${pkg}/usr/local/lib/x86_64-linux-gnu/"
main: "main.adb"
template: |
@@ -47,13 +40,6 @@ template: |
  end Main;
compile: |
  x86_64-linux-gnu-gnatmake-9 main.adb
  gnatmake main.adb
run: |
  ./main
lsp:
  start: |
    ada_language_server
  code: "\n  Ada.IO"
  after: ");"
  item: "IO_Exceptions"
@@ -1,5 +1,7 @@
id: "a+"
id: "aplus"
aliases:
  - "a+"
  - "ap"
  - "aplus"
name: "A+"
@@ -1,6 +1,7 @@
id: "aspectc++"
id: "aspectcpp"
aliases:
  - "aspectcpp"
  - "aspectc++"
  - "aspectcplusplus"
name: "AspectC++"
install:
@@ -20,6 +20,14 @@ info:
usage: []
install:
  prepare: &add-ceylon-repo
    cert:
      - "https://cacerts.digicert.com/DigiCertTLSRSASHA2562020CA1.crt.pem"
    aptKey:
      - "https://downloads.ceylon-lang.org/apt/ceylon-debian-repo.gpg.key"
    aptRepo:
      - "deb [arch=amd64] https://downloads.ceylon-lang.org/apt/ unstable main"
  <<: *add-ceylon-repo
  apt:
    - $(grep-aptavail -F Package ceylon -s Package -n | sort -rV | head -n1)
    - openjdk-8-jdk-headless
@@ -25,7 +25,7 @@ info:
install:
  apt:
    - wine
    - wine32
    - wine32:i386
repl: |
  wine cmd
@@ -1,6 +1,6 @@
id: "c++"
id: "cpp"
aliases:
  - "cpp"
  - "c++"
  - "g++"
  - "clang++"
  - "c++98"
@@ -18,6 +18,7 @@ aliases:
  - "hpp"
  - "cxx"
  - "hxx"
  - "cplusplus"
name: "C++"
monacoLang: cpp
@@ -20,6 +20,10 @@ info:
usage: []
install:
  aptKey:
    - "https://keybase.io/crystal/pgp_keys.asc"
  aptRepo:
    - "deb [arch=amd64] https://dist.crystal-lang.org/apt crystal main"
  apt:
    - crystal
@@ -4,11 +4,14 @@ aliases:
name: "D"
install:
  prepare:
  prepare: &add-d-cert
    cert:
      - "https://letsencrypt.org/certs/lets-encrypt-r3.pem"
    manual: |
      file="$(curl -fsSL https://dlang.org/download.html | grep -Eo '"https://[^"]+amd64.deb"' | grep -v pre-release | tr -d '"')"
      wget "${file}" -O dmd.deb
      sudo apt-get install -y ./dmd.deb
      sudo --preserve-env=DEBIAN_FRONTEND apt-get install -y ./dmd.deb
  <<: *add-d-cert
  manual: |
    install -d "${pkg}/usr/local/bin"
    dub fetch dfmt@~master
@ -3,6 +3,10 @@ name: "Dart"
|
|||
monacoLang: dart
|
||||
|
||||
install:
|
||||
aptKey:
|
||||
- "https://dl-ssl.google.com/linux/linux_signing_key.pub"
|
||||
aptRepo:
|
||||
- "deb [arch=amd64] https://storage.googleapis.com/download.dartlang.org/linux/debian stable main"
|
||||
apt:
|
||||
- dart
|
||||
|
||||
|
|
|
@@ -14,7 +14,7 @@ install:
    ln -sT /opt/factor/factor "${pkg}/usr/local/bin/factor-lang"
repl: |
  factor-lang
  HOME="$PWD" factor-lang
input: |
  123 234 *
@@ -27,7 +27,7 @@ template: |
createEmpty: ""
run: |
  factor-lang
  HOME="$PWD" factor-lang
scope:
  code: |
@@ -34,4 +34,5 @@ run: |
  echo 'Reading from stdin, ctrl+D to end input...' >&2
  ./main
helloInput: |
  DELAY: 1
  EOF
@@ -8,7 +8,7 @@ install:
    - genius
repl: |
  genius
  HOME="$PWD" genius
main: ".geniusinit"
template: |
@@ -16,7 +16,7 @@ template: |
createEmpty: ""
run: |
  genius
  HOME="$PWD" genius
scope:
  code: |
@@ -6,6 +6,10 @@ aliases:
name: "Hack"
install:
  aptKey:
    - "B4112585D386EB94"
  aptRepo:
    - "deb [arch=amd64] https://dl.hhvm.com/ubuntu ${ubuntu_name} main"
  apt:
    - hhvm
@@ -4,6 +4,9 @@ aliases:
name: "Ioke"
install:
  prepare:
    cert:
      - "https://letsencrypt.org/certs/lets-encrypt-r3.pem"
  apt:
    - default-jdk
  manual: |
@@ -10,11 +10,13 @@ install:
    - mysql-client
  riju:
    - sqls
  # MariaDB has Debian package downloads, but only for LTS versions of
  # Ubuntu, so we have to download the release tarball instead.
  manual: |
    install -d "${pkg}/opt/mariadb"
    ver="$(curl -sSL https://downloads.mariadb.org/ | grep 'href="/mariadb/[0-9]' | grep -Eo '[0-9][^/]+' | sort -rV | head -n1)"
    wget "https://downloads.mariadb.org/f/mariadb-${ver}/bintar-linux-x86_64/mariadb-${ver}-linux-x86_64.tar.gz/from/http%3A//sfo1.mirrors.digitalocean.com/mariadb/?serve" -O mariadb.tar.gz
    wget "https://downloads.mariadb.org/f/mariadb-${ver}/bintar-linux-systemd-x86_64/mariadb-${ver}-linux-systemd-x86_64.tar.gz/from/http%3A//sfo1.mirrors.digitalocean.com/mariadb/?serve" -O mariadb.tar.gz
    tar -xf mariadb.tar.gz -C "${pkg}/opt/mariadb" --strip-components=1
    chmod a=rx,u=rwx "${pkg}/opt/mariadb/lib/plugin/auth_pam_tool_dir"
    chmod a=rx,u=rwxs "${pkg}/opt/mariadb/lib/plugin/auth_pam_tool_dir/auth_pam_tool"
@ -2,16 +2,17 @@ id: "mongodb"
|
|||
aliases:
|
||||
- "mongo"
|
||||
- "mongod"
|
||||
- "webscale"
|
||||
name: "MongoDB"
|
||||
|
||||
install:
|
||||
# The MongoDB package is only available for LTS releases of Ubuntu,
|
||||
# so we grab it from focal.
|
||||
prepare:
|
||||
aptRepo:
|
||||
- "deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal main universe"
|
||||
manual: |
|
||||
sudo tee -a /etc/apt/sources.list.d/focal.list >/dev/null <<EOF
|
||||
deb [arch=amd64] http://archive.ubuntu.com/ubuntu/ focal main universe
|
||||
EOF
|
||||
|
||||
sudo apt-get update
|
||||
sudo --preserve-env=DEBIAN_FRONTEND apt-get update
|
||||
|
||||
for name in mongodb mongodb-clients mongodb-server mongodb-server-core; do
|
||||
apt-get download "${name}"
|
||||
|
@@ -45,8 +46,8 @@ template: |
  print("Hello, world!")
run: |
  set -e
  while ps -u "$(id -un)" -o comm | grep -q mongod; do
    ps -u "$(id -un)" -o pid,comm | cat
    sleep 0.01
  done
  rm -rf data
@@ -16,4 +16,5 @@ template: |
  quit
run: |
  gtm_dist=/usr/lib/x86_64-linux-gnu/fis-gtm/V6.3-007_x86_64 /usr/lib/x86_64-linux-gnu/fis-gtm/V6.3-007_x86_64/utf8/mumps -r main main.m
  gtm_dist=(/usr/lib/x86_64-linux-gnu/fis-gtm/V*_x86_64)
  gtm_dist="${gtm_dist[@]}" "${gtm_dist[@]}/utf8/mumps" -r main main.m
@@ -1,8 +1,9 @@
id: "objectivec++"
id: "objectivecpp"
aliases:
  - "objc++"
  - "objcpp"
  - "objectivecpp"
  - "objectivec++"
  - "objectivecplusplus"
name: "Objective-C++"
install:
@@ -6,10 +6,7 @@ install:
    - ocaml-nox
  opam:
    - ocamlformat
    - name: ocaml-lsp-server
      source: "https://github.com/ocaml/ocaml-lsp.git"
      binaries:
        - ocamllsp
    - ocaml-lsp-server
repl: |
  ocaml
@ -7,11 +7,14 @@ name: "Q#"
|
|||
install:
|
||||
# Apparently, the Q# project template is hardcoded to use version
|
||||
# 3.x of the .NET SDK. Not sure why.
|
||||
prepare:
|
||||
prepare: &install-dotnet
|
||||
preface: |
|
||||
wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
|
||||
sudo --preserve-env=DEBIAN_FRONTEND apt-get install ./packages-microsoft-prod.deb
|
||||
sudo --preserve-env=DEBIAN_FRONTEND apt-get update
|
||||
apt:
|
||||
- $(grep-aptavail -wF Package "dotnet-sdk-3\.[0-9.]+" -s Package -n | sort -Vr | head -n1)
|
||||
apt:
|
||||
- $(grep-aptavail -wF Package "dotnet-sdk-3\.[0-9.]+" -s Package -n | sort -Vr | head -n1)
|
||||
<<: *install-dotnet
|
||||
# We should cache the .dotnet directory to avoid a .NET banner being
|
||||
# printed, and we should cache the main directory because there is a
|
||||
# generated main.csproj file that is needed by .NET. Finally we
|
||||
|
@@ -20,20 +23,27 @@ install:
  #
  # We could optimize further but I don't feel like it right now.
  manual: |
    install -d "${pkg}/opt/qsharp/skel"
    install -d "${pkg}/opt/qsharp/skel-home"
    install -d "${pkg}/opt/qsharp/skel-src"
    dotnet new -i Microsoft.Quantum.ProjectTemplates
    dotnet new console -lang Q# -o main
    dotnet run --project main
    shopt -s dotglob
    cp -R * "${HOME}/.dotnet" "${HOME}/.nuget" "${pkg}/opt/qsharp/skel/"
    rm "${pkg}/opt/qsharp/skel/main/Program.qs"
    chmod -R a=u,go-w "${pkg}/opt/qsharp/skel"
    cp -R main "${pkg}/opt/qsharp/skel-src/"
    cp -R "${HOME}/.dotnet" "${HOME}/.nuget" "${pkg}/opt/qsharp/skel-home/"
    rm "${pkg}/opt/qsharp/skel-src/main/Program.qs"
    chmod -R a=u,go-w "${pkg}/opt/qsharp"
  manualInstall: |
    wget "https://packages.microsoft.com/config/ubuntu/${ubuntu_ver}/packages-microsoft-prod.deb"
    sudo --preserve-env=DEBIAN_FRONTEND apt-get update
    sudo --preserve-env=DEBIAN_FRONTEND apt-get install ./packages-microsoft-prod.deb
setup: |
  shopt -s dotglob
  cp -R /opt/qsharp/skel/* ./
  cp -R /opt/qsharp/skel-src/* ./
  cp -R /opt/qsharp/skel-home/* "${HOME}/"
main: "main/Main.qs"
template: |
@ -5,18 +5,22 @@ name: "R"
|
|||
monacoLang: r
|
||||
|
||||
install:
|
||||
aptKey:
|
||||
- "E298A3A825C0D65DFD57CBB651716619E084DAB9"
|
||||
aptRepo:
|
||||
- "deb [arch=amd64] https://cloud.r-project.org/bin/linux/ubuntu ${ubuntu_name}-$(curl -fsSL https://cran.r-project.org/bin/linux/ubuntu/ | grep -Eo 'cran[0-9]+' | head -n1)/"
|
||||
apt:
|
||||
- r-base
|
||||
|
||||
repl: |
|
||||
R
|
||||
HOME="$PWD" R
|
||||
|
||||
main: ".Rprofile"
|
||||
template: |
|
||||
print("Hello, world!")
|
||||
|
||||
run: |
|
||||
R --no-save
|
||||
HOME="$PWD" R --no-save
|
||||
|
||||
scope:
|
||||
code: |
|
||||
|
|
|
@@ -9,8 +9,6 @@ name: "ReasonML"
install:
  prepare:
    apt:
      - yarn
  npm:
    - bs-platform
  npm:
@@ -31,7 +29,7 @@ install:
    pushd "${pkg}/opt/reasonml/skel"
    bsb -init .
    cat bsconfig.json | jq '.name = "riju-project"' | sponge bsconfig.json
    yarn install
    npm install
    popd
main: "main.re"
@@ -17,7 +17,7 @@ install:
    cp -R "$HOME/.red" "${pkg}/opt/red/skel/"
setup: |
  shopt -s dotglob; cp -R /opt/red/skel/* ./
  shopt -s dotglob; cp -R /opt/red/skel/* "${HOME}/"
# https://github.com/red/red/issues/543#issuecomment-25404212
repl: |
@@ -31,7 +31,7 @@ install:
    ln -s /opt/sagemath/sage "${pkg}/usr/local/bin/"
repl: |
  sage
  HOME="$PWD" sage
main: ".sage/init.sage"
template: |
|
|||
createEmpty: ""
|
||||
|
||||
run: |
|
||||
sage
|
||||
HOME="$PWD" sage
|
||||
|
||||
scope:
|
||||
code: |
|
||||
|
|
|
@@ -10,7 +10,7 @@ install:
    - tcl
repl: |
  tclsh
  HOME="$PWD" tclsh
input: |
  expr 123 * 234
@@ -38,25 +38,37 @@ input: |
output: |
  base.List.reverse
# runProg implementation courtesy of Robert Offner from Unison Slack!
main: "main.u"
template: |
  use io
  main : '{IO} ()
  main = 'let
  runProg: '{IO, Exception} a -> '{IO} ()
  runProg f = 'let
    printErr err = match err with
      Failure _ errMsg _ -> handle putBytes (stdHandle StdErr) (toUtf8 errMsg) with cases
        {raise _ -> _} -> ()
        {_} -> ()
    match catch f with
      Left err -> printErr err
      Right _ -> ()
  main: '{IO} ()
  main = runProg 'let
    printLine "Hello, world!"
createEmpty: ""
run: |
  echo "Type 'run main' to run the code."
  unison -codebase . run.file main.u main
  echo "Type 'load main.u' at the repl prompt to bring variables into scope."
  unison -codebase .
helloInput: |
  DELAY: 3
  run main
scope:
  code: |
    x = 123 * 234
  input: |
    DELAY: 3
    load main.u
    DELAY: 3
    add x
    DELAY: 3
@@ -11,7 +11,7 @@ install:
    - zsh-doc
repl: |
  SHELL=/usr/bin/zsh zsh
  SHELL=/usr/bin/zsh HOME="$PWD" zsh
input: |
  expr 123 \* 234
@@ -21,7 +21,7 @@ template: |
createEmpty: ""
run: |
  SHELL=/usr/bin/zsh zsh
  SHELL=/usr/bin/zsh HOME="$PWD" zsh
scope:
  code: |
@@ -0,0 +1,85 @@
import crypto from "crypto";
import { promises as fs } from "fs";
import path from "path";

import { parse } from "@babel/parser";
import { simple as babelWalk } from "babel-walk";

import { readLangConfig } from "./yaml.js";

async function getRelativeImports(filename) {
  const relativeImports = [];
  const program = parse(await fs.readFile(filename, "utf-8"), {
    sourceType: "module",
    plugins: ["classProperties"],
  });
  babelWalk({
    ImportDeclaration: (node) => {
      if (node.source.type !== "StringLiteral") {
        throw new Error(`unsupported import syntax: ${node.source.type}`);
      }
      const source = node.source.value;
      if (!source.startsWith(".")) {
        return;
      }
      relativeImports.push(source);
    },
  })(program);
  return relativeImports;
}

function pathRelativeTo(relativePath, relativeTo) {
  return path.join(path.dirname(path.resolve(relativeTo)), relativePath);
}

async function getTransitiveRelativeImports(filename) {
  let queue = [filename];
  const found = new Set();
  while (queue.length > 0) {
    const filename = path.resolve(queue.pop());
    if (found.has(filename)) {
      continue;
    }
    found.add(filename);
    queue = queue.concat(
      (await getRelativeImports(filename)).map((result) =>
        pathRelativeTo(result, filename)
      )
    );
  }
  return [...found];
}

async function getTestRunnerHash() {
  const files = await getTransitiveRelativeImports("backend/test-runner.js");
  files.push("package.json");
  files.push("yarn.lock");
  const hashes = [];
  for (const file of files) {
    hashes.push(
      crypto
        .createHash("sha1")
        .update(await fs.readFile(file, "utf-8"))
        .digest("hex")
    );
  }
  return crypto.createHash("sha1").update(hashes.join(",")).digest("hex");
}

const testRunnerHash = getTestRunnerHash();

async function getTestConfigHash(lang) {
  const config = Object.assign({}, await readLangConfig(lang));
  delete config["install"];
  delete config["info"];
  return crypto.createHash("sha1").update(JSON.stringify(config)).digest("hex");
}

export async function getTestHash(lang, imageHash) {
  return crypto
    .createHash("sha1")
    .update(
      `${await testRunnerHash},${await getTestConfigHash(lang)},${imageHash}`
    )
    .digest("hex");
}
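
A quick way to exercise getTestHash from the repo root is a node one-liner like the following. A sketch only: it assumes the module lives at lib/hash-test.js, and the lang and imageHash arguments are hypothetical placeholders.

node --input-type=module -e '
import { getTestHash } from "./lib/hash-test.js";
console.log(await getTestHash("python", "0123abcd"));
'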
@@ -511,6 +511,31 @@ properties:
        type: object
        additionalProperties: false
        properties:
          preface: &preface
            type: string
            minLength: 1
          cert: &cert
            type: array
            items:
              type: string
              pattern: "^https?://"
            examples:
              - "https://cacerts.digicert.com/DigiCertTLSRSASHA2562020CA1.crt.pem"
          aptKey: &aptKey
            type: array
            items:
              type: string
              pattern: "^https?://|^[0-9A-F]+$"
            examples:
              - "https://downloads.ceylon-lang.org/apt/ceylon-debian-repo.gpg.key"
              - "B4112585D386EB94"
          aptRepo: &aptRepo
            type: array
            items:
              type: string
              pattern: "^deb"
            examples:
              - "deb [arch=amd64] https://downloads.ceylon-lang.org/apt/ unstable main"
          apt:
            type: array
            items:
@@ -529,6 +554,10 @@ properties:
    manual:
      type: string
      minLength: 1
    preface: *preface
    cert: *cert
    aptKey: *aptKey
    aptRepo: *aptRepo
    apt:
      type: array
      items:
@@ -598,6 +627,9 @@ properties:
    manual:
      type: string
      minLength: 1
    manualInstall:
      type: string
      minLength: 1
    deb:
      type: array
      items:
@ -2,6 +2,7 @@ import { promises as fs } from "fs";
|
|||
import path from "path";
|
||||
|
||||
import { validate as validateJSONSchema } from "jsonschema";
|
||||
import _ from "lodash";
|
||||
import YAML from "yaml";
|
||||
|
||||
// The build scripts in the language configs assume a specific build
|
||||
|
@ -14,7 +15,9 @@ import YAML from "yaml";
|
|||
// * we are using bash with 'set -euxo pipefail'
|
||||
|
||||
async function readJSONSchemaFromDisk() {
|
||||
return YAML.parse(await fs.readFile("tools/jsonschema.yaml", "utf-8"));
|
||||
return YAML.parse(await fs.readFile("lib/jsonschema.yaml", "utf-8"), {
|
||||
merge: true,
|
||||
});
|
||||
}
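
The merge: true option matters because the configs now use YAML merge keys (the <<: *anchor entries seen in the schema and language files above); without it, the yaml package leaves << as a literal mapping key. A minimal check, assuming the repo's installed yaml dependency:

node --input-type=module -e '
import YAML from "yaml";
const doc = "base: &b { x: 1 }\nderived:\n  <<: *b\n  y: 2\n";
console.log(JSON.stringify(YAML.parse(doc, { merge: true })));
'
# -> {"base":{"x":1},"derived":{"x":1,"y":2}}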

const jsonSchemaPromise = readJSONSchemaFromDisk();
@@ -38,8 +41,8 @@ export async function getSharedDeps() {
// Return a list of objects representing the packages to be built. See
// the function implementation for the full list of keys.
export async function getPackages() {
  // The order (shared, lang, config) is important to get dependencies
  // correct due to poor abstractions in plan-publish.js.
  // The order (shared, then lang) is important to get dependencies
  // correct when building artifacts.
  const packages = [];
  for (const lang of await getSharedDeps()) {
    const type = "shared";
@@ -53,16 +56,15 @@ export async function getPackages() {
    });
  }
  for (const lang of await getLangs()) {
    for (const type of ["lang", "config"]) {
      const name = `riju-${type}-${lang}`;
      packages.push({
        lang,
        type,
        name,
        buildScriptPath: `build/${type}/${lang}/build.bash`,
        debPath: `build/${type}/${lang}/${name}.deb`,
      });
    }
    const type = "lang";
    const name = `riju-${type}-${lang}`;
    packages.push({
      lang,
      type,
      name,
      buildScriptPath: `build/${type}/${lang}/build.bash`,
      debPath: `build/${type}/${lang}/${name}.deb`,
    });
  }
  return packages;
}
@@ -90,7 +92,8 @@ function fixupLangConfig(langConfig) {
// and return it as an object.
export async function readLangConfig(lang) {
  const langConfig = YAML.parse(
    await fs.readFile(`langs/${lang}.yaml`, "utf-8")
    await fs.readFile(`langs/${lang}.yaml`, "utf-8"),
    { merge: true }
  );
  validateJSONSchema(langConfig, await jsonSchemaPromise, { throwAll: true });
  if (langConfig.id !== lang) {
@@ -105,7 +108,8 @@ export async function readLangConfig(lang) {
// string ID and return it as an object.
export async function readSharedDepConfig(lang) {
  const langConfig = YAML.parse(
    await fs.readFile(`shared/${lang}.yaml`, "utf-8")
    await fs.readFile(`shared/${lang}.yaml`, "utf-8"),
    { merge: true }
  );
  if (langConfig.id !== lang) {
    throw new Error(
@@ -114,3 +118,10 @@ export async function readSharedDepConfig(lang) {
  }
  return fixupLangConfig(langConfig);
}

// Given a language config JSON, return a list of the Riju shared
// dependency names, or an empty list if none are configured for this
// language. The return value is sorted.
export async function getSharedDepsForLangConfig(langConfig) {
  return [...(langConfig.install && langConfig.install.riju) || []].sort();
}
@@ -6,12 +6,14 @@
"type": "module",
"dependencies": {
  "@babel/core": "^7.12.10",
  "@babel/parser": "^7.13.11",
  "@babel/preset-env": "^7.12.11",
  "@balena/dockerignore": "^1.0.2",
  "async-lock": "^1.2.6",
  "babel-loader": "^8.2.2",
  "babel-walk": "^3.0.0",
  "buffer": "^6.0.3",
  "commander": "^6.2.1",
  "commander": "^7.1.0",
  "css-loader": "^5.0.1",
  "debounce": "^1.2.0",
  "docker-file-parser": "^1.0.5",
@@ -28,8 +30,8 @@
  "node-pty": "^0.9.0",
  "p-queue": "^6.6.2",
  "parse-passwd": "^1.0.0",
  "prettier": "^2.3.1",
  "regenerator-runtime": "^0.13.7",
  "shell-quote": "^1.7.2",
  "strip-ansi": "^6.0.0",
  "style-loader": "^2.0.0",
  "uuid": "^8.3.2",
@@ -1,75 +0,0 @@
{
  "variables": {
    "admin_password": "{{env `ADMIN_PASSWORD`}}",
    "admin_ssh_public_key_file": "{{env `ADMIN_SSH_PUBLIC_KEY_FILE`}}",
    "deploy_ssh_public_key_file": "{{env `DEPLOY_SSH_PUBLIC_KEY_FILE`}}"
  },
  "builders": [
    {
      "type": "amazon-ebs",
      "source_ami_filter": {
        "filters": {
          "virtualization-type": "hvm",
          "root-device-type": "ebs",
          "name": "ubuntu/images/hvm-ssd/ubuntu-groovy-20.10-amd64-server-*"
        },
        "owners": ["099720109477"],
        "most_recent": true
      },
      "instance_type": "t3.micro",
      "ssh_username": "ubuntu",
      "ami_name": "riju-{{timestamp}}"
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "script": "validate.bash",
      "environment_vars": [
        "ADMIN_PASSWORD={{user `admin_password`}}",
        "ADMIN_SSH_PUBLIC_KEY_FILE={{user `admin_ssh_public_key_file`}}",
        "DEPLOY_SSH_PUBLIC_KEY_FILE={{user `deploy_ssh_public_key_file`}}"
      ]
    },
    {
      "type": "file",
      "source": "riju",
      "destination": "/tmp/riju"
    },
    {
      "type": "file",
      "source": "riju-init-volume",
      "destination": "/tmp/riju-init-volume"
    },
    {
      "type": "file",
      "source": "riju-deploy",
      "destination": "/tmp/riju-deploy"
    },
    {
      "type": "file",
      "source": "riju-install-certbot-hooks",
      "destination": "/tmp/riju-install-certbot-hooks"
    },
    {
      "type": "file",
      "source": "riju.service",
      "destination": "/tmp/riju.service"
    },
    {
      "type": "file",
      "source": "{{user `admin_ssh_public_key_file`}}",
      "destination": "/tmp/id_admin.pub"
    },
    {
      "type": "file",
      "source": "{{user `deploy_ssh_public_key_file`}}",
      "destination": "/tmp/id_deploy.pub"
    },
    {
      "type": "shell",
      "script": "provision.bash",
      "environment_vars": ["ADMIN_PASSWORD={{user `admin_password`}}"]
    }
  ]
}
@@ -0,0 +1,69 @@
variable "admin_password" {
  type    = string
  default = "${env("ADMIN_PASSWORD")}"
}

variable "aws_region" {
  type    = string
  default = "${env("AWS_REGION")}"
}

variable "s3_bucket" {
  type    = string
  default = "${env("S3_BUCKET")}"
}

variable "supervisor_access_token" {
  type    = string
  default = "${env("SUPERVISOR_ACCESS_TOKEN")}"
}

data "amazon-ami" "ubuntu" {
  filters = {
    name                = "ubuntu/images/hvm-ssd/ubuntu-*-21.04-amd64-server-*"
    root-device-type    = "ebs"
    virtualization-type = "hvm"
  }
  most_recent = true
  owners      = ["099720109477"]
}

locals {
  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
}

source "amazon-ebs" "ubuntu" {
  ami_name      = "riju-${local.timestamp}"
  instance_type = "t3.micro"
  source_ami    = "${data.amazon-ami.ubuntu.id}"
  ssh_username  = "ubuntu"
}

build {
  sources = ["source.amazon-ebs.ubuntu"]

  provisioner "file" {
    destination = "/tmp/riju-init-volume"
    source      = "riju-init-volume"
  }

  provisioner "file" {
    destination = "/tmp/riju-supervisor"
    source      = "../supervisor/out/riju-supervisor"
  }

  provisioner "file" {
    destination = "/tmp/riju.service"
    source      = "riju.service"
  }

  provisioner "shell" {
    environment_vars = [
      "ADMIN_PASSWORD=${var.admin_password}",
      "AWS_REGION=${var.aws_region}",
      "S3_BUCKET=${var.s3_bucket}",
      "SUPERVISOR_ACCESS_TOKEN=${var.supervisor_access_token}",
    ]
    script = "provision.bash"
  }
}
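
For reference, the template can be exercised directly; variable values fall back to the environment per the env() defaults above. A sketch with placeholder values:

export AWS_REGION=us-west-1 S3_BUCKET=riju
export ADMIN_PASSWORD='<placeholder>' SUPERVISOR_ACCESS_TOKEN='<placeholder>'
packer validate main.pkr.hcl
packer build main.pkr.hcl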
@@ -2,8 +2,18 @@
set -euo pipefail

mkdir /tmp/riju
pushd /tmp/riju
: ${ADMIN_PASSWORD}
: ${AWS_REGION}
: ${S3_BUCKET}
: ${SUPERVISOR_ACCESS_TOKEN}

# I think there is a race condition related to Ubuntu wanting to do an
# automated system upgrade at boot, which causes 'apt-get update' to
# sometimes fail with an obscure error message.
sleep 5

mkdir /tmp/riju-work
pushd /tmp/riju-work

export DEBIAN_FRONTEND=noninteractive
@@ -21,52 +31,31 @@ deb [arch=amd64] https://download.docker.com/linux/ubuntu ${ubuntu_name} stable
EOF

sudo -E apt-get update
sudo -E apt-get install -y certbot docker-ce docker-ce-cli containerd.io unzip whois
sudo -E apt-get install -y docker-ce docker-ce-cli containerd.io unzip whois

wget -nv https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -O awscli.zip
unzip -q awscli.zip
sudo ./aws/install

sudo chown root:root /tmp/riju /tmp/riju-deploy /tmp/riju.service
sudo mv /tmp/riju /tmp/riju-deploy /tmp/riju-init-volume /tmp/riju-install-certbot-hooks /usr/local/bin/
sudo chown root:root /tmp/riju-init-volume /tmp/riju-supervisor /tmp/riju.service
sudo mv /tmp/riju-init-volume /tmp/riju-supervisor /usr/local/bin/
sudo mv /tmp/riju.service /etc/systemd/system/

for user in admin deploy; do
    if ! grep -vq "PRIVATE KEY" "/tmp/id_${user}.pub"; then
        echo "${user} public key was set to a private key, aborting" >&2
        exit 1
    fi
    IFS=" " read contents < "/tmp/id_${user}.pub"
    echo "${contents}" > "/tmp/id_${user}.pub"
done

sudo sed -Ei 's/^#?PermitRootLogin .*/PermitRootLogin no/' /etc/ssh/sshd_config
sudo sed -Ei 's/^#?PasswordAuthentication .*/PasswordAuthentication no/' /etc/ssh/sshd_config
sudo sed -Ei 's/^#?PermitEmptyPasswords .*/PermitEmptyPasswords no/' /etc/ssh/sshd_config
sudo sed -Ei "s/\\\$AWS_REGION/${AWS_REGION}/" /etc/systemd/system/riju.service
sudo sed -Ei "s/\\\$S3_BUCKET/${S3_BUCKET}/" /etc/systemd/system/riju.service
sudo sed -Ei "s/\\\$SUPERVISOR_ACCESS_TOKEN/${SUPERVISOR_ACCESS_TOKEN}/" /etc/systemd/system/riju.service

sudo passwd -l root
sudo useradd admin -g admin -G sudo -s /usr/bin/bash -p "$(echo "${ADMIN_PASSWORD}" | mkpasswd -s)" -m
sudo useradd deploy -s /usr/bin/bash -p "!" -m

for user in admin deploy; do
    sudo runuser -u "${user}" -- mkdir -p "/home/${user}/.ssh"
    sudo mv "/tmp/id_${user}.pub" "/home/${user}/.ssh/authorized_keys"
    sudo chown -R "${user}:${user}" "/home/${user}/.ssh"
    sudo chmod -R go-rwx "/home/${user}/.ssh"
done

sudo runuser -u deploy -- sed -i 's/^/command="sudo riju-deploy ${SSH_ORIGINAL_COMMAND}",restrict /' /home/deploy/.ssh/authorized_keys

sudo tee /etc/sudoers.d/riju >/dev/null <<"EOF"
deploy ALL=(root) NOPASSWD: /usr/local/bin/riju-deploy
EOF

sudo hostnamectl set-hostname riju

sudo systemctl enable riju

sudo passwd -l ubuntu
sudo userdel ubuntu -f

popd
rm -rf /tmp/riju
rm -rf /tmp/riju-work
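
The three riju.service sed lines above substitute the literal $VAR placeholders in the unit file (see its Environment= lines further down) with values known at provision time. A self-contained sketch of the quoting involved, using a throwaway sample file:

S3_BUCKET=riju
printf 'Environment=S3_BUCKET=$S3_BUCKET\n' > riju.service.sample
sed -E "s/\\\$S3_BUCKET/${S3_BUCKET}/" riju.service.sample
# -> Environment=S3_BUCKET=riju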
packer/riju
@@ -1,44 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

domain="$(ls /etc/letsencrypt/live | grep -v README | head -n1)" || true

if [[ -n "${DISABLE_TLS:-}" ]]; then
    echo "Disabling TLS due to DISABLE_TLS=${DISABLE_TLS}" >&2
elif [[ -z "${domain}" ]]; then
    echo "No certs installed in /etc/letsencrypt/live, disabling TLS" >&2
else
    echo "Detected cert for domain: ${domain}, enabling TLS" >&2
    export TLS=1
    TLS_PRIVATE_KEY="$(base64 "/etc/letsencrypt/live/${domain}/privkey.pem")"
    TLS_CERTIFICATE="$(base64 "/etc/letsencrypt/live/${domain}/fullchain.pem")"
    export TLS_PRIVATE_KEY TLS_CERTIFICATE
    if [[ "${domain}" == riju.codes ]]; then
        echo "Domain is riju.codes, enabling analytics" >&2
        export ANALYTICS=1
    else
        echo "Domain is not riju.codes, disabling analytics" >&2
    fi
fi

if [[ -n "${DETACH:-}" ]]; then
    extra_args="-d"
elif [[ -t 1 ]]; then
    extra_args="-it"
else
    extra_args=
fi

port_args="${PORT_MAPPING:--p 0.0.0.0:80:6119 -p 0.0.0.0:443:6120}"
image_name="${IMAGE_NAME:-riju:app}"
container_name="${CONTAINER_NAME:-riju-prod}"

if docker container inspect ${container_name} &>/dev/null; then
    docker stop ${container_name}
fi

docker run --rm ${port_args} ${extra_args} \
    -e TLS -e TLS_PRIVATE_KEY -e TLS_CERTIFICATE -e ANALYTICS \
    -h riju --name "${container_name}" \
    "${image_name}"
@@ -1,35 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

if (( $# != 1 )); then
    echo "usage: ssh deploy@riju IMAGE" >&2
    exit 1
fi

image="$1"

riju-init-volume

echo "Pull image to be deployed..."
docker pull "${image}"

echo "Start new image in test container..." >&2
CONTAINER_NAME=riju-test IMAGE_NAME="${image}" DETACH=1 \
    PORT_MAPPING="-p 127.0.0.1:6119:6119" DISABLE_TLS=0 riju

echo "Wait for web server to come up..." >&2
sleep 5

echo "Test web server health..." >&2
output="$(curl -fsSL http://localhost:6119)"
head -n15 <<< "${output}"

echo "Tear down test container..." >&2
docker stop riju-test

echo "Retag production image..." >&2
docker tag "${image}" riju:app

echo "Restart production server..." >&2
systemctl restart riju
@@ -10,7 +10,7 @@ mount_point=/mnt/riju/data
mkdir -p "${mount_point}"

disks="$(lsblk -l -d -b -o name,size | grep nvme)"
disks="$(lsblk -l -d -b -o name,size | grep -Ev 'loop|NAME')"
num_disks="$(wc -l <<< "${disks}")"

if [[ "${num_disks}" != 2 ]]; then
@ -27,6 +27,8 @@ print "volume has ${num_parts} partition(s)"
|
|||
if [[ "${num_parts}" != 1 ]]; then
|
||||
print "repartitioning so we have exactly one partition"
|
||||
sfdisk -X gpt "/dev/${disk}" <<< ";"
|
||||
print "waiting for 1 second so that partitions show up in /dev"
|
||||
sleep 1
|
||||
fi
|
||||
|
||||
part="$(lsblk -l -o name | (grep "${disk}." || true) | head -n1)"
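
The new lsblk filter is easier to read against sample output: it keeps any real disk while dropping the header row and loop devices. A sketch; device names and sizes below are illustrative, not real output:

lsblk -l -d -b -o name,size
#   NAME         SIZE    <- dropped by grep -Ev 'loop|NAME'
#   loop0    56623104    <- dropped
#   nvme0n1  8589934592
#   nvme1n1 17179869184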
@@ -1,18 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

sudo tee /etc/letsencrypt/renewal-hooks/pre/riju >/dev/null <<"EOF"
#!/usr/bin/env bash
set -euo pipefail
systemctl stop riju
EOF

sudo tee /etc/letsencrypt/renewal-hooks/post/riju >/dev/null <<"EOF"
#!/usr/bin/env bash
set -euo pipefail
systemctl start riju
EOF

sudo chmod +x /etc/letsencrypt/renewal-hooks/pre/riju
sudo chmod +x /etc/letsencrypt/renewal-hooks/post/riju
@@ -5,8 +5,11 @@ After=docker.service
[Service]
Type=exec
ExecStart=riju
ExecStart=riju-supervisor
Restart=always
Environment=AWS_REGION=$AWS_REGION
Environment=S3_BUCKET=$S3_BUCKET
Environment=SUPERVISOR_ACCESS_TOKEN=$SUPERVISOR_ACCESS_TOKEN

[Install]
WantedBy=multi-user.target
@@ -1,7 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

: ${ADMIN_PASSWORD}
: ${ADMIN_SSH_PUBLIC_KEY_FILE}
: ${DEPLOY_SSH_PUBLIC_KEY_FILE}
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -euo pipefail

function verbosely {
    echo "$@"
    "$@"
}

cd supervisor
mkdir -p out
verbosely go build -o out/riju-supervisor ./src
@@ -0,0 +1,14 @@
module github.com/raxod502/riju/supervisor

go 1.16

require (
    github.com/aws/aws-sdk-go-v2 v1.7.0 // indirect
    github.com/aws/aws-sdk-go-v2/config v1.4.1 // indirect
    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.3.1 // indirect
    github.com/aws/aws-sdk-go-v2/service/ecr v1.4.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/s3 v1.11.0 // indirect
    github.com/aws/aws-sdk-go-v2/service/sts v1.5.0 // indirect
    github.com/caarlos0/env/v6 v6.6.2 // indirect
    github.com/google/uuid v1.2.0 // indirect
)
@@ -0,0 +1,43 @@
github.com/aws/aws-sdk-go-v2 v1.7.0 h1:UYGnoIPIzed+ycmgw8Snb/0HK+KlMD+SndLTneG8ncE=
github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
github.com/aws/aws-sdk-go-v2/config v1.4.1 h1:PcGp9Kf+1dHJmP3EIDZJmAmWfGABFTU0obuvYQNzWH8=
github.com/aws/aws-sdk-go-v2/config v1.4.1/go.mod h1:HCDWZ/oeY59TPtXslxlbkCqLQBsVu6b09kiG43tdP+I=
github.com/aws/aws-sdk-go-v2/credentials v1.3.0 h1:vXxTINCsHn6LKhR043jwSLd6CsL7KOEU7b1woMr1K1A=
github.com/aws/aws-sdk-go-v2/credentials v1.3.0/go.mod h1:tOcv+qDZ0O+6Jk2beMl5JnZX6N0H7O8fw9UsD3bP7GI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.2.0 h1:ucExzYCoAiL9GpKOsKkQLsa43wTT23tcdP4cDTSbZqY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.2.0/go.mod h1:XvzoGzuS0kKPzCQtJCC22Xh/mMgVAzfGo/0V+mk/Cu0=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.3.1 h1:ag1MjvYmE8hnvl2/3LYOog9GZxcguqR6z1ewCUJQ9rE=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.3.1/go.mod h1:WXrj1wxGcYFfQ6H4xqsbVziISWQT55SlpX8B5+EqLOw=
github.com/aws/aws-sdk-go-v2/internal/ini v1.1.0 h1:DJq/vXXF+LAFaa/kQX9C6arlf4xX4uaaqGWIyAKOCpM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.1.0/go.mod h1:qGQ/9IfkZonRNSNLE99/yBJ7EPA/h8jlWEqtJCcaj+Q=
github.com/aws/aws-sdk-go-v2/service/ecr v1.4.0 h1:cgMcR4Y2JFhWHFDNiVYLApc5kSaGK0geqqL/2XvP77M=
github.com/aws/aws-sdk-go-v2/service/ecr v1.4.0/go.mod h1:66eKvbrtxgZWfVHNwdncN8vciDvc00gX2flcATKqLYQ=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.0 h1:wfI4yrOCMAGdHaEreQ65ycSmPLVc2Q82O+r7ZxYTynA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.0/go.mod h1:2Kc2Pybp1Hr2ZCCOz78mWnNSZYEKKBQgNcizVGk9sko=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.0 h1:g2npzssI/6XsoQaPYCxliMFeC5iNKKvO0aC+/wWOE0A=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.0/go.mod h1:a7XLWNKuVgOxjssEF019IiHPv35k8KHBaWv/wJAfi2A=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.0 h1:6KmDU3XCGTcZlWPtP/gh7wYErrovnIxjX7um8iiuVsU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.0/go.mod h1:541bxEA+Z8quwit9ZT7uxv/l9xRz85/HS41l9OxOQdY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.11.0 h1:FuKlyrDBZBk0RFxjqFPtx9y/KDsxTa3MoFVUgIW9w3Q=
github.com/aws/aws-sdk-go-v2/service/s3 v1.11.0/go.mod h1:zJe8mEFDS2F04nO0pKVBPfArAv2ycC6wt3ILvrV4SQw=
github.com/aws/aws-sdk-go-v2/service/sso v1.3.0 h1:DMi9w+TpUam7eJ8ksL7svfzpqpqem2MkDAJKW8+I2/k=
github.com/aws/aws-sdk-go-v2/service/sso v1.3.0/go.mod h1:qWR+TUuvfji9udM79e4CPe87C5+SjMEb2TFXkZaI0Vc=
github.com/aws/aws-sdk-go-v2/service/sts v1.5.0 h1:Y1K9dHE2CYOWOvaJSIITq4mJfLX43iziThTvqs5FqOg=
github.com/aws/aws-sdk-go-v2/service/sts v1.5.0/go.mod h1:HjDKUmissf6Mlut+WzG2r35r6LeTKmLEDJ6p9NryzLg=
github.com/aws/smithy-go v1.5.0 h1:2grDq7LxZlo8BZUDeqRfQnQWLZpInmh2TLPPkJku3YM=
github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/caarlos0/env/v6 v6.6.2 h1:BypLXDWQTA32rS4UM7pBz+/0BOuvs6C7LSeQAxMwyvI=
github.com/caarlos0/env/v6 v6.6.2/go.mod h1:P0BVSgU9zfkxfSpFUs6KsO3uWR4k3Ac0P66ibAGTybM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -0,0 +1,486 @@
package main

import (
    "bytes"
    "context"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
    "os"
    "os/exec"
    "regexp"
    "sort"
    "strings"
    "sync"
    "time"

    "github.com/aws/aws-sdk-go-v2/aws"
    awsConfig "github.com/aws/aws-sdk-go-v2/config"
    s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/ecr"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/sts"
    "github.com/caarlos0/env/v6"
    uuidlib "github.com/google/uuid"
)

const bluePort = 6229
const greenPort = 6230

const blueName = "riju-app-blue"
const greenName = "riju-app-green"

type deploymentConfig struct {
    AppImageTag   string            `json:"appImageTag"`
    LangImageTags map[string]string `json:"langImageTags"`
}

type supervisorConfig struct {
    AccessToken string `env:"SUPERVISOR_ACCESS_TOKEN,notEmpty"`
    S3Bucket    string `env:"S3_BUCKET,notEmpty"`
}

type reloadJob struct {
    status string
    active bool
    failed bool
}

type supervisor struct {
    config supervisorConfig

    blueProxyHandler  http.Handler
    greenProxyHandler http.Handler
    isGreen           bool // blue-green deployment

    awsAccountNumber string
    awsRegion        string
    s3               *s3.Client
    ecr              *ecr.Client

    reloadLock       sync.Mutex
    reloadInProgress bool
    reloadNeeded     bool
    reloadUUID       string
    reloadNextUUID   string
    reloadJobs       map[string]*reloadJob
}

func (sv *supervisor) status(status string) {
    sv.reloadLock.Lock()
    sv.reloadJobs[sv.reloadUUID].status = status
    sv.reloadLock.Unlock()
    log.Println("active: " + status)
}
func (sv *supervisor) scheduleReload() string {
    uuid := ""
    sv.reloadLock.Lock()
    if !sv.reloadInProgress {
        sv.reloadInProgress = true
        sv.reloadUUID = uuidlib.New().String()
        uuid = sv.reloadUUID
        go sv.reloadWithScheduling()
    } else {
        // A reload is already running; queue a follow-up job and
        // hand back the UUID of the queued job, allocating it on
        // the first such request.
        if sv.reloadNextUUID == "" {
            sv.reloadNextUUID = uuidlib.New().String()
        }
        uuid = sv.reloadNextUUID
        sv.reloadNeeded = true
    }
    sv.reloadLock.Unlock()
    return uuid
}

func (sv *supervisor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if strings.HasPrefix(r.URL.Path, "/api/supervisor") {
        authHeader := r.Header.Get("Authorization")
        if authHeader == "" {
            http.Error(w, "401 Authorization header missing", http.StatusUnauthorized)
            return
        }
        if !strings.HasPrefix(authHeader, "Bearer ") {
            http.Error(w, "401 malformed Authorization header", http.StatusUnauthorized)
            return
        }
        if authHeader != "Bearer "+sv.config.AccessToken {
            http.Error(w, "401 wrong access token", http.StatusUnauthorized)
            return
        }
        if r.URL.Path == "/api/supervisor/v1/reload" {
            if r.Method != http.MethodPost {
                http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
                return
            }
            uuid := sv.scheduleReload()
            fmt.Fprintln(w, uuid)
            return
        }
        if r.URL.Path == "/api/supervisor/v1/reload/status" {
            if r.Method != http.MethodGet {
                http.Error(w, "405 method not allowed", http.StatusMethodNotAllowed)
                return
            }
            uuid := r.URL.Query().Get("uuid")
            if uuid == "" {
                http.Error(
                    w,
                    "400 missing uuid query parameter",
                    http.StatusBadRequest,
                )
                return
            }
            sv.reloadLock.Lock()
            job := sv.reloadJobs[uuid]
            if job == nil {
                if uuid == sv.reloadUUID || uuid == sv.reloadNextUUID {
                    fmt.Fprintln(w, "queued")
                } else {
                    http.Error(w, "404 no such job", http.StatusNotFound)
                }
            } else if job.active {
                fmt.Fprintln(w, "active: "+job.status)
            } else if job.failed {
                fmt.Fprintln(w, "failed: "+job.status)
            } else {
                fmt.Fprintln(w, "succeeded: "+job.status)
            }
            sv.reloadLock.Unlock()
            return
        }
        http.NotFound(w, r)
        return
    }
    if sv.isGreen {
        sv.greenProxyHandler.ServeHTTP(w, r)
    } else {
        sv.blueProxyHandler.ServeHTTP(w, r)
    }
    return
}
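
The two authenticated endpoints can be poked with curl once the supervisor is running. A sketch; the host, port, and token value are placeholders:

token="$SUPERVISOR_ACCESS_TOKEN"
uuid="$(curl -fsSL -X POST -H "Authorization: Bearer ${token}" \
    http://localhost/api/supervisor/v1/reload)"
curl -fsSL -H "Authorization: Bearer ${token}" \
    "http://localhost/api/supervisor/v1/reload/status?uuid=${uuid}"
# -> queued | active: <status> | failed: <status> | succeeded: <status>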

func (sv *supervisor) reloadWithScheduling() {
    sv.reloadLock.Lock()
    sv.reloadJobs[sv.reloadUUID] = &reloadJob{
        status: "initializing",
        active: true,
        failed: false,
    }
    sv.reloadLock.Unlock()
    err := sv.reload()
    sv.reloadLock.Lock()
    sv.reloadJobs[sv.reloadUUID].active = false
    if err != nil {
        log.Println("failed: " + err.Error())
        sv.reloadJobs[sv.reloadUUID].failed = true
        sv.reloadJobs[sv.reloadUUID].status = err.Error()
    } else {
        log.Println("succeeded")
    }
    sv.reloadInProgress = false
    sv.reloadUUID = ""
    if sv.reloadNeeded {
        sv.reloadNeeded = false
        sv.reloadInProgress = true
        sv.reloadUUID = sv.reloadNextUUID
        sv.reloadNextUUID = ""
        go sv.reloadWithScheduling()
    } else {
        go func() {
            // Arguably slightly incorrect but it's fine
            // if we reload slightly more than once per 5
            // minutes.
            time.Sleep(5 * time.Minute)
            sv.scheduleReload()
        }()
    }
    sv.reloadLock.Unlock()
}

var rijuImageRegexp = regexp.MustCompile(`(?:^|/)riju:([^<>]+)$`)

func (sv *supervisor) reload() error {
    sv.status("getting access token from ECR")
    ecrResp, err := sv.ecr.GetAuthorizationToken(
        context.Background(),
        &ecr.GetAuthorizationTokenInput{},
    )
    if err != nil {
        return err
    }
    if len(ecrResp.AuthorizationData) != 1 {
        return fmt.Errorf(
            "got unexpected number (%d) of authorization tokens",
            len(ecrResp.AuthorizationData),
        )
    }
    authInfo, err := base64.StdEncoding.DecodeString(*ecrResp.AuthorizationData[0].AuthorizationToken)
    if err != nil {
        return err
    }
    authInfoParts := strings.Split(string(authInfo), ":")
    if len(authInfoParts) != 2 {
        return errors.New("got malformed auth info from ECR")
    }
    dockerUsername := authInfoParts[0]
    dockerPassword := authInfoParts[1]
    sv.status("authenticating Docker client to ECR")
    dockerLogin := exec.Command(
        "docker", "login",
        "--username", dockerUsername,
        "--password-stdin",
        fmt.Sprintf(
            "%s.dkr.ecr.%s.amazonaws.com",
            sv.awsAccountNumber, sv.awsRegion,
        ),
    )
    dockerLogin.Stdin = bytes.NewReader([]byte(dockerPassword))
    dockerLogin.Stdout = os.Stdout
    dockerLogin.Stderr = os.Stderr
    if err := dockerLogin.Run(); err != nil {
        return err
    }
    sv.status("downloading deployment config from S3")
    dl := s3manager.NewDownloader(sv.s3)
    buf := s3manager.NewWriteAtBuffer([]byte{})
    if _, err := dl.Download(context.Background(), buf, &s3.GetObjectInput{
        Bucket: &sv.config.S3Bucket,
        Key:    aws.String("config.json"),
    }); err != nil {
        return err
    }
    deployCfg := deploymentConfig{}
    if err := json.Unmarshal(buf.Bytes(), &deployCfg); err != nil {
        return err
    }
    sv.status("listing locally available images")
    dockerImageLs := exec.Command(
        "docker", "image", "ls", "--format",
        "{{ .Repository }}:{{ .Tag }}",
    )
    dockerImageLs.Stderr = os.Stderr
    out, err := dockerImageLs.Output()
    if err != nil {
        return err
    }
    existingTags := map[string]bool{}
    for _, line := range strings.Split(string(out), "\n") {
        if match := rijuImageRegexp.FindStringSubmatch(line); match != nil {
            tag := match[1]
            existingTags[tag] = true
        }
    }
    neededTags := []string{}
    for _, tag := range deployCfg.LangImageTags {
        neededTags = append(neededTags, tag)
    }
    neededTags = append(neededTags, deployCfg.AppImageTag)
    sort.Strings(neededTags)
    for _, tag := range neededTags {
        if !existingTags[tag] {
            sv.status("pulling image for " + tag)
            fullImage := fmt.Sprintf(
                "%s.dkr.ecr.%s.amazonaws.com/riju:%s",
                sv.awsAccountNumber,
                sv.awsRegion,
                tag,
            )
            dockerPull := exec.Command("docker", "pull", fullImage)
            dockerPull.Stdout = os.Stdout
            dockerPull.Stderr = os.Stderr
            if err := dockerPull.Run(); err != nil {
                return err
            }
            dockerTag := exec.Command(
                "docker", "tag", fullImage,
                fmt.Sprintf("riju:%s", tag),
            )
            dockerTag.Stdout = os.Stdout
            dockerTag.Stderr = os.Stderr
            if err := dockerTag.Run(); err != nil {
                return err
            }
        }
    }
    deployCfgStr, err := json.Marshal(&deployCfg)
    if err != nil {
        return err
    }
    var port int
    var name string
    var oldName string
    if sv.isGreen {
        port = bluePort
        name = blueName
        oldName = greenName
    } else {
        port = greenPort
        name = greenName
        oldName = blueName
    }
    sv.status("starting container " + name)
    dockerRun := exec.Command(
        "docker", "run", "-d",
        "-v", "/var/run/riju:/var/run/riju",
        "-v", "/var/run/docker.sock:/var/run/docker.sock",
        "-p", fmt.Sprintf("127.0.0.1:%d:6119", port),
        "-e", "RIJU_DEPLOY_CONFIG",
        "-e", "ANALYTICS=1",
        "--name", name,
        fmt.Sprintf("riju:%s", deployCfg.AppImageTag),
    )
    dockerRun.Stdout = os.Stdout
    dockerRun.Stderr = os.Stderr
    dockerRun.Env = append(os.Environ(), fmt.Sprintf("RIJU_DEPLOY_CONFIG=%s", deployCfgStr))
    if err := dockerRun.Run(); err != nil {
        return err
    }
    sv.status("waiting for container to start up")
    time.Sleep(5 * time.Second)
    sv.status("checking that container is healthy")
    resp, err := http.Get(fmt.Sprintf("http://localhost:%d", port))
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }
    if !strings.Contains(string(body), "python") {
        return errors.New("container did not appear to be healthy")
    }
|
||||
sv.isGreen = !sv.isGreen
|
||||
sv.status("stopping old container")
|
||||
dockerRm := exec.Command("docker", "rm", "-f", oldName)
|
||||
dockerRm.Stdout = dockerRm.Stdout
|
||||
dockerRm.Stderr = dockerRm.Stderr
|
||||
if err := dockerRm.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
sv.status("reload complete")
|
||||
return nil
|
||||
}
|
||||
|
||||
var rijuContainerRegexp = regexp.MustCompile(`^([^:]+):(.+)$`)
|
||||
|
||||
func main() {
|
||||
supervisorCfg := supervisorConfig{}
|
||||
if err := env.Parse(&supervisorCfg); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
rijuInitVolume := exec.Command("riju-init-volume")
|
||||
rijuInitVolume.Stdout = rijuInitVolume.Stdout
|
||||
rijuInitVolume.Stderr = rijuInitVolume.Stderr
|
||||
if err := rijuInitVolume.Run(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
blueUrl, err := url.Parse(fmt.Sprintf("http://localhost:%d", bluePort))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
greenUrl, err := url.Parse(fmt.Sprintf("http://localhost:%d", greenPort))
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
awsCfg, err := awsConfig.LoadDefaultConfig(context.Background())
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
stsClient := sts.NewFromConfig(awsCfg)
|
||||
ident, err := stsClient.GetCallerIdentity(context.Background(), &sts.GetCallerIdentityInput{})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
dockerContainerLs := exec.Command(
|
||||
"docker", "container", "ls", "-a",
|
||||
"--format", "{{ .Names }}:{{ .CreatedAt }}",
|
||||
)
|
||||
dockerContainerLs.Stderr = os.Stderr
|
||||
out, err := dockerContainerLs.Output()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
var blueRunningSince *time.Time
|
||||
var greenRunningSince *time.Time
|
||||
for _, line := range strings.Split(string(out), "\n") {
|
||||
if match := rijuContainerRegexp.FindStringSubmatch(line); match != nil {
|
||||
name := match[1]
|
||||
created, err := time.Parse(
|
||||
"2006-01-02 15:04:05 -0700 MST",
|
||||
match[2],
|
||||
)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if name == blueName {
|
||||
blueRunningSince = &created
|
||||
continue
|
||||
}
|
||||
if name == greenName {
|
||||
greenRunningSince = &created
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var isGreen bool
|
||||
if blueRunningSince == nil && greenRunningSince == nil {
|
||||
log.Println("did not detect any existing containers")
|
||||
isGreen = false
|
||||
} else if blueRunningSince != nil && greenRunningSince == nil {
|
||||
log.Println("detected existing blue container")
|
||||
isGreen = false
|
||||
} else if greenRunningSince != nil && blueRunningSince == nil {
|
||||
log.Println("detected existing green container")
|
||||
isGreen = true
|
||||
} else {
|
||||
log.Println("detected existing blue and green containers")
|
||||
isGreen = greenRunningSince.Before(*blueRunningSince)
|
||||
var color string
|
||||
var name string
|
||||
if isGreen {
|
||||
color = "blue"
|
||||
name = blueName
|
||||
} else {
|
||||
color = "green"
|
||||
name = greenName
|
||||
}
|
||||
log.Printf("stopping %s container as it is newer\n", color)
|
||||
dockerRm := exec.Command("docker", "rm", "-f", name)
|
||||
dockerRm.Stdout = os.Stdout
|
||||
dockerRm.Stderr = os.Stderr
|
||||
if err := dockerRm.Run(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
sv := &supervisor{
|
||||
config: supervisorCfg,
|
||||
blueProxyHandler: httputil.NewSingleHostReverseProxy(blueUrl),
|
||||
greenProxyHandler: httputil.NewSingleHostReverseProxy(greenUrl),
|
||||
isGreen: isGreen,
|
||||
s3: s3.NewFromConfig(awsCfg),
|
||||
ecr: ecr.NewFromConfig(awsCfg),
|
||||
awsRegion: awsCfg.Region,
|
||||
awsAccountNumber: *ident.Account,
|
||||
reloadJobs: map[string]*reloadJob{},
|
||||
}
|
||||
go sv.scheduleReload()
|
||||
log.Println("listening on http://0.0.0.0:80")
|
||||
log.Fatalln(http.ListenAndServe("0.0.0.0:80", sv))
|
||||
}
|
|
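The scheduling logic above coalesces reload requests: while one reload is running, any number of additional requests collapse into at most one queued follow-up. A minimal standalone Go sketch of that pattern, with simplified types rather than the supervisor's actual struct:

package main

import (
	"fmt"
	"sync"
	"time"
)

// scheduler runs a job at most once concurrently; requests that
// arrive mid-run are coalesced into a single queued re-run.
type scheduler struct {
	mu         sync.Mutex
	inProgress bool
	needed     bool
	run        func()
}

func (s *scheduler) schedule() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.inProgress {
		s.needed = true // remember, but don't stack a second run
		return
	}
	s.inProgress = true
	go s.loop()
}

func (s *scheduler) loop() {
	for {
		s.run()
		s.mu.Lock()
		if !s.needed {
			s.inProgress = false
			s.mu.Unlock()
			return
		}
		s.needed = false // one more pass for the queued request
		s.mu.Unlock()
	}
}

func main() {
	s := &scheduler{run: func() {
		fmt.Println("reloading")
		time.Sleep(100 * time.Millisecond)
	}}
	for i := 0; i < 5; i++ {
		s.schedule() // a burst of 5 requests yields about 2 runs
	}
	time.Sleep(time.Second)
}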
@@ -18,10 +18,8 @@ for src in system/src/*.c; do
   out="${src/src/out}"
   out="${out/.c}"
   verbosely clang -Wall -Wextra -Werror -std=c11 "${src}" -o "${out}"
-  if [[ "${out}" == *-privileged ]]; then
-    if getent group riju >/dev/null; then
-      sudo chown root:riju "${out}"
-    fi
-    sudo chmod a=,g=rx,u=rwxs "${out}"
+  if [[ "${out}" == *-privileged && -z "${UNPRIVILEGED:-}" ]]; then
+    verbosely sudo chown root:riju "${out}"
+    verbosely sudo chmod a=,g=rx,u=rwxs "${out}"
   fi
 done
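With UNPRIVILEGED unset, the build leaves each *-privileged helper setuid root with mode u=rwxs,g=rx,o=. A hypothetical standalone Go check of that invariant (the binary path is illustrative, not defined by the script):

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	// Assumed output path, following the script's src -> out substitution.
	info, err := os.Stat("system/out/riju-system-privileged")
	if err != nil {
		log.Fatalln(err)
	}
	mode := info.Mode()
	if mode&os.ModeSetuid == 0 {
		log.Fatalln("setuid bit is not set")
	}
	// chmod a=,g=rx,u=rwxs yields permission bits 750 plus setuid.
	if mode.Perm() != 0o750 {
		log.Fatalf("unexpected permissions %o, want 750", mode.Perm())
	}
	fmt.Println("permissions look correct")
}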
@@ -0,0 +1,84 @@
#!/usr/bin/env python3

import argparse
import signal
import subprocess
import sys
import uuid


class Parser(argparse.ArgumentParser):
    def format_help(self):
        return """
Usage: docker-exec.bash [OPTIONS] CONTAINER COMMAND [ARG...]

Run a command in a running container

Options:
  -i, --interactive    Keep STDIN open even if not attached
  -t, --tty            Allocate a pseudo-TTY
  -u, --user string    Username or UID (format: <name|uid>:[<group|gid>])
"""


parser = Parser()
parser.add_argument("-i", "--interactive", action="store_true")
parser.add_argument("-t", "--tty", action="store_true")
parser.add_argument("-u", "--user", type=str)
parser.add_argument("container", type=str)
parser.add_argument("arg", type=str, nargs="*")

args = parser.parse_args()

pidfiles = "/var/run/riju/pidfiles"
pidfile = pidfiles + "/" + str(uuid.uuid4()).replace("-", "")


# We have to use 'kill -9' here, otherwise runuser intercepts the
# signal and takes its sweet time cleaning up.
def cleanup(*ignored_args):
    subprocess.run([
        "docker",
        "exec",
        args.container,
        "bash",
        "-c",
        f"""
set -euo pipefail
if [[ -f '{pidfile}' ]]; then
    kill -9 -$(< '{pidfile}') 2>/dev/null || true
    rm -f '{pidfile}'
fi
""",
    ])


signal.signal(signal.SIGINT, cleanup)
signal.signal(signal.SIGTERM, cleanup)

exec_args = []

if args.interactive:
    exec_args.append("-i")
if args.tty:
    exec_args.append("-t")

runuser_args = []

if args.user:
    runuser_args = ["runuser", "-u", args.user, "--"]

subprocess.run([
    "docker",
    "exec",
    *exec_args,
    args.container,
    "bash",
    "-c",
    f"""
set -euo pipefail
umask 077
mkdir -p '{pidfiles}'
echo "$$" > '{pidfile}'
exec "$@"
""",
    "--",
    *runuser_args,
    *args.arg,
])
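The pidfile dance exists so that cleanup can kill the whole process group started inside the container: the wrapped command records $$, and the cleanup path signals the negative of that PID, which addresses the entire group. The same trick in Go, as a sketch (Linux-specific; the pidfile path is illustrative):

package main

import (
	"log"
	"os"
	"strconv"
	"strings"
	"syscall"
)

func killGroupFromPidfile(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return err
	}
	// A negative PID signals every process in that group,
	// mirroring `kill -9 -$(< pidfile)` in the shell.
	return syscall.Kill(-pid, syscall.SIGKILL)
}

func main() {
	// Hypothetical pidfile location, modeled on the script above.
	if err := killGroupFromPidfile("/var/run/riju/pidfiles/example"); err != nil {
		log.Println(err)
	}
}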
@@ -1,19 +1,18 @@
 #define _GNU_SOURCE
+#include <fcntl.h>
+#include <errno.h>
 #include <grp.h>
 #include <signal.h>
+#include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>
+#include <time.h>
 #include <unistd.h>
 
-// Keep in sync with backend/src/users.ts
-const int MIN_UID = 2000;
-const int MAX_UID = 65000;
-
-int privileged;
-
 void __attribute__ ((noreturn)) die(char *msg)
 {
   fprintf(stderr, "%s\n", msg);

@@ -23,155 +22,171 @@ void __attribute__ ((noreturn)) die(char *msg)
 void die_with_usage()
 {
   die("usage:\n"
-      "  riju-system-privileged useradd UID\n"
-      "  riju-system-privileged setup UID UUID\n"
-      "  riju-system-privileged spawn UID UUID CMDLINE...\n"
-      "  riju-system-privileged teardown UID UUID");
+      "  riju-system-privileged session UUID LANG [IMAGE-HASH]\n"
+      "  riju-system-privileged exec UUID CMDLINE...\n"
+      "  riju-system-privileged pty UUID CMDLINE...");
 }
 
-int parseUID(char *str)
-{
-  if (!privileged)
-    return -1;
-  char *endptr;
-  long uid = strtol(str, &endptr, 10);
-  if (!*str || *endptr)
-    die("uid must be an integer");
-  if (uid < MIN_UID || uid >= MAX_UID)
-    die("uid is out of range");
-  return uid;
-}
-
 char *parseUUID(char *uuid)
 {
-  if (!*uuid)
+  if (strnlen(uuid, 33) != 32)
     die("illegal uuid");
   for (char *ptr = uuid; *ptr; ++ptr)
-    if (!((*ptr >= 'a' && *ptr <= 'z') || (*ptr >= '0' && *ptr <= '9') || *ptr == '-'))
+    if (!((*ptr >= 'a' && *ptr <= 'z') || (*ptr >= '0' && *ptr <= '9')))
       die("illegal uuid");
   return uuid;
 }
 
-void useradd(int uid)
-{
-  if (!privileged)
-    die("useradd not allowed without root privileges");
-  char *cmdline;
-  if (asprintf(&cmdline, "groupadd -g %1$d riju%1$d", uid) < 0)
-    die("asprintf failed");
-  int status = system(cmdline);
-  if (status != 0)
-    die("groupadd failed");
-  if (asprintf(&cmdline, "useradd -M -N -l -r -u %1$d -g %1$d -p '!' -s /usr/bin/bash riju%1$d", uid) < 0)
-    die("asprintf failed");
-  status = system(cmdline);
-  if (status != 0)
-    die("useradd failed");
-}
-
-void spawn(int uid, char *uuid, char **cmdline)
-{
-  char *cwd;
-  if (asprintf(&cwd, "/tmp/riju/%s", uuid) < 0)
-    die("asprintf failed");
-  if (chdir(cwd) < 0)
-    die("chdir failed");
-  if (privileged) {
-    if (setgid(uid) < 0)
-      die("setgid failed");
-    if (setgroups(0, NULL) < 0)
-      die("setgroups failed");
-    if (setuid(uid) < 0)
-      die("setuid failed");
-  }
-  umask(077);
-  execvp(cmdline[0], cmdline);
-  die("execvp failed");
-}
-
-void setup(int uid, char *uuid)
-{
-  char *cmdline;
-  if (asprintf(&cmdline, privileged
-                   ? "install -d -o riju%1$d -g riju%1$d -m 700 /tmp/riju/%2$s"
-                   : "install -d -m 700 /tmp/riju/%2$s", uid, uuid) < 0)
-    die("asprintf failed");
-  int status = system(cmdline);
-  if (status != 0)
-    die("install failed");
-}
-
-void teardown(int uid, char *uuid)
-{
-  char *cmdline;
-  int status;
-  char *users;
-  if (uid >= MIN_UID && uid < MAX_UID) {
-    if (asprintf(&users, "%d", uid) < 0)
-      die("asprintf failed");
-  } else {
-    cmdline = "getent passwd | grep -Eo '^riju[0-9]{4}' | paste -s -d, - | tr -d '\n'";
-    FILE *fp = popen(cmdline, "r");
-    if (fp == NULL)
-      die("popen failed");
-    static char buf[(MAX_UID - MIN_UID) * 9];
-    if (fgets(buf, sizeof(buf), fp) == NULL) {
-      if (feof(fp))
-        users = NULL;
-      else {
-        die("fgets failed");
-      }
-    } else
-      users = buf;
-  }
-  if (users != NULL) {
-    if (asprintf(&cmdline, "while pkill -9 --uid %1$s; do sleep 0.01; done", users) < 0)
-      die("asprintf failed");
-    status = system(cmdline);
-    if (status != 0 && status != 256)
-      die("pkill failed");
-  }
-  if (asprintf(&cmdline, "rm -rf /tmp/riju/%s", uuid) < 0)
-    die("asprintf failed");
-  status = system(cmdline);
-  if (status != 0)
-    die("rm failed");
-}
+char *parseLang(char *lang) {
+  size_t len = strnlen(lang, 65);
+  if (len == 0 || len > 64)
+    die("illegal language name");
+  return lang;
+}
+
+char *parseImageHash(char *imageHash)
+{
+  if (strnlen(imageHash, 41) != 40)
+    die("illegal imageHash");
+  for (char *ptr = imageHash; *ptr; ++ptr)
+    if (!((*ptr >= 'a' && *ptr <= 'z') || (*ptr >= '0' && *ptr <= '9')))
+      die("illegal imageHash");
+  return imageHash;
+}
+
+void wait_alarm(int signum)
+{
+  (void)signum;
+  die("container did not come up within 1 second");
+}
+
+void session(char *uuid, char *lang, char *imageHash)
+{
+  char *image, *container, *hostname, *volume, *fifo;
+  if ((imageHash != NULL ?
+           asprintf(&image, "riju:lang-%s-%s", lang, imageHash) :
+           asprintf(&image, "riju:lang-%s", lang)) < 0)
+    die("asprintf failed");
+  if (asprintf(&container, "riju-session-%s", uuid) < 0)
+    die("asprintf failed");
+  if (asprintf(&hostname, "HOSTNAME=%s", lang) < 0)
+    die("asprintf failed");
+  int rv = mkdir("/var/run/riju/sentinels", 0700);
+  if (rv < 0 && errno != EEXIST)
+    die("mkdir failed");
+  char tmpdir[] = "/var/run/riju/sentinels/XXXXXX";
+  if (mkdtemp(tmpdir) == NULL)
+    die("mkdtemp failed");
+  if (asprintf(&volume, "%s:/var/run/riju/sentinel", tmpdir) < 0)
+    die("asprintf failed");
+  if (asprintf(&fifo, "%s/fifo", tmpdir) < 0)
+    die("asprintf failed");
+  if (mknod(fifo, 0700 | S_IFIFO, 0) < 0)
+    die("mknod failed");
+  pid_t pid = fork();
+  if (pid < 0)
+    die("fork failed");
+  else if (pid == 0) {
+    char *argv[] = {
+        "docker",
+        "run",
+        "--rm",
+        "-v", volume,
+        "-e", "HOME=/home/riju",
+        "-e", hostname,
+        "-e", "LANG=C.UTF-8",
+        "-e", "LC_ALL=C.UTF-8",
+        "-e", "LOGNAME=riju",
+        "-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin",
+        "-e", "PWD=/home/riju/src",
+        "-e", "SHELL=/usr/bin/bash",
+        "-e", "TERM=xterm-256color",
+        "-e", "TMPDIR=/tmp",
+        "-e", "USER=riju",
+        "-e", "USERNAME=riju",
+        "--user", "root",
+        "--hostname", lang,
+        "--name", container,
+        image, "cat", "/var/run/riju/sentinel/fifo", NULL,
+    };
+    execvp(argv[0], argv);
+    die("execvp failed");
+  }
+  struct timespec ts; // 10ms
+  ts.tv_sec = 0;
+  ts.tv_nsec = 1000 * 1000 * 10;
+  signal(SIGALRM, wait_alarm);
+  alarm(1);
+  int fd;
+  while (1) {
+    fd = open(fifo, O_WRONLY);
+    if (fd >= 0)
+      break;
+    if (errno != ENXIO)
+      die("open failed");
+    int rv = nanosleep(&ts, NULL);
+    if (rv != 0 && errno != EINTR)
+      die("nanosleep failed");
+  }
+  signal(SIGALRM, SIG_IGN);
+  if (unlink(fifo) < 0)
+    die("unlink failed");
+  if (rmdir(tmpdir) < 0)
+    die("rmdir failed");
+  printf("riju: container ready\n"); // magic string
+  if (waitpid(pid, NULL, 0) <= 0)
+    die("waitpid failed");
+  if (close(fd) < 0)
+    die("close failed");
+}
+
+void exec(char *uuid, int argc, char **cmdline, bool pty)
+{
+  char *container;
+  if (asprintf(&container, "riju-session-%s", uuid) < 0)
+    die("asprintf failed");
+  char *argvPrefix[] = {
+      "./system/res/docker-exec.py",
+      "--user", "riju",
+      pty ? "-it" : "-i",
+      container,
+      "--",
+  };
+  char **argv = malloc(sizeof(argvPrefix) + (argc + 1) * sizeof(char *));
+  if (argv == NULL)
+    die("malloc failed");
+  memcpy(argv, argvPrefix, sizeof(argvPrefix));
+  memcpy((void *)argv + sizeof(argvPrefix), cmdline, argc * sizeof(char *));
+  argv[sizeof(argvPrefix) / sizeof(char *) + argc] = NULL; /* fixed: index in elements, not bytes */
+  execvp(argv[0], argv);
+  die("execvp failed");
+}
 
 int main(int argc, char **argv)
 {
-  int code = setuid(0);
-  if (code != 0 && code != -EPERM)
-    die("setuid failed");
-  privileged = code == 0;
   if (seteuid(0) != 0)
     die("seteuid failed");
   if (argc < 2)
     die_with_usage();
-  if (!strcmp(argv[1], "useradd")) {
-    if (argc != 3)
+  if (!strcmp(argv[1], "session")) {
+    if (argc < 4 || argc > 5)
       die_with_usage();
-    useradd(parseUID(argv[2]));
+    char *uuid = parseUUID(argv[2]);
+    char *lang = parseLang(argv[3]);
+    char *imageHash = argc == 5 ? parseImageHash(argv[4]) : NULL;
+    session(uuid, lang, imageHash);
     return 0;
   }
-  if (!strcmp(argv[1], "spawn")) {
-    if (argc < 5)
+  if (!strcmp(argv[1], "exec")) {
+    if (argc < 4)
       die_with_usage();
-    spawn(parseUID(argv[2]), parseUUID(argv[3]), &argv[4]);
+    exec(parseUUID(argv[2]), argc - 3, &argv[3], false); /* fixed: pass only the CMDLINE args */
     return 0;
   }
-  if (!strcmp(argv[1], "setup")) {
-    if (argc != 4)
+  if (!strcmp(argv[1], "pty")) {
+    if (argc < 4)
       die_with_usage();
-    int uid = parseUID(argv[2]);
-    char *uuid = parseUUID(argv[3]);
-    setup(uid, uuid);
-    return 0;
-  }
-  if (!strcmp(argv[1], "teardown")) {
-    if (argc != 4)
-      die_with_usage();
-    int uid = strcmp(argv[2], "*") ? parseUID(argv[2]) : -1;
-    char *uuid = strcmp(argv[3], "*") ? parseUUID(argv[3]) : "*";
-    teardown(uid, uuid);
+    exec(parseUUID(argv[2]), argc - 3, &argv[3], true); /* fixed: pass only the CMDLINE args */
     return 0;
   }
   die_with_usage();
 }
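The sentinel FIFO is how session() knows the container actually came up: the container's entrypoint is `cat` on the FIFO, and on the host an open for writing only succeeds once that reader exists (a non-blocking O_WRONLY open of a FIFO fails with ENXIO until then). A rough standalone Go sketch of the same handshake, with an illustrative path and an in-process stand-in for the container's `cat`:

package main

import (
	"errors"
	"fmt"
	"log"
	"os"
	"syscall"
	"time"
)

// waitForReader polls a FIFO until some process opens it for reading,
// then returns the write end.
func waitForReader(fifo string, timeout time.Duration) (*os.File, error) {
	deadline := time.Now().Add(timeout)
	for {
		f, err := os.OpenFile(fifo, os.O_WRONLY|syscall.O_NONBLOCK, 0)
		if err == nil {
			return f, nil
		}
		if !errors.Is(err, syscall.ENXIO) {
			return nil, err
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("no reader on %s after %s", fifo, timeout)
		}
		time.Sleep(10 * time.Millisecond) // same 10ms cadence as the C code
	}
}

func main() {
	fifo := "/tmp/riju-sentinel-fifo" // illustrative path
	if err := syscall.Mkfifo(fifo, 0o700); err != nil {
		log.Fatalln(err)
	}
	defer os.Remove(fifo)
	go func() {
		// Stands in for `docker run ... cat /var/run/riju/sentinel/fifo`.
		time.Sleep(100 * time.Millisecond)
		if r, err := os.Open(fifo); err == nil {
			defer r.Close()
			time.Sleep(time.Second)
		}
	}()
	f, err := waitForReader(fifo, time.Second)
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()
	fmt.Println("riju: container ready")
}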
@@ -2,38 +2,57 @@
 # Manual edits may be lost in future updates.
 
 provider "registry.terraform.io/hashicorp/aws" {
-  version     = "2.70.0"
-  constraints = "~> 2.70"
+  version     = "3.45.0"
+  constraints = "~> 3.45"
   hashes = [
-    "h1:6tf4jg37RrMHyVCql+fEgAFvX8JiqDognr+lk6rx7To=",
-    "zh:01a5f351146434b418f9ff8d8cc956ddc801110f1cc8b139e01be2ff8c544605",
-    "zh:1ec08abbaf09e3e0547511d48f77a1e2c89face2d55886b23f643011c76cb247",
-    "zh:606d134fef7c1357c9d155aadbee6826bc22bc0115b6291d483bc1444291c3e1",
-    "zh:67e31a71a5ecbbc96a1a6708c9cc300bbfe921c322320cdbb95b9002026387e1",
-    "zh:75aa59ae6f0834ed7142c81569182a658e4c22724a34db5d10f7545857d8db0c",
-    "zh:76880f29fca7a0a3ff1caef31d245af2fb12a40709d67262e099bc22d039a51d",
-    "zh:aaeaf97ffc1f76714e68bc0242c7407484c783d604584c04ad0b267b6812b6dc",
-    "zh:ae1f88d19cc85b2e9b6ef71994134d55ef7830fd02f1f3c58c0b3f2b90e8b337",
-    "zh:b155bdda487461e7b3d6e3a8d5ce5c887a047e4d983512e81e2c8266009f2a1f",
-    "zh:ba394a7c391a26c4a91da63ad680e83bde0bc1ecc0a0856e26e9d62a4e77c408",
-    "zh:e243c9d91feb0979638f28eb26f89ebadc179c57a2bd299b5729fb52bd1902f2",
-    "zh:f6c05e20d9a3fba76ca5f47206dde35e5b43b6821c6cbf57186164ce27ba9f15",
+    "h1:LKU/xfna87/p+hl5yTTW3dvOqWJp5JEM+Dt3nnvSDvA=",
+    "zh:0fdbb3af75ff55807466533f97eb314556ec41a908a543d7cafb06546930f7c6",
+    "zh:20656895744fa0f4607096b9681c77b2385f450b1577f9151d3070818378a724",
+    "zh:390f316d00f25a5e45ef5410961fd05bf673068c1b701dc752d11df6d8e741d7",
+    "zh:3da70f9de241d5f66ea9994ef1e0beddfdb005fa2d2ef6712392f57c5d2e4844",
+    "zh:65de63cc0f97c85c28a19db560c546aa25f4f403dbf4783ac53c3918044cf180",
+    "zh:6fc52072e5a66a5d0510aaa2b373a2697895f51398613c68619d8c0c95fc75f5",
+    "zh:7c1da61092bd1206a020e3ee340ab11be8a4f9bb74e925ca1229ea5267fb3a62",
+    "zh:94e533d86ce3c08e7102dcabe34ba32ae7fd7819fd0aedef28f48d29e635eae2",
+    "zh:a3180d4826662e19e71cf20e925a2be8613a51f2f3f7b6d2643ac1418b976d58",
+    "zh:c783df364928c77fd4dec5419533b125bebe2d50212c4ad609f83b701c2d981a",
+    "zh:e1279bde388cb675d324584d965c6d22c3ec6890b13de76a50910a3bcd84ed64",
   ]
 }
 
 provider "registry.terraform.io/hashicorp/external" {
-  version = "2.0.0"
+  version = "2.1.0"
   hashes = [
-    "h1:Q5xqryWI3tCY8yr+fugq7dz4Qz+8g4GaW9ZS8dc6Ob8=",
-    "zh:07949780dd6a1d43e7b46950f6e6976581d9724102cb5388d3411a1b6f476bde",
-    "zh:0a4f4636ff93f0644affa8474465dd8c9252946437ad025b28fc9f6603534a24",
-    "zh:0dd7e05a974c649950d1a21d7015d3753324ae52ebdd1744b144bc409ca4b3e8",
-    "zh:2b881032b9aa9d227ac712f614056d050bcdcc67df0dc79e2b2cb76a197059ad",
-    "zh:38feb4787b4570335459ca75a55389df1a7570bdca8cdf5df4c2876afe3c14b4",
-    "zh:40f7e0aaef3b1f4c2ca2bb1189e3fe9af8c296da129423986d1d99ccc8cfb86c",
-    "zh:56b361f64f0f0df5c4f958ae2f0e6f8ba192f35b720b9d3ae1be068fabcf73d9",
-    "zh:5fadb5880cd31c2105f635ded92b9b16f918c1dd989627a4ce62c04939223909",
-    "zh:61fa0be9c14c8c4109cfb7be8d54a80c56d35dbae49d3231cddb59831e7e5a4d",
-    "zh:853774bf97fbc4a784d5af5a4ca0090848430781ae6cfc586adeb48f7c44af79",
+    "h1:wbtDfLeawmv6xVT1W0w0fctRCb4ABlaD3JTxwb1jXag=",
+    "zh:0d83ffb72fbd08986378204a7373d8c43b127049096eaf2765bfdd6b00ad9853",
+    "zh:7577d6edc67b1e8c2cf62fe6501192df1231d74125d90e51d570d586d95269c5",
+    "zh:9c669ded5d5affa4b2544952c4b6588dfed55260147d24ced02dca3a2829f328",
+    "zh:a404d46f2831f90633947ab5d57e19dbfe35b3704104ba6ec80bcf50b058acfd",
+    "zh:ae1caea1c936d459ceadf287bb5c5bd67b5e2a7819df6f5c4114b7305df7f822",
+    "zh:afb4f805477694a4b9dde86b268d2c0821711c8aab1c6088f5f992228c4c06fb",
+    "zh:b993b4a1de8a462643e78f4786789e44ce5064b332fee1cb0d6250ed085561b8",
+    "zh:c84b2c13fa3ea2c0aa7291243006d560ce480a5591294b9001ce3742fc9c5791",
+    "zh:c8966f69b7eccccb771704fd5335923692eccc9e0e90cb95d14538fe2e92a3b8",
+    "zh:d5fe68850d449b811e633a300b114d0617df6d450305e8251643b4d143dc855b",
+    "zh:ddebfd1e674ba336df09b1f27bbaa0e036c25b7a7087dc8081443f6e5954028b",
   ]
 }
+
+provider "registry.terraform.io/hashicorp/null" {
+  version     = "3.1.0"
+  constraints = "3.1.0"
+  hashes = [
+    "h1:vpC6bgUQoJ0znqIKVFevOdq+YQw42bRq0u+H3nto8nA=",
+    "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2",
+    "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515",
+    "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521",
+    "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2",
+    "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e",
+    "zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53",
+    "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d",
+    "zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8",
+    "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70",
+    "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b",
+    "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e",
+  ]
+}
@@ -0,0 +1,13 @@
resource "aws_acm_certificate" "riju" {
  domain_name               = "riju.codes"
  subject_alternative_names = ["*.riju.codes"]
  validation_method         = "DNS"

  tags = {
    Name = "Riju server"
  }
}

resource "aws_acm_certificate_validation" "riju" {
  certificate_arn = aws_acm_certificate.riju.arn
}
@@ -0,0 +1,76 @@
resource "aws_security_group" "alb" {
  name        = "riju-alb"
  description = "Security group for Riju application load balancer"

  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTPS"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_lb" "server" {
  name            = "riju-server"
  security_groups = [aws_security_group.alb.id]
  subnets         = data.aws_subnet_ids.default.ids
}

resource "aws_lb_target_group" "server" {
  name     = "riju-server-http"
  port     = 80
  protocol = "HTTP"
  vpc_id   = data.aws_vpc.default.id
}

resource "aws_lb_listener" "server_http" {
  load_balancer_arn = aws_lb.server.arn
  port              = "80"
  protocol          = "HTTP"

  default_action {
    type = "redirect"

    redirect {
      port        = "443"
      protocol    = "HTTPS"
      status_code = "HTTP_301"
    }
  }
}

resource "aws_lb_listener" "server_https" {
  load_balancer_arn = aws_lb.server.arn
  port              = "443"
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = aws_acm_certificate.riju.arn

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.server.arn
  }
}

resource "aws_autoscaling_attachment" "server" {
  count = local.ami_available ? 1 : 0

  autoscaling_group_name = aws_autoscaling_group.server[0].name
  alb_target_group_arn   = aws_lb_target_group.server.arn
}
@@ -0,0 +1,118 @@
data "aws_ami" "server" {
  count = local.ami_available ? 1 : 0

  owners = ["self"]

  filter {
    name   = "name"
    values = [data.external.env.result.AMI_NAME]
  }
}

resource "aws_security_group" "server" {
  name        = "riju-server"
  description = "Security group for Riju server"

  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTPS"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_launch_template" "server" {
  count = local.ami_available ? 1 : 0

  name          = "riju-server"
  image_id      = data.aws_ami.server[0].id
  instance_type = "t3.small"

  security_group_names = [aws_security_group.server.name]
  iam_instance_profile {
    name = aws_iam_instance_profile.server.name
  }

  update_default_version = true

  block_device_mappings {
    device_name = "/dev/sdh"
    ebs {
      volume_type = "gp3"
      volume_size = 256
    }
  }

  tags = {
    Name = "Riju server"
  }

  tag_specifications {
    resource_type = "instance"
    tags = {
      Name = "Riju server"
    }
  }
}

resource "aws_autoscaling_group" "server" {
  count = local.ami_available ? 1 : 0

  name = "riju-server"

  availability_zones = [
    for subnet in data.aws_subnet.default : subnet.availability_zone
  ]
  desired_capacity = 1
  min_size         = 1
  max_size         = 3

  launch_template {
    id = aws_launch_template.server[0].id
  }

  tags = concat(
    [
      {
        key                 = "Name"
        value               = "Riju server"
        propagate_at_launch = false
      }
    ],
    [
      for key, value in local.tags : {
        key                 = key,
        value               = value,
        propagate_at_launch = true,
      }
    ],
  )

  lifecycle {
    ignore_changes = [target_group_arns]
  }
}
@@ -0,0 +1,9 @@
resource "aws_ecr_repository" "riju" {
  name                 = "riju"
  image_tag_mutability = "MUTABLE"
}

resource "aws_ecrpublic_repository" "riju" {
  provider        = aws.us_east_1
  repository_name = "riju"
}
@@ -0,0 +1,110 @@
resource "aws_iam_user" "deploy" {
  name = "riju-deploy"
}

resource "aws_iam_access_key" "deploy" {
  user = aws_iam_user.deploy.name
}

data "aws_iam_policy_document" "deploy" {
  statement {
    actions = [
      "s3:ListBucket",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju.bucket}",
    ]
  }

  statement {
    actions = [
      "s3:*Object",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju.bucket}/*",
    ]
  }
}

resource "aws_iam_policy" "deploy" {
  name        = "riju-deploy"
  description = "Policy granting CI access to deploy Riju"
  policy      = data.aws_iam_policy_document.deploy.json
}

resource "aws_iam_user_policy_attachment" "deploy" {
  user       = aws_iam_user.deploy.name
  policy_arn = aws_iam_policy.deploy.arn
}

data "aws_iam_policy_document" "server" {
  statement {
    actions = [
      "s3:GetObject",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju.bucket}/config.json",
    ]
  }

  statement {
    actions = [
      "ecr:GetAuthorizationToken",
    ]

    resources = [
      "*",
    ]
  }

  statement {
    actions = [
      "ecr:BatchGetImage",
      "ecr:GetDownloadUrlForLayer",
    ]

    resources = [
      aws_ecr_repository.riju.arn,
    ]
  }
}

resource "aws_iam_policy" "server" {
  name        = "riju-server"
  description = "Policy granting supervisor process on Riju server ability to download from S3"
  policy      = data.aws_iam_policy_document.server.json
}

data "aws_iam_policy_document" "server_assume_role" {
  statement {
    actions = [
      "sts:AssumeRole",
    ]

    principals {
      type = "Service"
      identifiers = [
        "ec2.amazonaws.com",
      ]
    }
  }
}

resource "aws_iam_role" "server" {
  name               = "riju-server"
  description        = "Role used by supervisor process on Riju server"
  assume_role_policy = data.aws_iam_policy_document.server_assume_role.json
}

resource "aws_iam_role_policy_attachment" "server" {
  role       = aws_iam_role.server.name
  policy_arn = aws_iam_policy.server.arn
}

resource "aws_iam_instance_profile" "server" {
  name = "riju-server"
  role = aws_iam_role.server.name
}
tf/infra.tf
@@ -1,192 +0,0 @@
terraform {
  backend "s3" {
    key    = "state"
    region = "us-west-1"
  }
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 2.70"
    }
  }
}

locals {
  tags = {
    Terraform = "Managed by Terraform"
  }
}

data "external" "env" {
  program = ["jq", "-n", "env"]
}

provider "aws" {
  region = "us-west-1"
}

data "aws_region" "current" {}

resource "aws_iam_user" "deploy" {
  name = "riju-deploy"
  tags = local.tags
}

resource "aws_iam_access_key" "deploy" {
  user = aws_iam_user.deploy.name
}

data "aws_iam_policy_document" "deploy" {
  statement {
    actions = [
      "s3:ListBucket",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju_debs.bucket}",
    ]
  }

  statement {
    actions = [
      "s3:*Object",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju_debs.bucket}/*",
    ]
  }
}

resource "aws_iam_policy" "deploy" {
  name        = "riju-deploy"
  description = "Role used by CI to deploy Riju"
  policy      = data.aws_iam_policy_document.deploy.json
}

resource "aws_iam_user_policy_attachment" "deploy" {
  user       = aws_iam_user.deploy.name
  policy_arn = aws_iam_policy.deploy.arn
}

data "aws_iam_policy_document" "riju_debs" {
  statement {
    principals {
      type        = "*"
      identifiers = ["*"]
    }

    actions = [
      "s3:ListBucket",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju_debs.bucket}",
    ]
  }

  statement {
    principals {
      type        = "*"
      identifiers = ["*"]
    }

    actions = [
      "s3:GetObject",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju_debs.bucket}/*",
    ]
  }
}

resource "aws_s3_bucket" "riju_debs" {
  bucket = "${data.external.env.result.S3_BUCKET}-debs"
  tags   = local.tags
}

resource "aws_s3_bucket_policy" "riju_debs" {
  bucket = aws_s3_bucket.riju_debs.id
  policy = data.aws_iam_policy_document.riju_debs.json
}

data "aws_ami" "server" {
  owners = ["self"]

  filter {
    name   = "name"
    values = [data.external.env.result.AMI_NAME]
  }
}

resource "aws_security_group" "server" {
  name        = "riju-server"
  description = "Security group for Riju server"

  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTPS"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = local.tags
}

resource "aws_instance" "server" {
  instance_type     = "t3.small"
  ami               = data.aws_ami.server.id
  availability_zone = "${data.aws_region.current.name}b"
  security_groups   = [aws_security_group.server.name]
  tags              = local.tags
}

resource "aws_ebs_volume" "data" {
  availability_zone = "${data.aws_region.current.name}b"
  size              = 125
  type              = "gp3"
  tags              = local.tags
}

resource "aws_volume_attachment" "data" {
  device_name = "/dev/sdh"
  volume_id   = aws_ebs_volume.data.id
  instance_id = aws_instance.server.id
}

output "server_ip_address" {
  value = aws_instance.server.public_ip
}

output "deploy_aws_access_key_id" {
  value = aws_iam_access_key.deploy.id
}

output "deploy_aws_secret_access_key" {
  value = aws_iam_access_key.deploy.secret
}
@@ -0,0 +1,61 @@
terraform {
  backend "s3" {
    key    = "state"
    region = "us-west-1"
  }
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.45"
    }
    null = {
      source  = "hashicorp/null"
      version = "~> 3.1"
    }
  }
}

data "external" "env" {
  program = ["jq", "-n", "env"]
}

locals {
  tags = {
    Terraform       = "Managed by Terraform"
    BillingCategory = "Riju"
  }

  ami_available = lookup(data.external.env.result, "AMI_NAME", "") != "" ? true : false
}

provider "aws" {
  region = "us-west-1"
  default_tags {
    tags = local.tags
  }
}

provider "aws" {
  alias  = "us_east_1"
  region = "us-east-1"
  default_tags {
    tags = local.tags
  }
}

data "aws_caller_identity" "current" {}

data "aws_region" "current" {}

data "aws_vpc" "default" {
  default = true
}

data "aws_subnet_ids" "default" {
  vpc_id = data.aws_vpc.default.id
}

data "aws_subnet" "default" {
  for_each = data.aws_subnet_ids.default.ids
  id       = each.value
}
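The external data source shells out to jq -n env, which emits the entire process environment as a single JSON object for Terraform to consume (that is where S3_BUCKET and AMI_NAME come from). For illustration only, a small Go program that produces the same shape of output:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

func main() {
	env := map[string]string{}
	for _, kv := range os.Environ() {
		parts := strings.SplitN(kv, "=", 2)
		env[parts[0]] = parts[1]
	}
	out, err := json.Marshal(env)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"S3_BUCKET":"riju", ...}
}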
@@ -0,0 +1,12 @@
output "alb_dns_name" {
  value = aws_lb.server.dns_name
}

output "deploy_aws_access_key_id" {
  value = aws_iam_access_key.deploy.id
}

output "deploy_aws_secret_access_key" {
  value     = aws_iam_access_key.deploy.secret
  sensitive = true
}
@@ -0,0 +1,49 @@
resource "aws_s3_bucket" "riju" {
  bucket = data.external.env.result.S3_BUCKET
}

resource "aws_s3_bucket_public_access_block" "riju" {
  bucket = aws_s3_bucket.riju.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

data "aws_iam_policy_document" "s3" {
  statement {
    principals {
      type        = "*"
      identifiers = ["*"]
    }

    actions = [
      "s3:ListBucket",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju.bucket}",
    ]
  }

  statement {
    principals {
      type        = "*"
      identifiers = ["*"]
    }

    actions = [
      "s3:GetObject",
    ]

    resources = [
      "arn:aws:s3:::${aws_s3_bucket.riju.bucket}/*",
    ]
  }
}

resource "aws_s3_bucket_policy" "riju" {
  bucket = aws_s3_bucket.riju.id
  policy = data.aws_iam_policy_document.s3.json
}
@@ -1,97 +0,0 @@
import { promises as fs } from "fs";
import http from "http";

import express from "express";

import { getLangs, getPackages, getSharedDeps } from "./config.js";
import { getLocalImageLabel } from "./docker-util.js";
import { hashDockerfile } from "./hash-dockerfile.js";
import { runCommand } from "./util.js";

// Number of package installation layers in the composite Docker
// image. This needs to match the number of installation RUN commands
// in the composite Dockerfile.
const NUM_SHARDS = 10;

// Get a Node.js http server object that will serve information and
// files for packages that should be installed into the composite
// Docker image.
function getServer({ shards }) {
  const app = express();
  app.get("/shard/:shard", (req, res) => {
    res.send(
      shards[parseInt(req.params.shard)]
        .map(({ debPath }) => debPath + "\n")
        .join("")
    );
  });
  app.use("/fs", express.static("."));
  return http.createServer(app);
}

// Given a list of the packages to be built, split them into shards.
// Return a list of shards. Each shard is a list of the package
// objects, such that there are NUM_SHARDS shards. Traversing each
// shard in order will return the packages in the same order as the
// original list.
//
// Currently this uses an extremely simple algorithm, but that might
// be improved in the future.
function getShards(pkgs) {
  const shards = [];
  for (let i = 0; i < NUM_SHARDS; ++i) {
    shards.push([]);
  }
  const shardSize = Math.ceil(pkgs.length / NUM_SHARDS);
  for (let i = 0; i < pkgs.length; ++i) {
    shards[Math.floor(i / shardSize)].push(pkgs[i]);
  }
  return shards;
}

// Parse command-line arguments, run main functionality, and exit.
async function main() {
  const packages = await getPackages();
  const hash = await hashDockerfile(
    "composite",
    {
      "riju:runtime": await getLocalImageLabel(
        "riju:runtime",
        "riju.image-hash"
      ),
    },
    {
      salt: {
        packageHashes: (
          await Promise.all(
            packages.map(async ({ debPath }) => {
              return (
                await runCommand(`dpkg-deb -f ${debPath} Riju-Script-Hash`, {
                  getStdout: true,
                })
              ).stdout.trim();
            })
          )
        ).sort(),
      },
    }
  );
  const server = getServer({
    shards: getShards(packages),
  });
  await new Promise((resolve) => server.listen(8487, "localhost", resolve));
  try {
    await runCommand(
      `docker build . -f docker/composite/Dockerfile -t riju:composite` +
        ` --network host --no-cache --label riju.image-hash=${hash}`
    );
  } finally {
    await server.close();
  }
  process.exit(0);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
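The deleted script's getShards splits the package list into NUM_SHARDS order-preserving chunks, so that traversing the shards in order yields the original list. The same simple algorithm as a standalone Go sketch:

package main

import "fmt"

// getShards splits items into numShards slices, preserving order.
func getShards(items []string, numShards int) [][]string {
	shards := make([][]string, numShards)
	shardSize := (len(items) + numShards - 1) / numShards // ceiling division
	for i, item := range items {
		shards[i/shardSize] = append(shards[i/shardSize], item)
	}
	return shards
}

func main() {
	fmt.Println(getShards([]string{"a", "b", "c", "d", "e"}, 2))
	// Output: [[a b c] [d e]]
}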
|
@ -0,0 +1,86 @@
|
|||
import crypto from "crypto";
|
||||
import { promises as fs } from "fs";
|
||||
import http from "http";
|
||||
import url from "url";
|
||||
|
||||
import { Command } from "commander";
|
||||
import express from "express";
|
||||
|
||||
import { getSharedDepsForLangConfig, readLangConfig } from "../lib/yaml.js";
|
||||
import { getLocalImageLabel } from "./docker-util.js";
|
||||
import { hashDockerfile } from "./hash-dockerfile.js";
|
||||
import { getDebHash, runCommand } from "./util.js";
|
||||
|
||||
// Get a Node.js http server object that will allow the Docker
|
||||
// build to fetch files from outside the container, without them
|
||||
// being in the build context.
|
||||
function getServer() {
|
||||
const app = express();
|
||||
app.use("/fs", express.static("."));
|
||||
return http.createServer(app);
|
||||
}
|
||||
|
||||
// Parse command-line arguments, run main functionality, and exit.
|
||||
async function main() {
|
||||
const program = new Command();
|
||||
program.requiredOption("--lang <id>", "language ID");
|
||||
program.option("--debug", "interactive debugging");
|
||||
program.parse(process.argv);
|
||||
const { lang, debug } = program.opts();
|
||||
const sharedDeps = await getSharedDepsForLangConfig(await readLangConfig(lang));
|
||||
const installContents = await fs.readFile(
|
||||
`build/lang/${lang}/install.bash`,
|
||||
"utf-8"
|
||||
);
|
||||
const sharedInstallContents = await Promise.all(sharedDeps.map(
|
||||
async (name) => fs.readFile(`build/shared/${name}/install.bash`),
|
||||
));
|
||||
const allInstallContents = [].concat.apply([installContents], sharedInstallContents);
|
||||
const hash = await hashDockerfile(
|
||||
"lang",
|
||||
{
|
||||
"riju:base": await getLocalImageLabel("riju:base", "riju.image-hash"),
|
||||
},
|
||||
{
|
||||
salt: {
|
||||
langHash: await getDebHash(`build/lang/${lang}/riju-lang-${lang}.deb`),
|
||||
sharedHashes: (
|
||||
await Promise.all(
|
||||
sharedDeps.map(
|
||||
async (name) =>
|
||||
await getDebHash(`build/shared/${name}/riju-shared-${name}.deb`)
|
||||
)
|
||||
)
|
||||
).sort(),
|
||||
installHash: allInstallContents.map(
|
||||
(c) => crypto.createHash("sha1").update(c).digest("hex"),
|
||||
).join(""),
|
||||
},
|
||||
}
|
||||
);
|
||||
const server = getServer();
|
||||
await new Promise((resolve) => server.listen(8487, "localhost", resolve));
|
||||
try {
|
||||
if (debug) {
|
||||
await runCommand(
|
||||
`docker run -it --rm -e LANG=${lang} -w /tmp/riju-work --network host base:runtime`
|
||||
);
|
||||
} else {
|
||||
await runCommand(
|
||||
`docker build . -f docker/lang/Dockerfile ` +
|
||||
`--build-arg LANG=${lang} -t riju:lang-${lang} ` +
|
||||
`--network host --no-cache --label riju.image-hash=${hash}`
|
||||
);
|
||||
}
|
||||
} finally {
|
||||
await server.close();
|
||||
}
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (process.argv[1] === url.fileURLToPath(import.meta.url)) {
|
||||
main().catch((err) => {
|
||||
console.error(err);
|
||||
process.exit(1);
|
||||
});
|
||||
}
|
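build-lang-image.js derives the riju.image-hash label from a salt containing the language's .deb hash, the shared-dependency .deb hashes, and the install-script hashes, so a change to any input yields a new image hash and forces a rebuild. A hypothetical Go helper (not a function from the repo) showing the general shape of such a combination:

package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"
)

// combinedHash hashes each input, sorts the digests for determinism
// (like the sorted sharedHashes salt), then hashes the concatenation.
func combinedHash(inputs []string) string {
	hashes := make([]string, 0, len(inputs))
	for _, in := range inputs {
		hashes = append(hashes, fmt.Sprintf("%x", sha1.Sum([]byte(in))))
	}
	sort.Strings(hashes)
	sum := sha1.Sum([]byte(strings.Join(hashes, "")))
	return fmt.Sprintf("%x", sum)
}

func main() {
	// Any change to either input changes the combined hash.
	fmt.Println(combinedHash([]string{"deb contents", "install.bash contents"}))
}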