Add code formatters for Riju itself
parent 85d0a42371
commit 0bec96b88b

.clang-format (new file; the name is inferred from the clang-format option it contains)
@@ -0,0 +1 @@
+BreakBeforeBraces: Linux

19 Makefile
@@ -255,6 +255,25 @@ deploy-latest: # Upload deployment config to S3 and update ASG instances
 
 deploy: deploy-config deploy-latest # Shorthand for deploy-config followed by deploy-latest
 
+### Code formatting
+
+fmt-c: # Format C code
+	git ls-files | grep -E '\.c$$' | xargs clang-format -i
+
+fmt-go: # Format Go code
+	git ls-files | grep -E '\.go$$' | xargs gofmt -l -w
+
+fmt-python: # Format Python code
+	git ls-files | grep -E '\.py$$' | xargs black -q
+
+fmt-terraform: # Format Terraform code
+	terraform fmt "$(PWD)/tf"
+
+fmt-web: # Format CSS, JSON, and YAML code
+	git ls-files | grep -E '\.(css|c?js|json|ya?ml)$$' | grep -Ev '^(langs|shared)/' | xargs prettier --write --loglevel=warn
+
+fmt: fmt-c fmt-go fmt-python fmt-terraform fmt-web # Format all code
+
 ### Infrastructure
 
 packer: supervisor # Build and publish a new AMI
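For reference, the new targets can be run one at a time or through the aggregate target. A minimal session (a sketch, assuming clang-format, gofmt, black, terraform, and prettier are all available on PATH) might look like:

	make fmt-go        # gofmt -l -w rewrites every tracked .go file in place
	make fmt-terraform # terraform fmt normalizes everything under tf/
	make fmt           # run all five formatters in sequence
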
@@ -95,7 +95,7 @@ export class Session {
           event: "serviceLog",
           service: "container",
           output: line + "\n",
-        })
+        });
       }
     }
   });

@@ -293,16 +293,8 @@ export class Session {
 
   runCode = async (code) => {
     try {
-      const {
-        name,
-        repl,
-        main,
-        suffix,
-        createEmpty,
-        compile,
-        run,
-        template,
-      } = this.config;
+      const { name, repl, main, suffix, createEmpty, compile, run, template } =
+        this.config;
       if (this.term) {
         try {
           process.kill(this.term.pty.pid);

@@ -433,9 +425,11 @@ export class Session {
   };
 
   ensure = async (cmd) => {
-    const code = (await this.run(this.privilegedExec(cmd), {
-      check: false,
-    })).code;
+    const code = (
+      await this.run(this.privilegedExec(cmd), {
+        check: false,
+      })
+    ).code;
     this.send({ event: "ensured", code });
   };
 
@@ -44,4 +44,7 @@ async function updateLangsFromDisk() {
 
 export const langsPromise = updateLangsFromDisk().then(() => langs);
 
-export const langWatcher = fsOrig.watch("langs", debounce(updateLangsFromDisk, 200));
+export const langWatcher = fsOrig.watch(
+  "langs",
+  debounce(updateLangsFromDisk, 200)
+);
@@ -57,7 +57,7 @@ async function main() {
     { uuid },
     bash(
       `env L='${lang}' LANG_CONFIG=${quote(
-        JSON.stringify(langConfig),
+        JSON.stringify(langConfig)
       )} bash --rcfile <(cat <<< ${quote(sandboxScript)})`
     )
   );
@@ -318,32 +318,8 @@ class Test {
           dynamicRegistration: true,
           symbolKind: {
             valueSet: [
-              1,
-              2,
-              3,
-              4,
-              5,
-              6,
-              7,
-              8,
-              9,
-              10,
-              11,
-              12,
-              13,
-              14,
-              15,
-              16,
-              17,
-              18,
-              19,
-              20,
-              21,
-              22,
-              23,
-              24,
-              25,
-              26,
+              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+              18, 19, 20, 21, 22, 23, 24, 25, 26,
             ],
           },
         },

@@ -376,31 +352,8 @@ class Test {
           },
           completionItemKind: {
             valueSet: [
-              1,
-              2,
-              3,
-              4,
-              5,
-              6,
-              7,
-              8,
-              9,
-              10,
-              11,
-              12,
-              13,
-              14,
-              15,
-              16,
-              17,
-              18,
-              19,
-              20,
-              21,
-              22,
-              23,
-              24,
-              25,
+              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+              18, 19, 20, 21, 22, 23, 24, 25,
             ],
           },
         },

@@ -423,32 +376,8 @@ class Test {
           dynamicRegistration: true,
           symbolKind: {
             valueSet: [
-              1,
-              2,
-              3,
-              4,
-              5,
-              6,
-              7,
-              8,
-              9,
-              10,
-              11,
-              12,
-              13,
-              14,
-              15,
-              16,
-              17,
-              18,
-              19,
-              20,
-              21,
-              22,
-              23,
-              24,
-              25,
-              26,
+              1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+              18, 19, 20, 21, 22, 23, 24, 25, 26,
             ],
           },
           hierarchicalDocumentSymbolSupport: true,
@@ -638,9 +567,11 @@ async function writeLog(lang, type, result, log) {
 }
 
 async function getImageHash(tag) {
-  const output = (await run(["docker", "inspect", `riju:${tag}`], console.error, {
-    suppressOutput: true,
-  })).output;
+  const output = (
+    await run(["docker", "inspect", `riju:${tag}`], console.error, {
+      suppressOutput: true,
+    })
+  ).output;
   return JSON.parse(output)[0].Config.Labels["riju.image-hash"];
 }
 

@@ -777,7 +708,7 @@ async function main() {
     await fs.mkdir(`build/test-hashes/lang`, { recursive: true });
     await fs.writeFile(
       `build/test-hashes/lang/${lang}`,
-      await getTestHash(lang, runtimeHash, langHashes[lang]),
+      await getTestHash(lang, runtimeHash, langHashes[lang])
     );
   }
   process.exit(failed.size > 0 ? 1 : 0);
@@ -6,12 +6,11 @@ import { v4 as getUUIDOrig } from "uuid";
 
 function computeImageHashes() {
   let deployConfig = process.env.RIJU_DEPLOY_CONFIG;
-  if (!deployConfig)
-    return {};
+  if (!deployConfig) return {};
   deployConfig = JSON.parse(deployConfig);
   const imageHashes = {};
   for (const [lang, tag] of Object.entries(deployConfig.langImageTags)) {
-    const prefix = `lang-${lang}-`
+    const prefix = `lang-${lang}-`;
     if (!tag.startsWith(prefix)) {
       throw new Error(`malformed tag ${tag}`);
     }
@@ -32,7 +32,9 @@ packages="
 
 apt-file
 bind9-dnsutils
+black
 clang
+clang-format
 dctrl-tools
 docker-ce-cli
 g++

@@ -47,6 +49,7 @@ man
 moreutils
 nodejs
 packer
+prettier
 psmisc
 python3-pip
 pwgen

@@ -68,6 +71,8 @@ apt-get install -y $(sed 's/#.*//' <<< "${packages}")
 
 pip3 install ec2instanceconnectcli
 
+npm install -g prettier
+
 wget -nv https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip -O awscli.zip
 unzip -q awscli.zip
 ./aws/install
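A quick sanity check (hypothetical, not part of the commit) that every formatter the new Makefile targets depend on actually landed on PATH after provisioning:

	command -v clang-format gofmt black terraform prettier
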
@@ -82,7 +82,9 @@ export async function getTestHash(lang, runtimeImageHash, langImageHash) {
   return crypto
     .createHash("sha1")
     .update(
-      `${await testRunnerHash},${await getTestConfigHash(lang)},${runtimeImageHash},${langImageHash}`
+      `${await testRunnerHash},${await getTestConfigHash(
+        lang
+      )},${runtimeImageHash},${langImageHash}`
     )
     .digest("hex");
 }
 
@@ -6,7 +6,6 @@ type: object
 additionalProperties: false
 required: [id, name, main, template, run]
 properties:
-
   id:
     title: "Canonical language ID"
     description: |
@@ -27,7 +27,8 @@ const jsonSchemaPromise = readJSONSchemaFromDisk();
 export async function getLangs() {
   return (await fs.readdir("langs"))
     .filter((lang) => lang.endsWith(".yaml"))
-    .map((lang) => path.parse(lang).name).sort();
+    .map((lang) => path.parse(lang).name)
+    .sort();
 }
 
 // Return a list of the IDs of all the configured shared dependencies.

@@ -123,5 +124,5 @@ export async function readSharedDepConfig(lang) {
 // dependency names, or an empty list if none are configured for this
 // language. The return value is sorted.
 export async function getSharedDepsForLangConfig(langConfig) {
-  return [...(langConfig.install && langConfig.install.riju) || []].sort();
+  return [...((langConfig.install && langConfig.install.riju) || [])].sort();
 }
@@ -16,24 +16,16 @@
     ],
     "metrics_collected": {
       "cpu": {
-        "measurement": [
-          "usage_active"
-        ],
+        "measurement": ["usage_active"],
         "metrics_collection_interval": 60
       },
       "disk": {
-        "measurement": [
-          "used_percent"
-        ],
+        "measurement": ["used_percent"],
         "metrics_collection_interval": 60,
-        "resources": [
-          "*"
-        ]
+        "resources": ["*"]
       },
       "mem": {
-        "measurement": [
-          "mem_used_percent"
-        ],
+        "measurement": ["mem_used_percent"],
         "metrics_collection_interval": 60
       }
     }
@@ -38,13 +38,13 @@ const blueName = "riju-app-blue"
 const greenName = "riju-app-green"
 
 type deploymentConfig struct {
-	AppImageTag string `json:"appImageTag"`
+	AppImageTag   string            `json:"appImageTag"`
 	LangImageTags map[string]string `json:"langImageTags"`
 }
 
 type supervisorConfig struct {
 	AccessToken string `env:"SUPERVISOR_ACCESS_TOKEN,notEmpty"`
-	S3Bucket string `env:"S3_BUCKET,notEmpty"`
+	S3Bucket    string `env:"S3_BUCKET,notEmpty"`
 }
 
 type reloadJob struct {

@@ -56,22 +56,22 @@ type reloadJob struct {
 type supervisor struct {
 	config supervisorConfig
 
-	blueProxyHandler http.Handler
+	blueProxyHandler  http.Handler
 	greenProxyHandler http.Handler
-	isGreen bool // blue-green deployment
-	deployConfigHash string
+	isGreen           bool // blue-green deployment
+	deployConfigHash  string
 
 	awsAccountNumber string
-	awsRegion string
-	s3 *s3.Client
-	ecr *ecr.Client
+	awsRegion        string
+	s3               *s3.Client
+	ecr              *ecr.Client
 
-	reloadLock sync.Mutex
+	reloadLock       sync.Mutex
 	reloadInProgress bool
-	reloadNeeded bool
-	reloadUUID string
-	reloadNextUUID string
-	reloadJobs map[string]*reloadJob
+	reloadNeeded     bool
+	reloadUUID       string
+	reloadNextUUID   string
+	reloadJobs       map[string]*reloadJob
 }
 
 func (sv *supervisor) status(status string) {
@@ -113,7 +113,7 @@ func (sv *supervisor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "401 malformed Authorization header", http.StatusUnauthorized)
 		return
 	}
-	if authHeader != "Bearer " + sv.config.AccessToken {
+	if authHeader != "Bearer "+sv.config.AccessToken {
 		http.Error(w, "401 wrong access token", http.StatusUnauthorized)
 		return
 	}

@@ -149,11 +149,11 @@ func (sv *supervisor) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 			http.Error(w, "404 no such job", http.StatusNotFound)
 		}
 	} else if job.active {
-		fmt.Fprintln(w, "active: " + job.status)
+		fmt.Fprintln(w, "active: "+job.status)
 	} else if job.failed {
-		fmt.Fprintln(w, "failed: " + job.status)
+		fmt.Fprintln(w, "failed: "+job.status)
 	} else {
-		fmt.Fprintln(w, "succeeded: " + job.status)
+		fmt.Fprintln(w, "succeeded: "+job.status)
 	}
 	sv.reloadLock.Unlock()
 	return
@@ -256,7 +256,7 @@ func (sv *supervisor) reload() error {
 	buf := s3manager.NewWriteAtBuffer([]byte{})
 	if _, err := dl.Download(context.Background(), buf, &s3.GetObjectInput{
 		Bucket: &sv.config.S3Bucket,
-		Key: aws.String("config.json"),
+		Key:    aws.String("config.json"),
 	}); err != nil {
 		return err
 	}
@@ -575,16 +575,16 @@ func main() {
 	}
 
 	sv := &supervisor{
-		config: supervisorCfg,
-		blueProxyHandler: httputil.NewSingleHostReverseProxy(blueUrl),
+		config:            supervisorCfg,
+		blueProxyHandler:  httputil.NewSingleHostReverseProxy(blueUrl),
 		greenProxyHandler: httputil.NewSingleHostReverseProxy(greenUrl),
-		isGreen: isGreen,
-		deployConfigHash: deployCfgHash,
-		s3: s3.NewFromConfig(awsCfg),
-		ecr: ecr.NewFromConfig(awsCfg),
-		awsRegion: awsCfg.Region,
-		awsAccountNumber: *ident.Account,
-		reloadJobs: map[string]*reloadJob{},
+		isGreen:           isGreen,
+		deployConfigHash:  deployCfgHash,
+		s3:                s3.NewFromConfig(awsCfg),
+		ecr:               ecr.NewFromConfig(awsCfg),
+		awsRegion:         awsCfg.Region,
+		awsAccountNumber:  *ident.Account,
+		reloadJobs:        map[string]*reloadJob{},
 	}
 	go sv.scheduleReload()
 	log.Println("listening on http://0.0.0.0:80")
 
@@ -6,6 +6,7 @@ import subprocess
 import sys
 import uuid
 
+
 class Parser(argparse.ArgumentParser):
     def format_help(self):
         return """

@@ -19,6 +20,7 @@ Options:
   -u, --user string  Username or UID (format: <name|uid>:[<group|gid>])
 """
 
+
 parser = Parser()
 parser.add_argument("-i", "--interactive", action="store_true")
 parser.add_argument("-t", "--tty", action="store_true")
@@ -34,20 +36,23 @@ pidfile = pidfiles + "/" + str(uuid.uuid4()).replace("-", "")
 # We have to use 'kill -9' here, otherwise runuser intercepts the
 # signal and takes its sweet time cleaning up.
 def cleanup(*ignored_args):
-    subprocess.run([
-        "docker",
-        "exec",
-        args.container,
-        "bash",
-        "-c",
-        f"""
+    subprocess.run(
+        [
+            "docker",
+            "exec",
+            args.container,
+            "bash",
+            "-c",
+            f"""
 set -euo pipefail
 if [[ -f '{pidfile}' ]]; then
     kill -9 -$(< '{pidfile}') 2>/dev/null || true
     rm -f '{pidfile}'
 fi
-"""
-    ])
+""",
+        ]
+    )
 
+
 signal.signal(signal.SIGINT, cleanup)
 signal.signal(signal.SIGTERM, cleanup)
@@ -64,21 +69,25 @@ runuser_args = []
 if args.user:
     runuser_args = ["runuser", "-u", args.user, "--"]
 
-sys.exit(subprocess.run([
-    "docker",
-    "exec",
-    *exec_args,
-    args.container,
-    "bash",
-    "-c",
-    f"""
+sys.exit(
+    subprocess.run(
+        [
+            "docker",
+            "exec",
+            *exec_args,
+            args.container,
+            "bash",
+            "-c",
+            f"""
 set -euo pipefail
 umask 077
 mkdir -p '{pidfiles}'
 echo "$$" > '{pidfile}'
 exec "$@"
 """,
-    "--",
-    *runuser_args,
-    *args.arg,
-]).returncode)
+            "--",
+            *runuser_args,
+            *args.arg,
+        ]
+    ).returncode
+)
 
@@ -1,6 +1,6 @@
 #define _GNU_SOURCE
-#include <fcntl.h>
 #include <errno.h>
+#include <fcntl.h>
 #include <grp.h>
 #include <signal.h>
 #include <stdbool.h>

@@ -13,7 +13,7 @@
 #include <time.h>
 #include <unistd.h>
 
-void __attribute__ ((noreturn)) die(char *msg)
+void __attribute__((noreturn)) die(char *msg)
 {
   fprintf(stderr, "%s\n", msg);
   exit(1);
@@ -37,7 +37,8 @@ char *parseUUID(char *uuid)
   return uuid;
 }
 
-char *parseLang(char *lang) {
+char *parseLang(char *lang)
+{
   size_t len = strnlen(lang, 65);
   if (len == 0 || len > 64)
     die("illegal language name");

@@ -63,9 +64,8 @@ void wait_alarm(int signum)
 void session(char *uuid, char *lang, char *imageHash)
 {
   char *image, *container, *hostname, *volume, *fifo;
-  if ((imageHash != NULL ?
-       asprintf(&image, "riju:lang-%s-%s", lang, imageHash) :
-       asprintf(&image, "riju:lang-%s", lang)) < 0)
+  if ((imageHash != NULL ? asprintf(&image, "riju:lang-%s-%s", lang, imageHash)
+                         : asprintf(&image, "riju:lang-%s", lang)) < 0)
     die("asprintf failed");
   if (asprintf(&container, "riju-session-%s", uuid) < 0)
     die("asprintf failed");
@@ -88,37 +88,60 @@ void session(char *uuid, char *lang, char *imageHash)
     die("fork failed");
   else if (pid == 0) {
     char *argv[] = {
-      "docker",
-      "run",
-      "--rm",
-      "-v", volume,
-      "-e", "HOME=/home/riju",
-      "-e", hostname,
-      "-e", "LANG=C.UTF-8",
-      "-e", "LC_ALL=C.UTF-8",
-      "-e", "LOGNAME=riju",
-      "-e", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin",
-      "-e", "PWD=/home/riju/src",
-      "-e", "SHELL=/usr/bin/bash",
-      "-e", "TERM=xterm-256color",
-      "-e", "TMPDIR=/tmp",
-      "-e", "USER=riju",
-      "-e", "USERNAME=riju",
-      "--user", "root",
-      "--hostname", lang,
-      "--name", container,
-      "--cpus", "1",
-      "--memory", "1g",
-      "--memory-swap", "3g",
-      "--pids-limit", "512",
-      image, "bash", "-c",
-      "cat /var/run/riju/sentinel/fifo | ( sleep 10; while read -t2; do :; done; pkill -g0 )",
-      NULL,
+      "docker",
+      "run",
+      "--rm",
+      "-v",
+      volume,
+      "-e",
+      "HOME=/home/riju",
+      "-e",
+      hostname,
+      "-e",
+      "LANG=C.UTF-8",
+      "-e",
+      "LC_ALL=C.UTF-8",
+      "-e",
+      "LOGNAME=riju",
+      "-e",
+      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin",
+      "-e",
+      "PWD=/home/riju/src",
+      "-e",
+      "SHELL=/usr/bin/bash",
+      "-e",
+      "TERM=xterm-256color",
+      "-e",
+      "TMPDIR=/tmp",
+      "-e",
+      "USER=riju",
+      "-e",
+      "USERNAME=riju",
+      "--user",
+      "root",
+      "--hostname",
+      lang,
+      "--name",
+      container,
+      "--cpus",
+      "1",
+      "--memory",
+      "1g",
+      "--memory-swap",
+      "3g",
+      "--pids-limit",
+      "512",
+      image,
+      "bash",
+      "-c",
+      "cat /var/run/riju/sentinel/fifo | ( sleep 10; while read -t2; do :; "
+      "done; pkill -g0 )",
+      NULL,
     };
     execvp(argv[0], argv);
     die("execvp failed");
   }
-  struct timespec ts_10ms;  // 10ms
+  struct timespec ts_10ms; // 10ms
   ts_10ms.tv_sec = 0;
   ts_10ms.tv_nsec = 1000 * 1000 * 10;
   signal(SIGALRM, wait_alarm);
@@ -143,7 +166,7 @@ void session(char *uuid, char *lang, char *imageHash)
   if (pid < 0)
     die("fork failed");
   else if (pid == 0) {
-    struct timespec ts_1s;  // 10ms
+    struct timespec ts_1s; // 10ms
     ts_1s.tv_sec = 1;
     ts_1s.tv_nsec = 0;
     while (1) {

@@ -155,7 +178,7 @@ void session(char *uuid, char *lang, char *imageHash)
       die("nanosleep failed");
     }
   }
-  printf("riju: container ready\n");  // magic string
+  printf("riju: container ready\n"); // magic string
   if (waitpid(pid, NULL, 0) <= 0)
     die("waitpid failed");
   if (close(fd) < 0)
@@ -168,11 +191,12 @@ void exec(char *uuid, int argc, char **cmdline, bool pty)
   if (asprintf(&container, "riju-session-%s", uuid) < 0)
     die("asprintf failed");
   char *argvPrefix[] = {
-    "./system/res/docker-exec.py",
-    "--user", "riju",
-    pty ? "-it" : "-i",
-    container,
-    "--",
+    "./system/res/docker-exec.py",
+    "--user",
+    "riju",
+    pty ? "-it" : "-i",
+    container,
+    "--",
   };
   char **argv = malloc(sizeof(argvPrefix) + (argc + 1) * sizeof(char *));
   if (argv == NULL)
 
@@ -1,7 +1,7 @@
 resource "aws_acm_certificate" "riju" {
-  domain_name = "riju.codes"
+  domain_name               = "riju.codes"
   subject_alternative_names = ["*.riju.codes"]
-  validation_method = "DNS"
+  validation_method         = "DNS"
 
   tags = {
     Name = "Riju server"

32 tf/alb.tf
@@ -27,30 +27,30 @@ resource "aws_security_group" "alb" {
 }
 
 resource "aws_lb" "server" {
-  name = "riju-server"
+  name            = "riju-server"
   security_groups = [aws_security_group.alb.id]
-  subnets = data.aws_subnet_ids.default.ids
-  idle_timeout = 3600
+  subnets         = data.aws_subnet_ids.default.ids
+  idle_timeout    = 3600
 }
 
 resource "aws_lb_target_group" "server" {
-  name = "riju-server-http"
-  port = 80
+  name     = "riju-server-http"
+  port     = 80
   protocol = "HTTP"
-  vpc_id = data.aws_vpc.default.id
+  vpc_id   = data.aws_vpc.default.id
 }
 
 resource "aws_lb_listener" "server_http" {
   load_balancer_arn = aws_lb.server.arn
-  port = "80"
-  protocol = "HTTP"
+  port              = "80"
+  protocol          = "HTTP"
 
   default_action {
     type = "redirect"
 
     redirect {
-      port = "443"
-      protocol = "HTTPS"
+      port        = "443"
+      protocol    = "HTTPS"
       status_code = "HTTP_301"
     }
   }
@@ -58,13 +58,13 @@ resource "aws_lb_listener" "server_http" {
 
 resource "aws_lb_listener" "server_https" {
   load_balancer_arn = aws_lb.server.arn
-  port = "443"
-  protocol = "HTTPS"
-  ssl_policy = "ELBSecurityPolicy-2016-08"
-  certificate_arn = aws_acm_certificate.riju.arn
+  port              = "443"
+  protocol          = "HTTPS"
+  ssl_policy        = "ELBSecurityPolicy-2016-08"
+  certificate_arn   = aws_acm_certificate.riju.arn
 
   default_action {
-    type = "forward"
+    type             = "forward"
     target_group_arn = aws_lb_target_group.server.arn
   }
 }
@@ -73,5 +73,5 @@ resource "aws_autoscaling_attachment" "server" {
   count = local.ami_available ? 1 : 0
 
   autoscaling_group_name = aws_autoscaling_group.server[count.index].name
-  alb_target_group_arn = aws_lb_target_group.server.arn
+  alb_target_group_arn   = aws_lb_target_group.server.arn
 }
 
@@ -15,17 +15,17 @@ data "aws_ami" "ubuntu" {
   owners = ["099720109477"]
 
   filter {
-    name = "name"
+    name   = "name"
     values = ["ubuntu/images/hvm-ssd/ubuntu-*-21.04-amd64-server-*"]
   }
 
   filter {
-    name = "root-device-type"
+    name   = "root-device-type"
     values = ["ebs"]
   }
 
   filter {
-    name = "virtualization-type"
+    name   = "virtualization-type"
     values = ["hvm"]
   }
 

22 tf/asg.tf
@@ -37,8 +37,8 @@ resource "aws_security_group" "server" {
 resource "aws_launch_template" "server" {
   count = local.ami_available ? 1 : 0
 
-  name = "riju-server"
-  image_id = data.aws_ami.server[count.index].id
+  name          = "riju-server"
+  image_id      = data.aws_ami.server[count.index].id
   instance_type = "t3.medium"
 
   security_group_names = [aws_security_group.server.name]
@@ -51,8 +51,8 @@ resource "aws_launch_template" "server" {
   block_device_mappings {
     device_name = "/dev/sdh"
     ebs {
-      volume_type   = "gp3"
-      volume_size   = 256
+      volume_type = "gp3"
+      volume_size = 256
     }
   }
 
@@ -77,8 +77,8 @@ resource "aws_autoscaling_group" "server" {
     for subnet in data.aws_subnet.default : subnet.availability_zone
   ]
   desired_capacity = 1
-  min_size = 1
-  max_size = 3
+  min_size         = 1
+  max_size         = 3
 
   launch_template {
     id = aws_launch_template.server[count.index].id
@@ -87,19 +87,19 @@ resource "aws_autoscaling_group" "server" {
   tags = concat(
     [
       {
-        key = "Name"
-        value = "Riju server"
+        key                 = "Name"
+        value               = "Riju server"
         propagate_at_launch = false
       }
     ],
     [
       for key, value in local.tags : {
-        key = key,
-        value = value,
+        key                 = key,
+        value               = value,
         propagate_at_launch = true,
       }
     ],
-    )
+  )
 
   lifecycle {
     ignore_changes = [target_group_arns]
@@ -6,9 +6,9 @@ resource "aws_backup_plan" "riju" {
   name = "riju"
 
   rule {
-    rule_name = "riju"
+    rule_name         = "riju"
     target_vault_name = aws_backup_vault.riju.name
-    schedule = "cron(0 5 ? * * *)"
+    schedule          = "cron(0 5 ? * * *)"
 
     lifecycle {
       delete_after = 3

@@ -24,8 +24,8 @@ resource "aws_backup_selection" "riju" {
   count = local.ssh_key_available ? 1 : 0
 
   iam_role_arn = aws_iam_role.backup.arn
-  name = "riju"
-  plan_id = aws_backup_plan.riju.id
+  name         = "riju"
+  plan_id      = aws_backup_plan.riju.id
 
   resources = [
     aws_instance.dev_server[count.index].arn,
 
@@ -1,17 +1,17 @@
 resource "aws_cloudwatch_metric_alarm" "server_cpu" {
   count = local.ami_available ? 1 : 0
 
-  alarm_name = "riju-server-cpu-high"
-  comparison_operator = "GreaterThanOrEqualToThreshold"
-  evaluation_periods = "30"
-  metric_name = "cpu_usage_active"
-  namespace = "CWAgent"
-  period = "60"
-  statistic = "Maximum"
-  threshold = "90"
-  alarm_description = "CPU usage on Riju server is above 90% for 30 minutes"
-  ok_actions = [aws_sns_topic.riju.arn]
-  alarm_actions = [aws_sns_topic.riju.arn]
+  alarm_name                = "riju-server-cpu-high"
+  comparison_operator       = "GreaterThanOrEqualToThreshold"
+  evaluation_periods        = "30"
+  metric_name               = "cpu_usage_active"
+  namespace                 = "CWAgent"
+  period                    = "60"
+  statistic                 = "Maximum"
+  threshold                 = "90"
+  alarm_description         = "CPU usage on Riju server is above 90% for 30 minutes"
+  ok_actions                = [aws_sns_topic.riju.arn]
+  alarm_actions             = [aws_sns_topic.riju.arn]
   insufficient_data_actions = [aws_sns_topic.riju.arn]
   dimensions = {
     AutoScalingGroupName = aws_autoscaling_group.server[count.index].name
@@ -21,17 +21,17 @@ resource "aws_cloudwatch_metric_alarm" "server_cpu" {
 resource "aws_cloudwatch_metric_alarm" "server_memory" {
   count = local.ami_available ? 1 : 0
 
-  alarm_name = "riju-server-memory-high"
-  comparison_operator = "GreaterThanOrEqualToThreshold"
-  evaluation_periods = "30"
-  metric_name = "mem_used_percent"
-  namespace = "CWAgent"
-  period = "60"
-  statistic = "Maximum"
-  threshold = "80"
-  alarm_description = "Memory usage on Riju server is above 80% for 30 minutes"
-  ok_actions = [aws_sns_topic.riju.arn]
-  alarm_actions = [aws_sns_topic.riju.arn]
+  alarm_name                = "riju-server-memory-high"
+  comparison_operator       = "GreaterThanOrEqualToThreshold"
+  evaluation_periods        = "30"
+  metric_name               = "mem_used_percent"
+  namespace                 = "CWAgent"
+  period                    = "60"
+  statistic                 = "Maximum"
+  threshold                 = "80"
+  alarm_description         = "Memory usage on Riju server is above 80% for 30 minutes"
+  ok_actions                = [aws_sns_topic.riju.arn]
+  alarm_actions             = [aws_sns_topic.riju.arn]
   insufficient_data_actions = [aws_sns_topic.riju.arn]
   dimensions = {
     AutoScalingGroupName = aws_autoscaling_group.server[count.index].name
@@ -41,42 +41,42 @@ resource "aws_cloudwatch_metric_alarm" "server_memory" {
 resource "aws_cloudwatch_metric_alarm" "server_data_volume_disk_space" {
   count = local.ami_available ? 1 : 0
 
-  alarm_name = "riju-server-data-volume-disk-usage-high"
-  comparison_operator = "GreaterThanOrEqualToThreshold"
-  evaluation_periods = "30"
-  metric_name = "disk_used_percent"
-  namespace = "CWAgent"
-  period = "60"
-  statistic = "Maximum"
-  threshold = "90"
-  alarm_description = "Disk space usage for data volume on Riju server is above 90% for 30 minutes"
-  ok_actions = [aws_sns_topic.riju.arn]
-  alarm_actions = [aws_sns_topic.riju.arn]
+  alarm_name                = "riju-server-data-volume-disk-usage-high"
+  comparison_operator       = "GreaterThanOrEqualToThreshold"
+  evaluation_periods        = "30"
+  metric_name               = "disk_used_percent"
+  namespace                 = "CWAgent"
+  period                    = "60"
+  statistic                 = "Maximum"
+  threshold                 = "90"
+  alarm_description         = "Disk space usage for data volume on Riju server is above 90% for 30 minutes"
+  ok_actions                = [aws_sns_topic.riju.arn]
+  alarm_actions             = [aws_sns_topic.riju.arn]
   insufficient_data_actions = [aws_sns_topic.riju.arn]
   dimensions = {
     AutoScalingGroupName = aws_autoscaling_group.server[count.index].name
-    path = "/mnt/riju/data"
+    path                 = "/mnt/riju/data"
   }
 }
 
 resource "aws_cloudwatch_metric_alarm" "server_root_volume_disk_space" {
   count = local.ami_available ? 1 : 0
 
-  alarm_name = "riju-server-root-volume-disk-usage-high"
-  comparison_operator = "GreaterThanOrEqualToThreshold"
-  evaluation_periods = "30"
-  metric_name = "disk_used_percent"
-  namespace = "CWAgent"
-  period = "60"
-  statistic = "Maximum"
-  threshold = "90"
-  alarm_description = "Disk space usage for root volume on Riju server is above 90% for 30 minutes"
-  ok_actions = [aws_sns_topic.riju.arn]
-  alarm_actions = [aws_sns_topic.riju.arn]
+  alarm_name                = "riju-server-root-volume-disk-usage-high"
+  comparison_operator       = "GreaterThanOrEqualToThreshold"
+  evaluation_periods        = "30"
+  metric_name               = "disk_used_percent"
+  namespace                 = "CWAgent"
+  period                    = "60"
+  statistic                 = "Maximum"
+  threshold                 = "90"
+  alarm_description         = "Disk space usage for root volume on Riju server is above 90% for 30 minutes"
+  ok_actions                = [aws_sns_topic.riju.arn]
+  alarm_actions             = [aws_sns_topic.riju.arn]
   insufficient_data_actions = [aws_sns_topic.riju.arn]
   dimensions = {
     AutoScalingGroupName = aws_autoscaling_group.server[count.index].name
-    path = "/"
+    path                 = "/"
   }
 }
 

10 tf/ec2.tf
@@ -31,14 +31,14 @@ resource "aws_security_group" "dev_server" {
 resource "aws_instance" "dev_server" {
   count = local.ssh_key_available ? 1 : 0
 
-  ami = data.aws_ami.ubuntu[count.index].id
+  ami           = data.aws_ami.ubuntu[count.index].id
   instance_type = "t3.2xlarge"
   ebs_optimized = true
 
   security_groups = [aws_security_group.dev_server[count.index].name]
 
   iam_instance_profile = aws_iam_instance_profile.dev_server.name
-  key_name = data.external.env.result.SSH_KEY_NAME
+  key_name             = data.external.env.result.SSH_KEY_NAME
 
   root_block_device {
     volume_size = 256
@@ -56,7 +56,7 @@ resource "aws_instance" "dev_server" {
   lifecycle {
     ignore_changes = [
       ami,
-      security_groups,   # legacy
+      security_groups, # legacy
     ]
   }
 }
@@ -69,7 +69,7 @@ resource "aws_eip" "dev_server" {
 }
 
 resource "aws_eip_association" "dev_server" {
-  count = local.ssh_key_available ? 1 : 0
-  instance_id = aws_instance.dev_server[count.index].id
+  count         = local.ssh_key_available ? 1 : 0
+  instance_id   = aws_instance.dev_server[count.index].id
   allocation_id = aws_eip.dev_server[count.index].id
 }
 
@@ -4,6 +4,6 @@ resource "aws_ecr_repository" "riju" {
 }
 
 resource "aws_ecrpublic_repository" "riju" {
-  provider = aws.us_east_1
+  provider        = aws.us_east_1
   repository_name = "riju"
 }

38 tf/iam.tf
@@ -104,13 +104,13 @@ resource "aws_iam_user_policy_attachment" "deploy" {
 }
 
 resource "aws_iam_role" "deploy" {
-  name = "riju-deploy"
-  description = "Role used by CI and deployment"
+  name               = "riju-deploy"
+  description        = "Role used by CI and deployment"
   assume_role_policy = data.aws_iam_policy_document.deploy_assume_role.json
 }
 
 resource "aws_iam_role_policy_attachment" "deploy" {
-  role = aws_iam_role.deploy.name
+  role       = aws_iam_role.deploy.name
   policy_arn = aws_iam_policy.deploy.arn
 }
 
@@ -148,9 +148,9 @@ data "aws_iam_policy_document" "server" {
 }
 
 resource "aws_iam_policy" "server" {
-  name = "riju-server"
+  name        = "riju-server"
   description = "Policy granting supervisor process on Riju server ability to download from S3"
-  policy = data.aws_iam_policy_document.server.json
+  policy      = data.aws_iam_policy_document.server.json
 }
 
 data "aws_iam_policy_document" "server_assume_role" {
@@ -169,23 +169,23 @@ data "aws_iam_policy_document" "server_assume_role" {
 }
 
 resource "aws_iam_role" "server" {
-  name = "riju-server"
-  description = "Role used by supervisor process on Riju server"
+  name               = "riju-server"
+  description        = "Role used by supervisor process on Riju server"
   assume_role_policy = data.aws_iam_policy_document.server_assume_role.json
 }
 
 resource "aws_iam_role_policy_attachment" "server" {
-  role = aws_iam_role.server.name
+  role       = aws_iam_role.server.name
   policy_arn = aws_iam_policy.server.arn
 }
 
 resource "aws_iam_role_policy_attachment" "server_cloudwatch" {
-  role = aws_iam_role.server.name
+  role       = aws_iam_role.server.name
   policy_arn = data.aws_iam_policy.cloudwatch.arn
 }
 
 resource "aws_iam_role_policy_attachment" "server_ssm" {
-  role = aws_iam_role.server.name
+  role       = aws_iam_role.server.name
   policy_arn = data.aws_iam_policy.ssm.arn
 }
 
@@ -207,9 +207,9 @@ data "aws_iam_policy_document" "dev_server" {
 }
 
 resource "aws_iam_policy" "dev_server" {
-  name = "riju-dev-server"
+  name        = "riju-dev-server"
   description = "Policy granting AWS administrative access from dev server"
-  policy = data.aws_iam_policy_document.dev_server.json
+  policy      = data.aws_iam_policy_document.dev_server.json
 }
 
 data "aws_iam_policy_document" "dev_server_assume_role" {
@@ -228,13 +228,13 @@ data "aws_iam_policy_document" "dev_server_assume_role" {
 }
 
 resource "aws_iam_role" "dev_server" {
-  name = "riju-dev-server"
-  description = "Role used by Riju dev server"
+  name               = "riju-dev-server"
+  description        = "Role used by Riju dev server"
   assume_role_policy = data.aws_iam_policy_document.dev_server_assume_role.json
 }
 
 resource "aws_iam_role_policy_attachment" "dev_server" {
-  role = aws_iam_role.dev_server.name
+  role       = aws_iam_role.dev_server.name
   policy_arn = aws_iam_policy.dev_server.arn
 }
 
@@ -259,8 +259,8 @@ data "aws_iam_policy_document" "backup_assume_role" {
 }
 
 resource "aws_iam_role" "backup" {
-  name = "riju-backup"
-  description = "Role used by AWS Backup for Riju"
+  name               = "riju-backup"
+  description        = "Role used by AWS Backup for Riju"
   assume_role_policy = data.aws_iam_policy_document.backup_assume_role.json
 }
 
@@ -273,11 +273,11 @@ data "aws_iam_policy" "backup_restores" {
 }
 
 resource "aws_iam_role_policy_attachment" "backup" {
-  role = aws_iam_role.backup.name
+  role       = aws_iam_role.backup.name
   policy_arn = data.aws_iam_policy.backup.arn
 }
 
 resource "aws_iam_role_policy_attachment" "backup_restores" {
-  role = aws_iam_role.backup.name
+  role       = aws_iam_role.backup.name
   policy_arn = data.aws_iam_policy.backup_restores.arn
 }

10 tf/main.tf
@@ -1,6 +1,6 @@
 terraform {
   backend "s3" {
-    key    = "state"
+    key = "state"
   }
   required_providers {
     aws = {
@@ -8,7 +8,7 @@ terraform {
       version = "~> 3.45"
     }
     null = {
-      source = "hashicorp/null"
+      source  = "hashicorp/null"
       version = "~> 3.1"
     }
   }
@@ -24,7 +24,7 @@ locals {
     BillingCategory = "Riju"
   }
 
-  ami_available = lookup(data.external.env.result, "AMI_NAME", "") != "" ? true : false
+  ami_available     = lookup(data.external.env.result, "AMI_NAME", "") != "" ? true : false
   ssh_key_available = lookup(data.external.env.result, "SSH_KEY_NAME", "") != "" ? true : false
 }
 
@@ -35,7 +35,7 @@ provider "aws" {
 }
 
 provider "aws" {
-  alias = "us_east_1"
+  alias  = "us_east_1"
   region = "us-east-1"
   default_tags {
     tags = local.tags
@@ -56,5 +56,5 @@ data "aws_subnet_ids" "default" {
 
 data "aws_subnet" "default" {
   for_each = data.aws_subnet_ids.default.ids
-  id = each.value
+  id       = each.value
 }
 
@@ -27,15 +27,22 @@ async function main() {
   program.option("--debug", "interactive debugging");
   program.parse(process.argv);
   const { lang, debug } = program.opts();
-  const sharedDeps = await getSharedDepsForLangConfig(await readLangConfig(lang));
+  const sharedDeps = await getSharedDepsForLangConfig(
+    await readLangConfig(lang)
+  );
   const installContents = await fs.readFile(
     `build/lang/${lang}/install.bash`,
     "utf-8"
   );
-  const sharedInstallContents = await Promise.all(sharedDeps.map(
-    async (name) => fs.readFile(`build/shared/${name}/install.bash`),
-  ));
-  const allInstallContents = [].concat.apply([installContents], sharedInstallContents);
+  const sharedInstallContents = await Promise.all(
+    sharedDeps.map(async (name) =>
+      fs.readFile(`build/shared/${name}/install.bash`)
+    )
+  );
+  const allInstallContents = [].concat.apply(
+    [installContents],
+    sharedInstallContents
+  );
   const hash = await hashDockerfile(
     "lang",
     {
@@ -52,9 +59,9 @@ async function main() {
         )
       )
     ).sort(),
-    installHash: allInstallContents.map(
-      (c) => crypto.createHash("sha1").update(c).digest("hex"),
-    ).join(""),
+    installHash: allInstallContents
+      .map((c) => crypto.createHash("sha1").update(c).digest("hex"))
+      .join(""),
     },
   }
 );
 
@@ -124,18 +124,23 @@ async function getImageArtifact({ tag, isBaseImage, isLangImage }) {
       `build/lang/${isLangImage.lang}/install.bash`,
       "utf-8"
     );
-    const sharedInstallContents = await Promise.all(isLangImage.sharedDeps.map(
-      async (name) => fs.readFile(`build/shared/${name}/install.bash`),
-    ));
-    const allInstallContents = [].concat.apply([installContents], sharedInstallContents);
+    const sharedInstallContents = await Promise.all(
+      isLangImage.sharedDeps.map(async (name) =>
+        fs.readFile(`build/shared/${name}/install.bash`)
+      )
+    );
+    const allInstallContents = [].concat.apply(
+      [installContents],
+      sharedInstallContents
+    );
     salt = {
       langHash: dependencyHashes[`deb:lang-${isLangImage.lang}`],
       sharedHashes: isLangImage.sharedDeps.map(
         (name) => dependencyHashes[`deb:shared-${name}`]
       ),
-      installHash: allInstallContents.map(
-        (c) => crypto.createHash("sha1").update(c).digest("hex"),
-      ).join(""),
+      installHash: allInstallContents
+        .map((c) => crypto.createHash("sha1").update(c).digest("hex"))
+        .join(""),
     };
   }
   return await hashDockerfile(name, dependentDockerHashes, { salt });
@@ -231,7 +236,7 @@ async function getLanguageTestArtifact({ lang }) {
     return await getTestHash(
       lang,
       dependencyHashes[`image:runtime`],
-      dependencyHashes[`image:lang-${lang}`],
+      dependencyHashes[`image:lang-${lang}`]
     );
   },
   buildLocally: async () => {
@@ -248,10 +253,10 @@ async function getLanguageTestArtifact({ lang }) {
     const hash = (await fs.readFile(hashPath, "utf-8")).trim();
     const S3_BUCKET = getS3Bucket();
     await runCommand(
-      `aws s3 rm --recursive s3://${S3_BUCKET}/test-hashes/lang/${lang}`,
+      `aws s3 rm --recursive s3://${S3_BUCKET}/test-hashes/lang/${lang}`
     );
     await runCommand(
-      `aws s3 cp ${hashPath} s3://${S3_BUCKET}/test-hashes/lang/${lang}/${hash}`,
+      `aws s3 cp ${hashPath} s3://${S3_BUCKET}/test-hashes/lang/${lang}/${hash}`
     );
   },
 };
@@ -658,15 +663,8 @@ async function main() {
   program.option("--publish", "publish artifacts to remote registries");
   program.option("--yes", "execute plan without confirmation");
   program.parse(process.argv);
-  const {
-    list,
-    manual,
-    holdManual,
-    all,
-    localOnly,
-    publish,
-    yes,
-  } = program.opts();
+  const { list, manual, holdManual, all, localOnly, publish, yes } =
+    program.opts();
   const depgraph = await getDepGraph();
   if (list) {
     for (const { name } of depgraph.artifacts) {
 
@@ -41,16 +41,8 @@ function makeLangScript(langConfig, isShared) {
     deb,
   } = install;
   if (prepare) {
-    const {
-      preface,
-      cert,
-      aptKey,
-      aptRepo,
-      apt,
-      npm,
-      opam,
-      manual,
-    } = prepare;
+    const { preface, cert, aptKey, aptRepo, apt, npm, opam, manual } =
+      prepare;
     if (preface) {
       prefaceParts.push(preface);
     }
@@ -450,9 +442,11 @@ export async function generateBuildScript({ lang, type }) {
   const buildScriptPath = `build/${type}/${lang}/build.bash`;
   const installScriptPath = `build/${type}/${lang}/install.bash`;
   await Promise.all([
-    fs.writeFile(buildScriptPath, buildScript + "\n")
+    fs
+      .writeFile(buildScriptPath, buildScript + "\n")
       .then(() => fs.chmod(buildScriptPath, 0o755)),
-    fs.writeFile(installScriptPath, installScript + "\n")
+    fs
+      .writeFile(installScriptPath, installScript + "\n")
       .then(() => fs.chmod(installScriptPath, 0o755)),
   ]);
 }
@@ -462,10 +456,7 @@ async function main() {
   const program = new Command();
   program
     .requiredOption("--lang <id>", "language ID")
-    .requiredOption(
-      "--type <value>",
-      "package category (lang or shared)"
-    );
+    .requiredOption("--type <value>", "package category (lang or shared)");
   program.parse(process.argv);
   await generateBuildScript(program.opts());
   process.exit(0);
 
@@ -15,15 +15,16 @@ async function getDeployConfig() {
       langs.map(async (lang) => [
         lang,
         `lang-${lang}-` +
-          (await getLocalImageLabel(`riju:lang-${lang}`, "riju.image-hash")),
+          (await getLocalImageLabel(`riju:lang-${lang}`, "riju.image-hash")),
       ])
     )
   );
-  const appImageTag = `app-` + await getLocalImageLabel(`riju:app`, "riju.image-hash");
+  const appImageTag =
+    `app-` + (await getLocalImageLabel(`riju:app`, "riju.image-hash"));
   return {
     appImageTag,
     langImageTags,
-  }
+  };
 }
 
 // Parse command-line arguments, run main functionality, and exit.
@@ -31,7 +32,10 @@ async function main() {
   const program = new Command();
   program.parse(process.argv);
   await fs.mkdir("build", { recursive: true });
-  await fs.writeFile("build/config.json", JSON.stringify(await getDeployConfig(), null, 2) + "\n");
+  await fs.writeFile(
+    "build/config.json",
+    JSON.stringify(await getDeployConfig(), null, 2) + "\n"
+  );
   console.log("wrote build/config.json");
   process.exit(0);
 }
 
@@ -139,9 +139,9 @@ async function encodeDockerfile(name, dependentHashes, opts) {
   }
   step.context = await Promise.all(
     sources.map(async (source) =>
-      (await listFiles(source)).filter(
-        (entry) => !ignore.ignores(entry.path)
-      )
+      (
+        await listFiles(source)
+      ).filter((entry) => !ignore.ignores(entry.path))
     )
   );
   break;