Builds but is untested
ci/woodpecker/push/woodpecker: Pipeline failed

Colin 2024-04-17 09:21:50 -04:00
parent 4c567e247e
commit 61448d4ad5
17 changed files with 202 additions and 35 deletions

.woodpecker.yml Normal file

@@ -0,0 +1,84 @@
labels:
  hostname: "macmini7"

clone:
  git:
    image: woodpeckerci/plugin-git
    settings:
      partial: false
      depth: 1

steps:
  build-staging:
    name: build-staging
    image: woodpeckerci/plugin-docker-buildx
    secrets: [REGISTRY_USER, REGISTRY_PASSWORD]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    commands:
      - echo "Building application for staging branch"
      - echo "$${REGISTRY_PASSWORD}" | docker login -u "$${REGISTRY_USER}" --password-stdin git.nixc.us
      - echo compose build
      - docker compose -f docker-compose.staging.yml build
    # A single `when` entry ANDs its conditions; separate list items are ORed.
    when:
      - branch: main
        event: push
        path:
          include: [ 'stack.production.yml', 'stack.staging.yml', 'docker-compose.staging.yml', 'docker-compose.production.yml', 'Dockerfile', '*.tests.ts' ]

  deploy-new:
    name: deploy-new
    image: woodpeckerci/plugin-docker-buildx
    secrets: [REGISTRY_USER, REGISTRY_PASSWORD]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    commands:
      - echo "$${REGISTRY_PASSWORD}" | docker login -u "$${REGISTRY_USER}" --password-stdin git.nixc.us
      - echo compose push
      - docker compose -f docker-compose.staging.yml push
    when:
      - branch: main
        path:
          include: [ 'stack.production.yml', 'stack.staging.yml', 'docker-compose.staging.yml', 'docker-compose.production.yml', 'Dockerfile', '*.tests.ts' ]

  cleanup-staging:
    name: cleanup-staging
    image: woodpeckerci/plugin-docker-buildx
    secrets: [REGISTRY_USER, REGISTRY_PASSWORD]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    commands:
      - docker compose -f docker-compose.staging.yml down
      - docker compose -f docker-compose.staging.yml rm -f
    when:
      - branch: main
        path:
          include: [ 'stack.production.yml', 'stack.staging.yml', 'docker-compose.staging.yml', 'docker-compose.production.yml', 'Dockerfile', '*.tests.ts' ]

  build-push-production:
    name: build-push-production
    image: woodpeckerci/plugin-docker-buildx
    secrets: [REGISTRY_USER, REGISTRY_PASSWORD]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    commands:
      - echo "Building application for production branch"
      - echo "$${REGISTRY_PASSWORD}" | docker login -u "$${REGISTRY_USER}" --password-stdin git.nixc.us
      - echo compose build
      - docker compose -f docker-compose.production.yml build
      - docker compose -f docker-compose.production.yml push
    when:
      - branch: production
        event: push
        path:
          include: [ 'stack.production.yml', 'stack.staging.yml', 'docker-compose.staging.yml', 'docker-compose.production.yml', 'Dockerfile', '*.tests.ts' ]

  deploy-production:
    name: deploy-production
    image: woodpeckerci/plugin-docker-buildx
    secrets: [REGISTRY_USER, REGISTRY_PASSWORD]
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    commands:
      - echo "$${REGISTRY_PASSWORD}" | docker login -u "$${REGISTRY_USER}" --password-stdin git.nixc.us
    when:
      - branch: production
        event: push
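Since the pipeline is currently failing, it is worth validating this file before pushing. A minimal sketch, assuming the Woodpecker CLI (`woodpecker-cli`) is installed locally:

```shell
# Syntax-check the pipeline definition without pushing a commit.
woodpecker-cli lint .woodpecker.yml
```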

README.md

@@ -1,53 +1,63 @@
-# UptimeRobot to Kuma migration
+# UptimeRobot to Kuma Migration

-We migrated from UptimeRobot to UptimeKuma, but there was no fast way to achieve this, so
-we wrote our own small migration helper.
+We needed a fast and easy way to migrate from UptimeRobot to UptimeKuma, so we created this migration helper. It leverages Docker Compose to simplify the entire process.
-## Getting started
-Copy the `.env.sample` as `.env` and enter your UptimeRobot API key.
+## WARRANTY VOID WARNING
+This repository comes entirely without support. If something on the Docker layer doesn't work and you know how to reach me, feel free to do so and we can tweak some things, but the nature of Docker is to unify environments so that support is effectively unnecessary if you're doing your Docker layer correctly. This repository uses source code from a publicly contributed tool written by GitHub user @sandstorm (https://github.com/sandstorm/uptime-robot-to-kuma-helper). I offer no assurance of continued support from them, but this allegedly worked the year they supplied the code. If someone forks this and maintains it, I hope they post about it in this thread, as the UptimeKuma maintainers don't seem to have any interest or resources available for interoperability: https://github.com/louislam/uptime-kuma/issues/1190

-For testing, you can simply start UptimeKuma via Docker:
+## Prerequisites

-```shell
-docker run --rm -p 3001:3001 --name uptime-kuma louislam/uptime-kuma:1
-```
+* Docker and Docker Compose installed.
+* A copy of your UptimeKuma data directory (for safety).

-Ensure you finished the initial setup (simply open [localhost:3001](localhost:3001) in your browser) and
-updated the credentials in the `.env` file.
-To start the migration run:
+## Migration Steps

+1. **Clone the Repository:**

 ```bash
-# copy all your UptimeRobot monitors to your Kuma installation
-yarn copy-monitors
-# disable all UptimeRobot monitors
-yarn disable-uptime-robot
-# delete all your monitors from UptimeRobot
-# DANGER!!! This is can not be undone
-yarn delete-uptime-robot
+git clone https://github.com/Nixius/UptimeRobot-Migrator.git # Replace if your project is elsewhere
+cd uptimerobot-migrator
 ```

+   Alternatively, you can copy this repository's `docker-compose.yml` somewhere useful for your case.
-## Production Migration
+2. **Environment Configuration:**
+   * Edit the `docker-compose.yml`.
+   * Enter your UptimeRobot API key.
+   * Provide your Kuma URL, username, and password.

-**Important Node:** This migration helper was writen specially for our use-case. So not all UptimeRobot
-scenarios and features are implemented. So no garantie this will work 100% for you.
+3. **Place UptimeKuma Data (if migrating to an existing instance):**
+   * Place a copy of your UptimeKuma data into a directory named `data` next to the `docker-compose.yml` file.

-**Pro Tipp:** Before migrating, create a default notification that will get used as default.
+4. **Execute the Migration:**
+   * Run the following command to start, in the background, the temporary UptimeKuma instance we include for copying your monitors into:

+   ```bash
+   docker-compose up -d kuma-migration
+   ```

+   * Ensure that UptimeKuma is online by navigating to it in your browser; if you are running this locally, it should be at http://localhost:3001/. Use the credentials you set in your original instance.
+   * Next, launch the migrator in the foreground:

+   ```bash
+   docker-compose up migrator
+   ```

+   * Once the migrator has completed, check the temporary UptimeKuma instance we started earlier; if everything is copacetic, run `docker-compose down && docker-compose rm` to clean up.
-## Architecture

-### Fetching from UptimeRobot
+   * Additionally, to perform other actions, modify the `docker-compose.yml` file:
+     * Change the `command` under the `migrator` service to one of:
+       * `disable-uptime-robot`
+       * `delete-uptime-robot` (**Caution:** this action is irreversible)

-This part was quite easy, because UptimeRobot got a good REST-API to fetch all monitors from
+## Important Notes

-### Creating the monitors in Kuma
+* **Backup:** Always create a backup of your UptimeKuma data before running the migration.
+* **Customization:** The helper was designed for our use case. You must modify it to fit your specific API key and credentials.
+* **Default Notifications:** Consider creating a default notification in UptimeKuma beforehand.

-This was the hard part. Currently, Kuma does not provide any form of API. In the first version of this migration
-helper, I tried to hook into the websocket connection of the UI and create monitors that way. This was really instabile
-and resulted in many non-deterministic errors.
+## Disclaimer

-For this reason I switched to Playwright. This allows us the remote-control a browser, which will create
-the monitors via the Kuma-UI.
+This tool aims to streamline the migration, but we cannot guarantee perfect results with every UptimeRobot configuration and Kuma setup.
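The prerequisites and step 3 above assume `./data` holds a copy of your UptimeKuma state. A minimal sketch of producing that copy, assuming your live instance keeps its state in a Docker named volume called `uptime-kuma` (a hypothetical name; adjust to your setup):

```shell
# Stop the live instance first so the SQLite database is copied in a consistent state.
docker stop uptime-kuma
mkdir -p data
# Copy the volume contents into ./data without touching the original.
docker run --rm -v uptime-kuma:/from -v "$PWD/data":/to alpine cp -a /from/. /to/
docker start uptime-kuma
```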

data/.gitkeep Normal file (empty)

docker-compose.production.yml Normal file

@@ -0,0 +1,8 @@
version: '3.8'
services:
  migrator:
    build:
      context: docker/migrator
      dockerfile: Dockerfile.production
    image: git.nixc.us/nixius/uptimerobot-migrator:production

docker-compose.staging.yml Normal file

@@ -0,0 +1,8 @@
version: '3.8'
services:
  migrator:
    build:
      context: docker/migrator
      dockerfile: Dockerfile
    image: git.nixc.us/nixius/uptimerobot-migrator:staging
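These two compose files exist only to pin the image tag and Dockerfile per environment. The CI steps above drive them, but the same commands work locally, for example:

```shell
# Build and push the staging image by hand (assumes you are already logged in to git.nixc.us).
docker compose -f docker-compose.staging.yml build
docker compose -f docker-compose.staging.yml push
```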

docker-compose.yml Normal file

@@ -0,0 +1,27 @@
version: '3.8'
services:
  kuma-migration:
    image: louislam/uptime-kuma:1
    ports:
      - '3001:3001'
    networks:
      - default
    ## make sure you run this on a copy of your uptime kuma data, not your live one.
    volumes:
      - ./data:/app/data
  migrator:
    build: .
    image: git.nixc.us/nixius/uptimerobot-migrator:production
    environment:
      UPTIME_ROBOT_API_KEY: "xxxxxxxxxxxxxxxxxxxxxxxxxxx" # change this for sure and don't share it.
      KUMA_URL: "http://kuma-migration:3001" # probably don't change this
      KUMA_USERNAME: "admin" # change this
      KUMA_PASSWORD: "password2" # change this and don't share it
    depends_on:
      - kuma-migration
    networks:
      - default
    restart: "no"
    command: ["copy-monitors"] # replace with a different task name as needed; options are copy-monitors, disable-uptime-robot, delete-uptime-robot

Dockerfile Normal file

@@ -0,0 +1,6 @@
FROM node:18.13.0
WORKDIR /usr/src/app
COPY . .
RUN yarn install && chmod +x entrypoint.sh
EXPOSE 3000
# Exec-form ENTRYPOINT requires double quotes, and the script lives in WORKDIR, not at /.
ENTRYPOINT ["/usr/src/app/entrypoint.sh"]
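A quick local smoke test of this image, assuming the build context contains the app source and `entrypoint.sh`:

```shell
docker build -t uptimerobot-migrator:dev .
# With no task argument, entrypoint.sh prints its usage line and exits non-zero.
docker run --rm uptimerobot-migrator:dev
```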

Dockerfile.production Normal file

@@ -0,0 +1 @@
FROM git.nixc.us/nixius/uptimerobot-migrator:staging
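Building this one-line Dockerfile simply promotes the already-tested staging image to the production tag. It is roughly equivalent to a manual retag, assuming you are logged in to git.nixc.us:

```shell
docker pull git.nixc.us/nixius/uptimerobot-migrator:staging
docker tag git.nixc.us/nixius/uptimerobot-migrator:staging git.nixc.us/nixius/uptimerobot-migrator:production
docker push git.nixc.us/nixius/uptimerobot-migrator:production
```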

entrypoint.sh Normal file

@@ -0,0 +1,23 @@
#!/bin/bash
# Dispatch on the task name passed as the container command.
# Note: [[ ]] and `read -p` are bashisms, so this needs bash rather than plain sh.
case "$1" in
  "copy-monitors")
    yarn copy-monitors
    ;;
  "disable-uptime-robot")
    yarn disable-uptime-robot
    ;;
  "delete-uptime-robot")
    echo "DANGER: This action cannot be undone"
    read -p "Are you sure you want to delete all UptimeRobot monitors? (yes/no): " confirm
    if [[ $confirm == "yes" ]]; then
      yarn delete-uptime-robot
    else
      echo "Deletion canceled."
    fi
    ;;
  *)
    echo "Usage: $0 {copy-monitors | disable-uptime-robot | delete-uptime-robot}"
    exit 1
    ;;
esac
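Note that the delete task confirms via `read`, which needs interactive stdin. `docker compose run` attaches a TTY by default, whereas `docker compose up` does not, so invoke the destructive task like this:

```shell
# The (yes/no) prompt works here because `run` is interactive by default.
docker compose run --rm migrator delete-uptime-robot
```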