CI setup for new swarm mode (#248)
- build backend and frontend with caching using GHA cache
- streamline frontend image to reduce layers
- set up local swarm with test/setup.sh script, wait for containers to init
- copy sample config files as defaults (add storages.sample.yaml)
- add initial backend test for logging in with default superadmin credentials via 127.0.0.1:9871
- must use 127.0.0.1 instead of localhost for accessing the frontend container within the action
This commit is contained in:
parent 0c8a5a49b4
commit e3f268a2e8
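Within the GitHub Action, the published frontend has to be reached via 127.0.0.1:9871 rather than localhost. For context, a minimal sketch of the kind of readiness wait the bootstrap script performs, written with requests; the endpoint, port, and timings are taken from test/setup.sh, but this helper itself is illustrative and not part of the commit:

import time
import requests

FRONTEND_HOST = "http://127.0.0.1:9871"  # must be 127.0.0.1, not localhost, inside the action

def wait_for_backend(timeout=60, interval=5):
    """Poll the backend /api/settings endpoint until it responds, or give up after `timeout` seconds."""
    waited = 0
    while waited <= timeout:
        try:
            r = requests.get(f"{FRONTEND_HOST}/api/settings", timeout=3)
            if r.ok:
                return True
        except requests.RequestException:
            pass
        time.sleep(interval)
        waited += interval
    return False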
.github/workflows/ci.yaml (vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
name: Browsertrix Cloud Integration Test (for Swarm)

on: [push, pull_request]

jobs:
  btrix-swarm-test:
    runs-on: ubuntu-20.04

    services:
      registry:
        image: registry:2
        ports:
          - 5000:5000

    steps:
      -
        name: Checkout
        uses: actions/checkout@v3

      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          driver-opts: network=host

      -
        name: Copy Configs
        run: cp ./configs/config.sample.env ./configs/config.env; cp ./configs/storages.sample.yaml ./configs/storages.yaml

      -
        name: Build Backend
        uses: docker/build-push-action@v3
        with:
          context: backend
          push: true
          tags: localhost:5000/webrecorder/browsertrix-backend:latest
          cache-from: type=gha,scope=backend
          cache-to: type=gha,scope=backend,mode=max

      -
        name: Build Frontend
        uses: docker/build-push-action@v3
        with:
          context: frontend
          push: true
          tags: localhost:5000/webrecorder/browsertrix-frontend:latest
          cache-from: type=gha,scope=frontend
          cache-to: type=gha,scope=frontend,mode=max

      -
        name: Run Bootstrap Script
        run: ./test/setup.sh

      -
        name: Install Python
        uses: actions/setup-python@v3
        with:
          python-version: '3.9'

      -
        name: Install Python Libs
        run: pip install pytest requests

      -
        name: Backend Tests
        run: py.test -vv ./backend/test/*.py
@@ -8,7 +8,7 @@ RUN pip install -r requirements.txt

 RUN python-on-whales download-cli

-ADD . /app
+ADD btrixcloud/ /app/btrixcloud/

 CMD uvicorn btrixcloud.main:app_root --host 0.0.0.0 --access-log --log-level info
@@ -18,7 +18,6 @@ class BaseCrawlManager(ABC):
     def __init__(self, templates):
         super().__init__()

-        self.crawler_image = os.environ["CRAWLER_IMAGE"]
         self.job_image = os.environ["JOB_IMAGE"]

         self.no_delete_jobs = os.environ.get("NO_DELETE_JOBS", "0") != "0"
@@ -21,7 +21,7 @@ def run_swarm_stack(name, data):
     fh_io.flush()

     try:
-        docker.stack.deploy(name, compose_files=[fh_io.name], orchestrator="swarm")
+        docker.stack.deploy(name, compose_files=[fh_io.name], orchestrator="swarm", resolve_image="never")
     except DockerException as exc:
         print(exc, flush=True)
backend/test/test_login.py (new file, 23 lines)
@@ -0,0 +1,23 @@
import requests

api_prefix = "http://127.0.0.1:9871/api"


def test_login_invalid():
    username = "admin@example.com"
    password = "invalid"
    r = requests.post(f"{api_prefix}/auth/jwt/login", data={"username": username, "password": password, "grant_type": "password"})
    data = r.json()

    assert r.status_code == 400
    assert data["detail"] == "LOGIN_BAD_CREDENTIALS"


def test_login():
    username = "admin@example.com"
    password = "PASSW0RD0"
    r = requests.post(f"{api_prefix}/auth/jwt/login", data={"username": username, "password": password, "grant_type": "password"})
    data = r.json()

    assert r.status_code == 200
    assert data["token_type"] == "bearer"
    assert data["access_token"]
    access_token = data["access_token"]
@@ -1,41 +1,45 @@
 # Env Settings (for local Docker Deployment)

+# mongo
+# ==========
 MONGO_HOST=mongo
 PASSWORD_SECRET=change_me

 MONGO_INITDB_ROOT_USERNAME=root
 MONGO_INITDB_ROOT_PASSWORD=example


+# minio
+# ==========
 MINIO_ROOT_USER=ADMIN
 MINIO_ROOT_PASSWORD=PASSW0RD

 MINIO_BUCKET=btrix-data

+MC_HOST_local=http://ADMIN:PASSW0RD@minio:9000


+# Super Admin
+# =================
 SUPERUSER_EMAIL=admin@example.com

 # if blank, a password is generated automatically
-SUPERUSER_PASSWORD=
+SUPERUSER_PASSWORD=PASSW0RD0

 STORE_ENDPOINT_URL=http://minio:9000/btrix-data/
 STORE_ACCESS_ENDPOINT_URL=/data/
 STORE_ACCESS_KEY=ADMIN
 STORE_SECRET_KEY=PASSW0RD

-MC_HOST_local=http://ADMIN:PASSW0RD@minio:9000

-REDIS_URL=redis://redis/0

 # enable to send verification emails
 #EMAIL_SMTP_HOST=smtp.gmail.com
 #EMAIL_SMTP_PORT=587
 #EMAIL_SENDER=user@example.com
 #EMAIL_PASSWORD=password

-# Browsertrix Crawler image to use
-CRAWLER_IMAGE=webrecorder/browsertrix-crawler:latest

-CRAWL_ARGS="--timeout 90 --logging stats,behaviors,debug --generateWACZ --screencastPort 9037 --collection main"

+# misc
+# =================
 REGISTRATION_ENABLED=1

 # number of workers to run for backend
@@ -47,6 +51,7 @@ JWT_TOKEN_LIFETIME_MINUTES=240
 NO_DELETE_ON_FAIL=0

 # auth sign -- uncomment to enable signing
+# ==================
 # WACZ_SIGN_URL="http://authsign:8080/sign"

 # optional token for signing (useful if using remote signing server)
@@ -1,4 +1,5 @@
-crawler_image: localhost:5000/webrecorder/browsertrix-crawler:latest
+redis_image: redis
+crawler_image: webrecorder/browsertrix-crawler:cloud

 crawler_requests_cpu: "0.8"
 crawler_limits_cpu: "1.0"
@@ -6,7 +7,5 @@ crawler_limits_cpu: "1.0"
 crawler_requests_memory: "800M"
 crawler_limits_memory: "1G"

-redis_image: redis
-
-crawler_args: "--timeout 90 --logging stats,behaviors,debug --generateWACZ --text --workers 4 --collection thecrawl --screencastPort 9037 --sizeLimit 100000000000 --timeLimit 18000 --healthCheckPort 6065 --waitOnDone"
+crawler_args: "--timeout 90 --logging stats,behaviors,debug --generateWACZ --text --workers 2 --collection thecrawl --screencastPort 9037 --sizeLimit 100000000000 --timeLimit 18000 --healthCheckPort 6065 --waitOnDone"
configs/storages.sample.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@

# default storages using built-in minio

storages:
  - name: default
    endpoint_url: http://minio:9000/btrix-data/
    access_endpoint_url: /data/
    access_key: ADMIN
    secret_key: PASSW0RD
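For reference, the new sample file is a plain YAML list of storage entries; a minimal sketch of reading it with PyYAML, assuming the file has been copied to configs/storages.yaml as the CI's Copy Configs step does (the helper below is illustrative only, not part of the commit):

import yaml

def load_storages(path="configs/storages.yaml"):
    """Read the storages config and index the entries by name."""
    with open(path) as fh:
        data = yaml.safe_load(fh)
    return {entry["name"]: entry for entry in data.get("storages", [])}

# e.g. load_storages()["default"]["endpoint_url"] -> "http://minio:9000/btrix-data/"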
@@ -26,6 +26,7 @@ services:

     environment:
       - CRAWLER_FQDN_SUFFIX=
+      - JOB_IMAGE=${REGISTRY}webrecorder/browsertrix-backend:latest

     deploy:
       placement:
@@ -2,26 +2,27 @@
 ARG RWP_BASE_URL=https://cdn.jsdelivr.net/npm/replaywebpage@1.5.8/

 FROM node:16 as build
-ARG RWP_BASE_URL
-
-WORKDIR /app
-COPY package.json .
-COPY yarn.lock .
-RUN yarn --frozen-lockfile
-
-COPY lit-localize.json ./
-COPY postcss.config.js ./
-COPY tailwind.config.js ./
-COPY tsconfig.json ./
-COPY webpack.config.js ./
-COPY webpack.prod.js ./
-COPY src ./src/

 ARG GIT_COMMIT_HASH
-ENV GIT_COMMIT_HASH=${GIT_COMMIT_HASH}

 ARG GIT_BRANCH_NAME
-ENV GIT_BRANCH_NAME=${GIT_BRANCH_NAME}
+ENV GIT_COMMIT_HASH=${GIT_COMMIT_HASH} \
+    GIT_BRANCH_NAME=${GIT_BRANCH_NAME}
+
+WORKDIR /app
+COPY yarn.lock package.json .
+RUN yarn --frozen-lockfile
+
+COPY lit-localize.json \
+    postcss.config.js \
+    tailwind.config.js \
+    tsconfig.json \
+    webpack.config.js \
+    webpack.prod.js \
+    .
+
+COPY src ./src/

 RUN yarn build
@@ -35,11 +36,10 @@ COPY --from=build /app/dist /usr/share/nginx/html
 COPY ./nginx.conf /etc/nginx/nginx.conf
 COPY ./frontend.conf.template /etc/nginx/templates/

-RUN mkdir -p /etc/nginx/includes/
-COPY ./locations.conf /etc/nginx/includes/
-
-RUN rm /etc/nginx/conf.d/*
-
 # default docker resolver, overridden in k8s
-RUN mkdir -p /etc/nginx/resolvers; echo "resolver 127.0.0.11;" > /etc/nginx/resolvers/resolvers.conf
+RUN rm /etc/nginx/conf.d/*; \
+    mkdir -p /etc/nginx/includes/; \
+    mkdir -p /etc/nginx/resolvers; echo "resolver 127.0.0.11;" > /etc/nginx/resolvers/resolvers.conf
+#mkdir -p /etc/nginx/resolvers; echo "" > /etc/nginx/resolvers/resolvers.conf
+
+COPY ./locations.conf /etc/nginx/includes/
test/setup.sh (new executable file, 61 lines)
@@ -0,0 +1,61 @@

#!/bin/bash

set -e

#docker service create --name registry --publish published=5000,target=5000 registry:2

export REGISTRY=localhost:5000/
export FRONTEND_HOST=http://127.0.0.1:9871

docker swarm init

docker stack deploy -c docker-compose.yml btrix --resolve-image changed

sleepfor=5

# check frontend
count=0

until $(curl -m 3 --output /dev/null --silent --head --fail $FRONTEND_HOST/); do
    echo "waiting for frontend startup... (has waited for $count seconds)"
    sleep $sleepfor
    count=$((count+$sleepfor))
    if [ $count -gt 60 ]; then
        echo "swarm frontend startup failed, frontend & backend logs below:"
        echo ""
        echo "ps"
        echo "--------"
        docker stack ps btrix --no-trunc
        echo "frontend"
        echo "--------"
        docker service logs btrix_frontend 2>&1 | cat
        echo "backend"
        echo "--------"
        docker service logs btrix_backend 2>&1 | cat
    fi
done

# check backend api
count=0

until $(curl -m 3 --output /dev/null --silent --fail $FRONTEND_HOST/api/settings | jq); do
    echo "waiting for backend api startup... (has waited for $count seconds)"
    sleep $sleepfor
    count=$((count+$sleepfor))
    if [ $count -gt 60 ]; then
        echo "swarm frontend startup failed, frontend & backend logs below:"
        echo ""
        echo "ps"
        echo "--------"
        docker stack ps btrix --no-trunc
        echo "frontend"
        echo "--------"
        docker service logs btrix_frontend 2>&1 | cat
        echo "backend"
        echo "--------"
        docker service logs btrix_backend 2>&1 | cat
    fi
done