- build backend and frontend with caching using the GHA cache (a workflow sketch follows this list)
- streamline the frontend image to reduce layers
- set up a local swarm with the test/setup.sh script, waiting for containers to init
- copy sample config files as defaults (add storages.sample.yaml)
- add an initial backend test for logging in with the default superadmin credentials via 127.0.0.1:9871
- must use 127.0.0.1 instead of localhost to reach the frontend container within the action
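A minimal sketch of how these pieces could fit together in a workflow, using docker/build-push-action with the `type=gha` cache backend; the job layout, context path, image tag, login endpoint, and credentials are illustrative assumptions, not the repo's actual workflow:

```yaml
# Hypothetical CI workflow excerpt: builds an image against the GitHub
# Actions layer cache, starts the local swarm, and smoke-tests login.
name: ci
on: [push]

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: docker/setup-buildx-action@v3   # buildx is required for the gha cache

      - name: Build backend image with GHA layer cache
        uses: docker/build-push-action@v5
        with:
          context: ./backend            # assumed path
          tags: webrecorder/browsertrix-backend:latest   # assumed tag
          load: true                    # keep the image in the local daemon
          cache-from: type=gha          # reuse layers from previous runs
          cache-to: type=gha,mode=max   # write all layers back to the cache

      - name: Start local swarm and wait for containers
        run: ./test/setup.sh

      - name: Smoke-test superadmin login
        run: |
          # 127.0.0.1, not localhost, to reach the frontend container in the action;
          # endpoint and credentials below are placeholders
          curl -sf -X POST http://127.0.0.1:9871/api/auth/jwt/login \
            -d "username=admin@example.com" -d "password=PASSW0RD0"
```

`cache-to: mode=max` exports intermediate layers as well as final ones, which is what makes rebuilds of multi-stage images fast across runs.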
The sample crawler config (YAML):
```yaml
redis_image: redis

crawler_image: webrecorder/browsertrix-crawler:cloud

crawler_requests_cpu: "0.8"
crawler_limits_cpu: "1.0"

crawler_requests_memory: "800M"
crawler_limits_memory: "1G"

crawler_args: "--timeout 90 --logging stats,behaviors,debug --generateWACZ --text --workers 2 --collection thecrawl --screencastPort 9037 --sizeLimit 100000000000 --timeLimit 18000 --healthCheckPort 6065 --waitOnDone"
```
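For illustration, a sketch of where these values might land once interpolated into a swarm stack; the service layout and command are assumptions about the template, not the repo's actual service definition:

```yaml
# Hypothetical docker-compose/swarm stack excerpt mapping the sample
# values above onto services; layout is illustrative only.
version: "3.8"
services:
  crawler:
    image: webrecorder/browsertrix-crawler:cloud    # crawler_image
    command: crawl --timeout 90 --workers 2 --generateWACZ   # subset of crawler_args
    deploy:
      resources:
        reservations:
          cpus: "0.8"      # crawler_requests_cpu
          memory: 800M     # crawler_requests_memory
        limits:
          cpus: "1.0"      # crawler_limits_cpu
          memory: 1G       # crawler_limits_memory
  redis:
    image: redis           # redis_image
```

Setting both reservations (requests) and limits keeps a runaway crawl from starving other services on the swarm node while still guaranteeing it a baseline share.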