browsertrix/backend/test_nightly/test_cleanup_seed_files.py
Tessa Walsh f7ba712646 Add seed file support to Browsertrix backend (#2710)
Fixes #2673 

Changes in this PR:

- Adds a new `file_uploads.py` module and corresponding `/files` API
prefix with endpoints for uploading, retrieving, and deleting seed
files (extensible to other file types going forward; see the upload
sketch after this list)
- Seed files are supported via `CrawlConfig.config.seedFileId` on POST
and PATCH endpoints. This `seedFileId` is replaced by a presigned URL
when it is passed to the crawler by the operator
- Seed files are read when first uploaded to calculate `firstSeed` and
`seedCount`, which are stored in the database and copied into the
workflow and crawl documents when those are created.
- Logic is added to store `firstSeed` and `seedCount` for other
workflows as well, and a migration is added to backfill the data,
maintaining consistency and fixing some pymongo aggregations that
previously assumed every workflow would have at least one `Seed`
object in `CrawlConfig.seeds`
- Seed file and thumbnail storage stats are added to org stats
- Seed file and thumbnail uploads first check that the org's storage
quota has not been exceeded, returning a 400 if it has
- A cron background job (run weekly at midnight each Sunday by default,
but configurable) looks for seed files at least x minutes old (1440
minutes, or 1 day, by default; also configurable) that are not in use
in any workflow, and deletes any it finds; the selection logic is
sketched after this list. The backend pods ensure this k8s batch job
exists when starting up and create it if it does not already exist. A
database entry for each run of the job is created in the operator on
job completion so that it appears in the `/jobs` API endpoints, but
retrying this type of regularly scheduled background job is not
supported, as we don't want to accidentally create multiple competing
scheduled jobs.
- Adds a `min_seed_file_crawler_image` value to the Helm chart that,
if set, is checked before creating a crawl from a workflow. If a
workflow cannot be run, the detail of the exception is returned in
`CrawlConfigAddedResponse.errorDetail` so that the reason can be
displayed in the frontend (a sketch of this gate also follows the
list)
- Adds a `SeedFile` model based on a new `UserFile` base model
(formerly `ImageFile`), and ensures all APIs returning uploaded files
return an absolute presigned URL (with either an external origin or
an internal service origin)
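
For reference, a minimal sketch of the upload-and-attach flow,
mirroring the endpoints exercised by the nightly test below; the base
URL, token, and org id are placeholders:

```python
import requests

API_PREFIX = "https://example.com/api"  # placeholder base URL
headers = {"Authorization": "Bearer <token>"}  # placeholder auth
org_id = "<org-id>"  # placeholder

# Upload a seed file via PUT; the response includes the new file's id
with open("seedfile.txt", "rb") as fh:
    r = requests.put(
        f"{API_PREFIX}/orgs/{org_id}/files/seedFile?filename=seedfile.txt",
        headers=headers,
        data=fh,
    )
seed_file_id = r.json()["id"]

# Reference the file from a workflow via config.seedFileId
# (instead of listing seeds inline in config.seeds)
r = requests.post(
    f"{API_PREFIX}/orgs/{org_id}/crawlconfigs/",
    headers=headers,
    json={
        "runNow": False,
        "name": "Workflow with seed file",
        "config": {"scopeType": "page", "seedFileId": seed_file_id},
    },
)
config_id = r.json()["id"]
```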
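
The cleanup job's selection logic boils down to: delete any seed file
older than the configured threshold that no workflow references. A
rough sketch of that check, assuming a Motor-style async database
handle; the collection and field names are illustrative, not the
actual schema:

```python
from datetime import datetime, timedelta, timezone

MIN_SEED_FILE_AGE_MINUTES = 1440  # default threshold (1 day); configurable


async def cleanup_unused_seed_files(mdb):
    """Illustrative only: remove seed files past the age threshold
    that are not referenced by any workflow's config.seedFileId."""
    cutoff = datetime.now(timezone.utc) - timedelta(
        minutes=MIN_SEED_FILE_AGE_MINUTES
    )
    async for file_ in mdb["file_uploads"].find(
        {"type": "seedFile", "created": {"$lt": cutoff}}
    ):
        in_use = await mdb["crawl_configs"].find_one(
            {"config.seedFileId": file_["_id"]}
        )
        if not in_use:
            # Delete the database record; the real job also removes the
            # object from storage and updates org storage stats
            await mdb["file_uploads"].delete_one({"_id": file_["_id"]})
```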
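
And a sketch of the crawler-image gate: if the check raises, the
exception detail is surfaced through
`CrawlConfigAddedResponse.errorDetail` rather than failing the whole
request. The environment variable name, the naive version comparison,
and everything apart from `errorDetail` itself are assumptions:

```python
import os

from fastapi import HTTPException

MIN_SEED_FILE_CRAWLER_IMAGE = os.environ.get("MIN_SEED_FILE_CRAWLER_IMAGE", "")


def ensure_crawler_supports_seed_files(crawler_image: str) -> None:
    # Naive placeholder comparison; a real check would parse image versions
    if MIN_SEED_FILE_CRAWLER_IMAGE and crawler_image < MIN_SEED_FILE_CRAWLER_IMAGE:
        raise HTTPException(
            status_code=400,
            detail="seed_file_not_supported_by_crawler_image",
        )


def try_run_now(crawler_image: str):
    # Capture the reason a newly added workflow can't start, instead of
    # failing the whole add request
    error_detail = None
    try:
        ensure_crawler_supports_seed_files(crawler_image)
        # ... launch the crawl here ...
    except HTTPException as exc:
        error_detail = exc.detail  # returned as errorDetail in the response
    return error_detail
```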

---------
Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
2025-07-22 19:11:02 -07:00


import os
import time

import pytest
import requests

from .conftest import API_PREFIX
from .utils import read_in_chunks

curr_dir = os.path.dirname(os.path.realpath(__file__))


@pytest.fixture(scope="session")
def seed_file_unused_id(crawler_auth_headers, default_org_id):
    with open(os.path.join(curr_dir, "data", "seedfile.txt"), "rb") as fh:
        r = requests.put(
            f"{API_PREFIX}/orgs/{default_org_id}/files/seedFile?filename=seedfile.txt",
            headers=crawler_auth_headers,
            data=read_in_chunks(fh),
        )
        assert r.status_code == 200
        return r.json()["id"]


@pytest.fixture(scope="session")
def seed_file_used_id(crawler_auth_headers, default_org_id):
    with open(os.path.join(curr_dir, "data", "seedfile.txt"), "rb") as fh:
        r = requests.put(
            f"{API_PREFIX}/orgs/{default_org_id}/files/seedFile?filename=seedfile.txt",
            headers=crawler_auth_headers,
            data=read_in_chunks(fh),
        )
        assert r.status_code == 200
        return r.json()["id"]


@pytest.fixture(scope="session")
def seed_file_config_id(crawler_auth_headers, default_org_id, seed_file_used_id):
    crawl_data = {
        "runNow": False,
        "name": "Seed File Test Crawl Nightly",
        "config": {
            "scopeType": "page",
            "seedFileId": seed_file_used_id,
            "limit": 2,
        },
        "crawlerChannel": "test",
    }
    r = requests.post(
        f"{API_PREFIX}/orgs/{default_org_id}/crawlconfigs/",
        headers=crawler_auth_headers,
        json=crawl_data,
    )
    assert r.status_code == 200
    return r.json()["id"]


def test_seed_file_cleanup_cron_job(
    admin_auth_headers,
    default_org_id,
    seed_file_unused_id,
    seed_file_used_id,
    seed_file_config_id,
):
    # Verify unused and used seed files exist
    for seed_file_id in (seed_file_unused_id, seed_file_used_id):
        r = requests.get(
            f"{API_PREFIX}/orgs/{default_org_id}/files/{seed_file_id}",
            headers=admin_auth_headers,
        )
        assert r.status_code == 200
        data = r.json()
        assert data["id"] == seed_file_id
        assert data["oid"] == default_org_id

    # Verify workflow with used seed file exists
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/crawlconfigs/{seed_file_config_id}/",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    data = r.json()
    assert data["id"] == seed_file_config_id
    assert data["config"]["seedFileId"] == seed_file_used_id

    # Wait 5 minutes to give cleanup job time to run
    time.sleep(300)

    # Check that at least one bg job entry exists for cleanup jobs and that
    # the jobs are marked as successful
    r = requests.get(
        f"{API_PREFIX}/orgs/all/jobs?jobType=cleanup-seed-files",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    data = r.json()
    assert data["total"] > 0
    for job in data["items"]:
        print(job)
        assert job["id"]
        assert job["type"] == "cleanup-seed-files"
        assert job["success"]
        assert job["started"]
        assert job["finished"]

    # Check that unused seed file was deleted from database
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/files/{seed_file_unused_id}",
        headers=admin_auth_headers,
    )
    assert r.status_code == 404

    # Check that used seed file was not deleted from database
    r = requests.get(
        f"{API_PREFIX}/orgs/{default_org_id}/files/{seed_file_used_id}",
        headers=admin_auth_headers,
    )
    assert r.status_code == 200
    assert r.json()["id"] == seed_file_used_id