browsertrix/backend/btrixcloud/migrations/migration_0004_config_seeds.py
Ilya Kreymer 4f676e4e82
QA Runs Initial Backend Implementation (#1586)
Supports running QA Runs via the QA API!

Builds on top of the `issue-1498-crawl-qa-backend-support` branch, fixes
#1498

Also requires Browsertrix Crawler 1.1.0+ (from the
webrecorder/browsertrix-crawler#469 branch)

Notable changes:
- QARun objects contain info about QA runs, which are crawls
performed on data loaded from existing crawls.

- Various crawl db operations can be performed on either the crawl itself or
the nested `qa.` object, and core crawl fields have been moved to `CoreCrawlable`.

- While running, `QARun` data is stored in a single `qa` object, while
finished QA runs are added to the `qaFinished` dictionary on the Crawl. The
QA list API returns data from the finished list, sorted by most recent
first (a rough model sketch follows this list).

- Includes additional type fixes and type safety improvements, especially around
BaseCrawl / Crawl / UploadedCrawl functionality, adding specific
get_upload(), get_basecrawl(), and get_crawl() getters for internal use and
get_crawl_out() for the API.

- Supports filtering and sorting pages via `qaFilterBy` (screenshotMatch, textMatch)
along with `gt`, `lt`, `gte`, `lte` params to return pages based on QA results (see the request sketch just below).
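
As a rough illustration of those filter params, a QA-filtered page listing might be requested like this; the host, endpoint path, and token below are placeholders, not values from this commit:

```python
import requests

# Hypothetical request: list pages from a QA run whose screenshot match
# score is >= 0.9. The URL shape is an assumption for illustration only;
# consult the Browsertrix API docs for the actual pages endpoint.
resp = requests.get(
    "https://app.example.com/api/orgs/ORG_ID/crawls/CRAWL_ID/qa/QA_RUN_ID/pages",
    params={"qaFilterBy": "screenshotMatch", "gte": 0.9},
    headers={"Authorization": "Bearer ACCESS_TOKEN"},
)
resp.raise_for_status()
pages = resp.json()
```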

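To make the run-vs-finished storage above concrete, here is a minimal sketch of how the models could fit together; field names follow this commit message, but the real models in `btrixcloud.models` carry many more fields:

```python
from typing import Dict, Optional

from pydantic import BaseModel


class CoreCrawlable(BaseModel):
    # Illustrative shared "core crawl" fields; the real CoreCrawlable has more.
    id: str
    state: str
    started: Optional[str] = None
    finished: Optional[str] = None


class QARun(CoreCrawlable):
    """A QA run: a crawl performed on data loaded from an existing crawl."""


class Crawl(CoreCrawlable):
    # One in-progress QA run, plus finished runs keyed by QA run id.
    qa: Optional[QARun] = None
    qaFinished: Dict[str, QARun] = {}
```
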
---------
Co-authored-by: Tessa Walsh <tessa@bitarchivist.net>
2024-03-20 22:42:16 -07:00

116 lines
4.0 KiB
Python

"""
Migration 0004 - Ensuring all config.seeds are Seeds not HttpUrls
"""
from pydantic import HttpUrl

from btrixcloud.models import Crawl, CrawlConfig, ScopeType, Seed
from btrixcloud.migrations import BaseMigration


MIGRATION_VERSION = "0004"


class Migration(BaseMigration):
    """Migration class."""

    # pylint: disable=unused-argument
    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)

    async def migrate_up(self):
        """Perform migration up.

        Convert any crawlconfig.config.seed HttpUrl values to Seeds with url value.
        """
        # pylint: disable=too-many-branches
        # Migrate workflows
        crawl_configs = self.mdb["crawl_configs"]
        async for config_dict in crawl_configs.find({}):
            seeds_to_migrate = []
            seed_dicts = []

            seed_list = config_dict["config"]["seeds"]
            for seed in seed_list:
                if isinstance(seed, HttpUrl):
                    new_seed = Seed(url=str(seed.url), scopeType=ScopeType.PAGE)
                    seeds_to_migrate.append(new_seed)
                elif isinstance(seed, str):
                    new_seed = Seed(url=str(seed), scopeType=ScopeType.PAGE)
                    seeds_to_migrate.append(new_seed)
                elif isinstance(seed, Seed):
                    seeds_to_migrate.append(seed)

            for seed in seeds_to_migrate:
                seed_dict = {
                    "url": str(seed.url),
                    "scopeType": seed.scopeType,
                    "include": seed.include,
                    "exclude": seed.exclude,
                    "sitemap": seed.sitemap,
                    "allowHash": seed.allowHash,
                    "depth": seed.depth,
                    "extraHops": seed.extraHops,
                }
                seed_dicts.append(seed_dict)

            if seed_dicts:
                await crawl_configs.find_one_and_update(
                    {"_id": config_dict["_id"]},
                    {"$set": {"config.seeds": seed_dicts}},
                )

        # Migrate seeds copied into crawls
        crawls = self.mdb["crawls"]
        async for crawl_dict in crawls.find({}):
            seeds_to_migrate = []
            seed_dicts = []

            seed_list = crawl_dict["config"]["seeds"]
            for seed in seed_list:
                if isinstance(seed, HttpUrl):
                    new_seed = Seed(url=str(seed.url), scopeType=ScopeType.PAGE)
                    seeds_to_migrate.append(new_seed)
                elif isinstance(seed, str):
                    new_seed = Seed(url=str(seed), scopeType=ScopeType.PAGE)
                    seeds_to_migrate.append(new_seed)
                elif isinstance(seed, Seed):
                    seeds_to_migrate.append(seed)

            for seed in seeds_to_migrate:
                seed_dict = {
                    "url": str(seed.url),
                    "scopeType": seed.scopeType,
                    "include": seed.include,
                    "exclude": seed.exclude,
                    "sitemap": seed.sitemap,
                    "allowHash": seed.allowHash,
                    "depth": seed.depth,
                    "extraHops": seed.extraHops,
                }
                seed_dicts.append(seed_dict)

            if seed_dicts:
                await crawls.find_one_and_update(
                    {"_id": crawl_dict["_id"]},
                    {"$set": {"config.seeds": seed_dicts}},
                )

        # Test migration
        async for config_dict in crawl_configs.find({}):
            config = CrawlConfig.from_dict(config_dict)
            seeds = config.config.seeds or []
            for seed in seeds:
                assert isinstance(seed, Seed)
                assert seed.url

        async for crawl_dict in crawls.find({}):
            crawl = Crawl.from_dict(crawl_dict)
            seeds = crawl.config.seeds or []
            for seed in seeds:
                assert isinstance(seed, Seed)
                assert seed.url
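
For ad-hoc local testing, a migration like this one can be exercised directly by instantiating it with a motor database handle and awaiting `migrate_up()`. The sketch below assumes a local MongoDB and a placeholder database name; in a real deployment the Browsertrix backend decides when migrations run:

```python
import asyncio

from motor.motor_asyncio import AsyncIOMotorClient

from btrixcloud.migrations.migration_0004_config_seeds import Migration


async def main():
    # Placeholder connection string and database name -- adjust for your setup.
    mdb = AsyncIOMotorClient("mongodb://localhost:27017")["btrix"]
    await Migration(mdb).migrate_up()


if __name__ == "__main__":
    asyncio.run(main())
```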