browsertrix/backend/btrixcloud/migrations/migration_0024_crawlerchannel.py
Tessa Walsh 14189b7cfb
Add crawl pages and related API endpoints (#1516)
Fixes #1502 

- Adds pages to the database as they get added to Redis during a crawl
- Adds a migration to backfill pages for older crawls from the pages.jsonl
and extraPages.jsonl files in their WACZ files
- Adds GET, list GET, and PATCH update endpoints for pages
- Adds POST (add), PATCH, and POST (delete) endpoints for page notes,
each with its own id, timestamp, and user info in addition to text
- Adds page_ops methods for (1) adding resources/URLs to a page, and (2)
adding automated heuristics and supplemental info (mime, type, etc.) to a
page, for use in the crawl QA job
- Modifies the `Migration` class to accept kwargs so that ops classes can
be passed in as needed for migrations (see the sketch after this list)
- Deletes WACZ files and pages from the database for failed crawls during
the crawl_finished process
- Deletes crawl pages when a crawl is deleted
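
A minimal sketch of the kwargs mechanism mentioned above, assuming a
hypothetical `page_ops` keyword supplied by the migration runner (the kwarg
name, the method call, and the version string are illustrative, not the
confirmed interface):

```python
"""
Illustrative sketch -- a migration that consumes an ops class via kwargs
"""

from btrixcloud.migrations import BaseMigration

MIGRATION_VERSION = "00XX"  # placeholder version for illustration only


class Migration(BaseMigration):
    """Example migration that needs page ops."""

    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)
        # The runner can now pass ops classes through as keyword arguments;
        # default to None so the migration can still be constructed without them
        self.page_ops = kwargs.get("page_ops")

    async def migrate_up(self):
        """Backfill pages for older crawls (hypothetical body)."""
        if self.page_ops is None:
            print("page_ops not supplied, skipping page backfill", flush=True)
            return
        # hypothetical call, e.g.:
        # await self.page_ops.add_pages_for_older_crawls()
```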

Note: Requires crawler version 1.0.0 beta3 or later, with support for
`--writePagesToRedis` to populate pages at crawl completion. Beta 4 is
configured in the test chart and should be upgraded to stable 1.0.0 when
it is released.
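
For context, a rough sketch of what consuming those Redis-written pages could
look like on the backend side. The key name `f"{crawl_id}:pages"`, the JSON
shape, and the `drain_pages` helper are assumptions for illustration, not the
confirmed crawler/backend interface:

```python
"""
Illustrative sketch -- draining crawler-written pages from Redis
"""

import json

import redis.asyncio as aioredis


async def drain_pages(redis_url: str, crawl_id: str) -> list[dict]:
    """Pop page JSON entries the crawler pushed at crawl completion."""
    redis = aioredis.from_url(redis_url, decode_responses=True)
    key = f"{crawl_id}:pages"  # hypothetical key written via --writePagesToRedis
    pages = []
    while True:
        page_json = await redis.lpop(key)  # consume one entry at a time
        if page_json is None:
            break  # list drained
        pages.append(json.loads(page_json))
    return pages
```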

Connected to https://github.com/webrecorder/browsertrix-crawler/pull/464

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
2024-02-28 12:11:35 -05:00

74 lines
2.5 KiB
Python

"""
Migration 0024 -- crawlerChannel
"""

from btrixcloud.crawlmanager import CrawlManager
from btrixcloud.migrations import BaseMigration
from btrixcloud.models import CrawlConfig, UpdateCrawlConfig


MIGRATION_VERSION = "0024"


class Migration(BaseMigration):
    """Migration class."""

    # pylint: disable=unused-argument
    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)

    async def migrate_up(self):
        """Perform migration up.

        Add crawlerChannel to existing workflows and profiles, and update configmaps
        """
        # pylint: disable=duplicate-code
        mdb_crawl_configs = self.mdb["crawl_configs"]
        mdb_profiles = self.mdb["profiles"]

        match_query = {"crawlerChannel": {"$in": ["", None]}}

        # Snapshot the matching workflows up front: once crawlerChannel is set
        # to "default" below, these documents no longer match match_query, so
        # the configmap pass at the end cannot simply re-run the same find()
        configs_to_update = [
            config async for config in mdb_crawl_configs.find(match_query)
        ]

        for config_dict in configs_to_update:
            config_id = config_dict["_id"]
            try:
                await mdb_crawl_configs.find_one_and_update(
                    {"_id": config_id},
                    {"$set": {"crawlerChannel": "default"}},
                )
            # pylint: disable=broad-except
            except Exception as err:
                print(
                    f"Error adding crawlerChannel 'default' to workflow {config_id}: {err}",
                    flush=True,
                )

        async for profile in mdb_profiles.find(match_query):
            profile_id = profile["_id"]
            try:
                await mdb_profiles.find_one_and_update(
                    {"_id": profile_id},
                    {"$set": {"crawlerChannel": "default"}},
                )
            # pylint: disable=broad-except
            except Exception as err:
                print(
                    f"Error adding crawlerChannel 'default' to profile {profile_id}: {err}",
                    flush=True,
                )

        # Update configmaps for the workflows snapshotted above
        crawl_manager = CrawlManager()
        for config_dict in configs_to_update:
            config = CrawlConfig.from_dict(config_dict)
            try:
                await crawl_manager.update_crawl_config(
                    config, UpdateCrawlConfig(crawlerChannel="default")
                )
            # pylint: disable=broad-except
            except Exception as exc:
                print(
                    "Skip configmap migration due to error, likely missing config",
                    exc,
                )