browsertrix/backend/btrixcloud/migrations/migration_0005_operator_scheduled_jobs.py
Tessa Walsh 14189b7cfb
Add crawl pages and related API endpoints (#1516)
Fixes #1502 

- Adds pages to the database as they are added to Redis during a crawl
- Adds a migration to backfill pages for older crawls from the
pages.jsonl and extraPages.jsonl files in each WACZ
- Adds GET, list GET, and PATCH update endpoints for pages
- Adds POST (add), PATCH, and POST (delete) endpoints for page notes,
each with its own id, timestamp, and user info in addition to text
- Adds page_ops methods for (1) adding resources/urls to a page, and
(2) adding automated heuristics and supplemental info (mime, type,
etc.) to a page, for use in the crawl QA job
- Modifies the `Migration` class to accept kwargs so that ops classes
can be passed in as needed for migrations (see the sketch after this
list)
- Deletes WACZ files and pages from the database for failed crawls
during the `crawl_finished` process
- Deletes crawl pages when a crawl is deleted
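
A minimal sketch of the new `Migration` kwargs pattern, assuming a
hypothetical `page_ops` keyword (the actual ops class names passed by
the migration runner may differ):

```python
from btrixcloud.migrations import BaseMigration

MIGRATION_VERSION = "0006"  # illustrative version, not a real migration


class Migration(BaseMigration):
    """Example migration that receives an ops class via kwargs."""

    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)
        # The runner can pass the same set of ops classes to every
        # migration; each one picks out what it needs, ignores the rest.
        self.page_ops = kwargs.get("page_ops")

    async def migrate_up(self):
        """Use the injected ops class, if provided."""
        if self.page_ops:
            ...  # e.g., backfill pages via self.page_ops
```

Migrations that don't need any ops classes, like 0005 below, simply
accept and ignore `**kwargs`.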

Note: Requires crawler version 1.0.0 beta3 or later, which supports
`--writePagesToRedis` to populate pages at crawl completion. Beta 4 is
configured in the test chart and should be upgraded to stable 1.0.0
once it's released.

Connected to https://github.com/webrecorder/browsertrix-crawler/pull/464

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
2024-02-28 12:11:35 -05:00


"""
Migration 0005 - Updating scheduled cron jobs after Operator changes
"""
from btrixcloud.models import CrawlConfig, UpdateCrawlConfig
from btrixcloud.crawlmanager import CrawlManager
from btrixcloud.migrations import BaseMigration
MIGRATION_VERSION = "0005"
class Migration(BaseMigration):
    """Migration class."""

    # pylint: disable=unused-argument
    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)

    async def migrate_up(self):
        """Perform migration up.

        Find existing workflows with schedule and create new crawl_cron_jobs
        from template for each, then delete existing scheduled jobs from
        crawler namespace.

        Additionally update the configmap for crawl configs with scale > 1
        or crawlTimeout > 0.
        """
        # pylint: disable=too-many-locals, duplicate-code
        crawl_configs = self.mdb["crawl_configs"]
        crawl_manager = CrawlManager()
        # Update configmap for crawl configs that have a non-zero timeout,
        # scale > 1, or a non-empty schedule
        match_query = {
            "$or": [
                {"crawlTimeout": {"$gt": 0}},
                {"scale": {"$gt": 1}},
                {"schedule": {"$nin": ["", None]}},
            ]
        }
        async for config_dict in crawl_configs.find(match_query):
            config = CrawlConfig.from_dict(config_dict)
            print(
                f"Updating Crawl Config {config.id}: schedule: {config.schedule}, "
                + f"timeout: {config.crawlTimeout}, scale: {config.scale}"
            )
            try:
                await crawl_manager.update_crawl_config(
                    config,
                    UpdateCrawlConfig(
                        scale=config.scale,
                        crawlTimeout=config.crawlTimeout,
                        schedule=config.schedule,
                    ),
                )
            # pylint: disable=broad-except
            except Exception as exc:
                print(
                    "Skipping crawl config migration due to error, likely missing config",
                    exc,
                )
        # Delete existing scheduled jobs from the crawler namespace,
        # then verify the namespace contains no remaining cronjobs
        print("Deleting cronjobs from crawler namespace")
        await crawl_manager.batch_api.delete_collection_namespaced_cron_job(
            namespace=crawl_manager.namespace
        )
        result = await crawl_manager.batch_api.list_namespaced_cron_job(
            namespace=crawl_manager.namespace
        )
        assert len(result.items) == 0
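
For reference, a minimal, hypothetical driver for running this
migration by hand against a local MongoDB; the module path follows this
repo's layout, while the connection string and database name are
assumptions:

```python
import asyncio

from motor.motor_asyncio import AsyncIOMotorClient

from btrixcloud.migrations.migration_0005_operator_scheduled_jobs import Migration


async def main():
    # Assumes a reachable MongoDB and valid Kubernetes credentials for
    # the batch API that CrawlManager uses to manage cronjobs.
    client = AsyncIOMotorClient("mongodb://localhost:27017")
    migration = Migration(client["browsertrix"])
    await migration.migrate_up()


if __name__ == "__main__":
    asyncio.run(main())
```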