Fixes #1502

- Adds pages to database as they get added to Redis during crawl
- Adds migration to add pages to database for older crawls from pages.jsonl and extraPages.jsonl files in WACZ
- Adds GET, list GET, and PATCH update endpoints for pages
- Adds POST (add), PATCH, and POST (delete) endpoints for page notes, each with its own id, timestamp, and user info in addition to text (see the sketch after this list)
- Adds page_ops methods for (1) adding resources/urls to a page and (2) adding automated heuristics and supplemental info (mime, type, etc.) to a page, for use in the crawl QA job
- Modifies `Migration` class to accept kwargs so that we can pass in ops classes as needed for migrations
- Deletes WACZ files and pages from database for failed crawls during the crawl_finished process
- Deletes crawl pages when a crawl is deleted

Note: Requires crawler version 1.0.0 beta 3 or later, with support for `--writePagesToRedis` to populate pages at crawl completion. Beta 4 is configured in the test chart, which should be upgraded to stable 1.0.0 when it's released.

Connected to https://github.com/webrecorder/browsertrix-crawler/pull/464

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
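For illustration only, a minimal sketch of the per-note shape described in the page-notes item above, assuming Pydantic models as used in the FastAPI backend; the field names are guesses at the schema, not taken from the PR:

```python
from datetime import datetime
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class PageNote(BaseModel):
    """Illustrative only: each note carries its own id, creation
    timestamp, and user info alongside the note text."""

    id: UUID = Field(default_factory=uuid4)
    created: datetime
    userid: UUID
    userName: str
    text: str
```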
73 lines · 2.1 KiB · Python
"""
|
|
Migration 0001 - Archives to Orgs
|
|
"""
|
|
|
|
import os
|
|
|
|
from pymongo.errors import OperationFailure
|
|
|
|
from btrixcloud.migrations import BaseMigration
|
|
from btrixcloud.k8sapi import K8sAPI
|
|
|
|
|
|
MIGRATION_VERSION = "0001"
|
|
|
|
|
|

class Migration(BaseMigration):
    """Migration class."""

    COLLECTIONS_AID_TO_OID = [
        "collections",
        "crawl_configs",
        "crawls",
        "invites",
        "profiles",
    ]

    # pylint: disable=unused-argument
    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)

    async def migrate_up(self):
        """Perform migration up."""
        # Rename archives collection to organizations
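        # dropTarget=True replaces any existing "organizations" collection;
        # an OperationFailure (e.g. no "archives" collection on a fresh
        # install) is logged and ignored so the rest of the migration runs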
        org_collection = self.mdb["archives"]
        try:
            await org_collection.rename("organizations", dropTarget=True)
        except OperationFailure as err:
            print(f"Error renaming archives to organizations: {err}")

        # Rename aid fields to oid
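        # $rename moves each document's "aid" value to a new "oid" field;
        # documents without an "aid" field are left untouched, so re-running
        # this step is harmless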
        for collection in self.COLLECTIONS_AID_TO_OID:
            current_coll = self.mdb[collection]
            await current_coll.update_many({}, {"$rename": {"aid": "oid"}})

        # Update k8s configmaps
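        # Only crawler configmaps carry an ARCHIVE_ID data key; any configmap
        # without one is skipped. For the rest, ARCHIVE_ID and the
        # btrix.archive label are rewritten to ORG_ID and btrix.org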
        k8s_api_instance = K8sAPI()
        crawler_namespace = os.environ.get("CRAWLER_NAMESPACE") or "crawlers"
        config_map = await k8s_api_instance.core_api.list_namespaced_config_map(
            namespace=crawler_namespace
        )
        for item in config_map.items:
            item_name = item.metadata.name
            try:
                org_id = item.data["ARCHIVE_ID"]
            except KeyError:
                continue

            item.data["ORG_ID"] = org_id
            try:
                item.data.pop("ARCHIVE_ID")
            except KeyError:
                pass

            item.metadata.labels["btrix.org"] = org_id
            try:
                item.metadata.labels.pop("btrix.archive")
            except KeyError:
                pass

            await k8s_api_instance.core_api.patch_namespaced_config_map(
                name=item_name, namespace=crawler_namespace, body=item
            )
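This file predates the kwargs change called out in the description (its `__init__` accepts and ignores `**kwargs`), but the point of that change is to let later migrations receive ops classes. A minimal sketch of that pattern, assuming the kwarg is named `page_ops` as in this PR; the migration version and backfill method name are hypothetical placeholders:

```python
from btrixcloud.migrations import BaseMigration


class Migration(BaseMigration):
    """Sketch only: a later migration consuming an ops class via kwargs."""

    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version="00xx")  # placeholder version
        # ops classes are passed in by the migration runner as kwargs
        self.page_ops = kwargs.get("page_ops")

    async def migrate_up(self):
        """Backfill pages for older crawls from their WACZ files."""
        if self.page_ops is None:
            print("page_ops not passed in, skipping migration")
            return
        # hypothetical helper that reads pages.jsonl / extraPages.jsonl
        await self.page_ops.add_pages_for_older_crawls()
```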