Fixes #1502

- Adds pages to database as they get added to Redis during crawl
- Adds migration to add pages to database for older crawls from pages.jsonl and extraPages.jsonl files in WACZ
- Adds GET, list GET, and PATCH update endpoints for pages
- Adds POST (add), PATCH, and POST (delete) endpoints for page notes, each with their own id, timestamp, and user info in addition to text (a hedged sketch of that shape follows this list)
- Adds page_ops methods for 1. adding resources/urls to a page, and 2. adding automated heuristics and supplemental info (mime, type, etc.) to a page (for use in the crawl QA job)
- Modifies `Migration` class to accept kwargs so that we can pass in ops classes as needed for migrations (see the sketch after the migration file below)
- Deletes WACZ files and pages from database for failed crawls during the crawl_finished process
- Deletes crawl pages when a crawl is deleted

Note: Requires crawler version 1.0.0 beta3 or later, with support for `--writePagesToRedis` to populate pages at crawl completion. Beta 4 is configured in the test chart, which should be upgraded to stable 1.0.0 when it's released.

Connected to https://github.com/webrecorder/browsertrix-crawler/pull/464

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
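For illustration, a minimal sketch of what a page note payload with the fields described above could look like. This assumes pydantic-style models; the model and field names (`PageNote`, `created`, `userid`, `userName`) are placeholders, not the actual Browsertrix models introduced by this PR.

```python
from datetime import datetime
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class PageNote(BaseModel):
    """Hypothetical shape of a page note: its own id, timestamp, and user info plus text."""

    id: UUID = Field(default_factory=uuid4)
    created: datetime = Field(default_factory=datetime.utcnow)
    userid: UUID
    userName: str
    text: str
```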
The updated migration 0023 (extra/gifted execution minutes), with the constructor now accepting `**kwargs`:

```python
"""
Migration 0023 -- Available extra/gifted minutes
"""

from btrixcloud.migrations import BaseMigration


MIGRATION_VERSION = "0023"


class Migration(BaseMigration):
    """Migration class."""

    # pylint: disable=unused-argument
    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)

    async def migrate_up(self):
        """Perform migration up.

        Add extraExecSecondsAvailable and giftedExecSecondsAvailable to org.
        Initialize at 0 to avoid them being None.

        Also add monthlyExecSeconds and copy previous crawlExecSeconds values
        to it.
        """
        # pylint: disable=duplicate-code
        mdb_orgs = self.mdb["organizations"]

        query = {
            "extraExecSecondsAvailable": None,
            "giftedExecSecondsAvailable": None,
        }
        async for org in mdb_orgs.find(query):
            oid = org["_id"]
            try:
                await mdb_orgs.find_one_and_update(
                    {"_id": oid},
                    {
                        "$set": {
                            "extraExecSecondsAvailable": 0,
                            "giftedExecSecondsAvailable": 0,
                        }
                    },
                )
            # pylint: disable=broad-exception-caught
            except Exception as err:
                print(
                    f"Error adding exec seconds available fields to org {oid}: {err}",
                    flush=True,
                )

        async for org in mdb_orgs.find({"monthlyExecSeconds": None}):
            oid = org["_id"]
            try:
                await mdb_orgs.update_one(
                    {"_id": oid},
                    [{"$set": {"monthlyExecSeconds": "$crawlExecSeconds"}}],
                )
            # pylint: disable=broad-exception-caught
            except Exception as err:
                print(
                    f"Error copying crawlExecSeconds to monthlyExecSeconds for org {oid}: {err}",
                    flush=True,
                )
```
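Because `BaseMigration` subclasses now take `**kwargs`, a migration that needs an ops class can pull it out of the keyword arguments passed in by the migration runner. Below is a minimal sketch of that pattern; the migration number, the `page_ops` keyword, the `re_add_crawl_pages` helper, and the query fields are assumptions for illustration, not names confirmed by this PR.

```python
from btrixcloud.migrations import BaseMigration

MIGRATION_VERSION = "0024"  # hypothetical follow-up migration


class Migration(BaseMigration):
    """Hypothetical migration that backfills pages using an injected ops class."""

    def __init__(self, mdb, **kwargs):
        super().__init__(mdb, migration_version=MIGRATION_VERSION)
        # Ops classes are passed in by the migration runner via kwargs;
        # the "page_ops" key here is an assumption for illustration.
        self.page_ops = kwargs.get("page_ops")

    async def migrate_up(self):
        """Backfill pages for existing crawls (sketch only)."""
        if not self.page_ops:
            print("page_ops not available, skipping migration", flush=True)
            return

        crawls = self.mdb["crawls"]
        # Field names in this query are illustrative, not the actual schema.
        async for crawl in crawls.find({"finished": {"$ne": None}}):
            try:
                # Hypothetical helper that reads pages.jsonl / extraPages.jsonl
                # from the crawl's WACZ files and adds the pages to the database.
                await self.page_ops.re_add_crawl_pages(crawl["_id"])
            # pylint: disable=broad-exception-caught
            except Exception as err:
                print(f"Error adding pages for crawl {crawl['_id']}: {err}", flush=True)
```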