Fixes #1502

- Adds pages to database as they get added to Redis during crawl (see the Redis import sketch below)
- Adds migration to add pages to database for older crawls from pages.jsonl and extraPages.jsonl files in WACZ
- Adds GET, list GET, and PATCH update endpoints for pages
- Adds POST (add), PATCH, and POST (delete) endpoints for page notes, each with their own id, timestamp, and user info in addition to text (a model sketch follows below)
- Adds page_ops methods for (1) adding resources/urls to a page and (2) adding automated heuristics and supplemental info (mime, type, etc.) to a page, for use in the crawl QA job (sketched below)
- Modifies `Migration` class to accept kwargs so that we can pass in ops classes as needed for migrations
- Deletes WACZ files and pages from database for failed crawls during the crawl_finished process
- Deletes crawl pages when a crawl is deleted

Note: Requires crawler version 1.0.0 beta 3 or later, with support for `--writePagesToRedis` to populate pages at crawl completion. Beta 4 is configured in the test chart, which should be upgraded to stable 1.0.0 when it's released.

Connected to https://github.com/webrecorder/browsertrix-crawler/pull/464

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
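A minimal sketch of the page-note shape described above, assuming Pydantic models in the style of the rest of the backend. Class and field names here (`PageNote`, `userName`, the embedded `notes` list) are illustrative, not necessarily the ones in this PR:

```python
from datetime import datetime
from typing import List, Optional
from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class PageNote(BaseModel):
    """A single note on a page. Each note carries its own id,
    timestamp, and user info so PATCH/delete can target it."""

    id: UUID = Field(default_factory=uuid4)
    text: str
    created: datetime = Field(default_factory=datetime.utcnow)
    userid: UUID
    userName: str


class Page(BaseModel):
    """Subset of a page document, with notes embedded."""

    id: UUID
    oid: UUID
    crawl_id: str
    url: str
    title: Optional[str] = None
    notes: List[PageNote] = []
```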
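For the first item in the list, a hedged sketch of how the operator might drain pages the crawler wrote to Redis via `--writePagesToRedis` and insert them into the database. The `<crawl_id>:pages` key, the JSON shape, and the injected `pages_coll` (assumed to be a Motor collection) are assumptions, not taken from the diff:

```python
import json

from redis import asyncio as aioredis


async def add_crawl_pages_from_redis(redis: aioredis.Redis, crawl_id: str, pages_coll):
    """Pop page entries the crawler pushed to Redis and insert them
    into the pages collection. Key name and payload shape are assumed."""
    key = f"{crawl_id}:pages"
    while True:
        page_json = await redis.lpop(key)
        if not page_json:
            break
        page = json.loads(page_json)
        page["crawl_id"] = crawl_id
        await pages_coll.insert_one(page)
```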
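The two new page_ops methods could look roughly like this; the method names, the `resources` array, and the update shapes are assumptions used for illustration only:

```python
from typing import Dict, List, Optional
from uuid import UUID


class PageOps:
    """Sketch of the two page_ops methods described above;
    names and document shapes are assumptions."""

    def __init__(self, pages_coll):
        self.pages = pages_coll

    async def add_page_resources(self, page_id: UUID, resources: List[Dict]) -> None:
        """(1) Attach resource/url entries captured for the page."""
        await self.pages.find_one_and_update(
            {"_id": page_id}, {"$push": {"resources": {"$each": resources}}}
        )

    async def add_qa_info(
        self,
        page_id: UUID,
        mime: Optional[str] = None,
        page_type: Optional[str] = None,
    ) -> None:
        """(2) Record automated heuristics / supplemental info
        (mime, type, etc.) for use by the crawl QA job."""
        update = {k: v for k, v in {"mime": mime, "type": page_type}.items() if v}
        if update:
            await self.pages.find_one_and_update({"_id": page_id}, {"$set": update})
```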
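One file from the diff is reproduced below: migration 0022 ("Partial Complete"). Note that its `__init__` takes `**kwargs`, matching the `Migration` class change described above: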
| """
 | |
| Migration 0022 -- Partial Complete
 | |
| """
 | |
| 
 | |
| from btrixcloud.migrations import BaseMigration
 | |
| 
 | |
| 
 | |
| MIGRATION_VERSION = "0022"
 | |
| 
 | |
| 
 | |
| class Migration(BaseMigration):
 | |
|     """Migration class."""
 | |
| 
 | |
|     # pylint: disable=unused-argument
 | |
|     def __init__(self, mdb, **kwargs):
 | |
|         super().__init__(mdb, migration_version=MIGRATION_VERSION)
 | |
| 
 | |
|     async def migrate_up(self):
 | |
|         """Perform migration up.
 | |
| 
 | |
|         Convert partial_complete -> complete, stopped_by_user or stopped_quota_reached
 | |
|         """
 | |
|         # pylint: disable=duplicate-code
 | |
|         crawls = self.mdb["crawls"]
 | |
|         crawl_configs = self.mdb["crawl_configs"]
 | |
| 
 | |
|         await crawls.update_many(
 | |
|             {"state": "partial_complete", "stopping": True},
 | |
|             {"$set": {"state": "stopped_by_user"}},
 | |
|         )
 | |
|         await crawls.update_many(
 | |
|             {"state": "partial_complete", "stopping": {"$ne": True}},
 | |
|             {"$set": {"state": "complete"}},
 | |
|         )
 | |
| 
 | |
|         async for config in crawl_configs.find({"lastCrawlState": "partial_complete"}):
 | |
|             crawl = await crawls.find_one({"_id": config.get("lastCrawlId")})
 | |
|             if not crawl:
 | |
|                 continue
 | |
| 
 | |
|             await crawl_configs.find_one_and_update(
 | |
|                 {"_id": config.get("_id")},
 | |
|                 {"$set": {"lastCrawlState": crawl.get("state")}},
 | |
|             )
 |