* optimizations:
  - rename update_crawl_config_stats to stats_recompute_all; it is now used only in the migration to fetch all crawls and do a full recompute of all file sizes
  - add stats_recompute_last to fetch only the last crawl, increment the total size by a specified amount, and incr/decr the number of crawls
  - update migration 0007 to use stats_recompute_all
  - add isCrawlRunning, lastCrawlStopping, and lastRun to stats_recompute_last
  - increment crawlSuccessfulCount in stats_recompute_last

* operator/crawls:
  - operator: keep track of filesAddedSize in redis as well
  - rename update_crawl to update_crawl_state_if_changed(), which updates only if the state is different and otherwise returns false (see the sketch after this list)
  - ensure mark_finished() operations only occur if the crawl state has changed
  - don't clear the 'stopping' flag, so we can track whether a crawl was stopped
  - state always starts as "starting"; don't reset it to "starting"

* tests:
  - add test for incremental workflow stats updating
  - don't clear stopping==true, which indicates the crawl was manually stopped

---------

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
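The rename to update_crawl_state_if_changed() amounts to an atomic compare-and-set on the crawl document, which is what keeps mark_finished() from running its side effects twice. A minimal sketch of the idea, assuming an async Motor collection for crawls; names and any extra bookkeeping in the real btrixcloud code may differ:

# Hedged sketch, not the btrixcloud source: update the state atomically,
# and only when it actually changes, so concurrent operator reconcile
# loops cannot repeat finish-time side effects.
async def update_crawl_state_if_changed(crawls, crawl_id, new_state, **kwargs):
    """Set the crawl state only if it differs; return False if unchanged."""
    result = await crawls.find_one_and_update(
        # match only when the stored state differs from the target state
        {"_id": crawl_id, "state": {"$ne": new_state}},
        {"$set": {"state": new_state, **kwargs}},
    )
    return result is not None

mark_finished() can then gate its one-time work (stats updates, cleanup) on this returning True.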
Migration file (Python, 48 lines, 1.6 KiB):
| """
 | |
| Migration 0007 - Workflows changes
 | |
| 
 | |
| - Rename colls to autoAddCollections 
 | |
| - Re-calculate workflow crawl stats to populate crawlSuccessfulCount
 | |
| """
 | |
| from btrixcloud.crawlconfigs import stats_recompute_all
 | |
| from btrixcloud.migrations import BaseMigration
 | |
| 
 | |
| 
 | |
| MIGRATION_VERSION = "0007"
 | |
| 
 | |
| 
 | |
| class Migration(BaseMigration):
 | |
|     """Migration class."""
 | |
| 
 | |
|     def __init__(self, mdb, migration_version=MIGRATION_VERSION):
 | |
|         super().__init__(mdb, migration_version)
 | |
| 
 | |
|     async def migrate_up(self):
 | |
|         """Perform migration up."""
 | |
|         # pylint: disable=duplicate-code
 | |
|         crawl_configs = self.mdb["crawl_configs"]
 | |
|         crawls = self.mdb["crawls"]
 | |
| 
 | |
|         # Update workflows crawl stats to populate crawlSuccessfulCount
 | |
|         configs = [res async for res in crawl_configs.find({"inactive": {"$ne": True}})]
 | |
|         if not configs:
 | |
|             return
 | |
| 
 | |
|         for config in configs:
 | |
|             config_id = config["_id"]
 | |
|             try:
 | |
|                 await stats_recompute_all(crawl_configs, crawls, config_id)
 | |
|             # pylint: disable=broad-exception-caught
 | |
|             except Exception as err:
 | |
|                 print(f"Unable to update workflow {config_id}: {err}", flush=True)
 | |
| 
 | |
|         # Make sure crawls have collections array
 | |
|         await crawls.update_many({"collections": None}, {"$set": {"collections": []}})
 | |
| 
 | |
|         # Rename colls to autoAddCollections
 | |
|         await crawl_configs.update_many({}, {"$unset": {"autoAddCollections": 1}})
 | |
|         await crawl_configs.update_many(
 | |
|             {}, {"$rename": {"colls": "autoAddCollections"}}
 | |
|         )
 | |
|         await crawl_configs.update_many({}, {"$unset": {"colls": 1}})
 |
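For context, here is a hedged sketch of the full recompute this migration relies on. It is illustrative only: field names such as "cid", "finished", "files", and "size" are assumptions, not the actual btrixcloud schema, and the real stats_recompute_all in btrixcloud.crawlconfigs may differ.

# Illustrative sketch only -- not the btrixcloud source; field names
# ("cid", "finished", "files", "size") are assumed for illustration.
async def stats_recompute_all(crawl_configs, crawls, cid):
    """Fully re-aggregate crawl stats for one workflow (migration-only path)."""
    stats = {"crawlCount": 0, "crawlSuccessfulCount": 0, "totalSize": 0}
    async for crawl in crawls.find({"cid": cid, "finished": {"$ne": None}}):
        stats["crawlCount"] += 1
        if crawl.get("state") == "complete":
            stats["crawlSuccessfulCount"] += 1
        # sum the sizes of all files produced by this crawl
        stats["totalSize"] += sum(f.get("size", 0) for f in crawl.get("files", []))
    await crawl_configs.find_one_and_update({"_id": cid}, {"$set": stats})

The incremental stats_recompute_last path from the commit message would instead $inc these counters with the last crawl's deltas, avoiding a full scan on every crawl completion.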