Fixes #1893

- Removes crawl workflow-scoped configmaps and replaces them with operator-controlled per-crawl configmaps that contain only the JSON config passed to Browsertrix Crawler (mounted as a volume).
- Other configmap settings are replaced by custom CrawlJob options (most already were; `profile_filename` and `storage_filename` have been added). A sketch of such a per-crawl configmap follows this list.
- Cron jobs are also updated to create CrawlJobs without relying on configmaps, querying the db for additional settings.
- The `userid` associated with cron jobs is set to the user that last modified the schedule of the crawl, rather than whoever last modified the workflow.
- Various functions that deal with updating configmaps have been removed, including in migrations.
- New migration 0029 added to remove all crawl workflow configmaps.
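For reference, an operator-controlled per-crawl configmap as described above would hold only the JSON config passed to Browsertrix Crawler, mounted into the crawler pod as a volume. A minimal sketch of building one (the helper name, key name, label, and naming scheme are illustrative assumptions, not the actual operator code):

```python
# Sketch only: builds a per-crawl ConfigMap whose sole contents are the
# crawler JSON config; the operator would mount it into crawler pods as a
# volume. All names here are assumptions for illustration.
import json

from kubernetes_asyncio import client


def make_crawl_configmap(crawl_id: str, crawler_config: dict, namespace: str):
    """Build a ConfigMap containing only the Browsertrix Crawler JSON config."""
    return client.V1ConfigMap(
        metadata=client.V1ObjectMeta(
            name=f"crawl-config-{crawl_id}",  # hypothetical naming scheme
            namespace=namespace,
            labels={"crawl": crawl_id},  # lets the operator track and clean it up
        ),
        data={"crawl-config.json": json.dumps(crawler_config)},
    )
```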
The migration 0005 file as included in this PR (66 lines · 2.3 KiB · Python):
"""
 | 
						|
Migration 0005 - Updating scheduled cron jobs after Operator changes
 | 
						|
"""
 | 
						|
 | 
						|
from btrixcloud.models import CrawlConfig
 | 
						|
from btrixcloud.crawlmanager import CrawlManager
 | 
						|
from btrixcloud.migrations import BaseMigration
 | 
						|
 | 
						|
 | 
						|
MIGRATION_VERSION = "0005"
 | 
						|
 | 
						|
 | 
						|
class Migration(BaseMigration):
 | 
						|
    """Migration class."""
 | 
						|
 | 
						|
    # pylint: disable=unused-argument
 | 
						|
    def __init__(self, mdb, **kwargs):
 | 
						|
        super().__init__(mdb, migration_version=MIGRATION_VERSION)
 | 
						|
 | 
						|
    async def migrate_up(self):
 | 
						|
        """Perform migration up.
 | 
						|
 | 
						|
        Find existing workflows with schedule and create new crawl_cron_jobs
 | 
						|
        from template for each, then delete existing scheduled jobs from
 | 
						|
        crawler namespace.
 | 
						|
 | 
						|
        Additionally update the configmap for crawl configs with scale > 1
 | 
						|
        or crawlTimeout > 0.
 | 
						|
        """
 | 
						|
        # pylint: disable=too-many-locals, duplicate-code
 | 
						|
        crawl_configs = self.mdb["crawl_configs"]
 | 
						|
        crawl_manager = CrawlManager()
 | 
						|
 | 
						|
        # Update configmap for crawl configs that have non-zero timeout or scale > 1
 | 
						|
        match_query = {
 | 
						|
            "$or": [
 | 
						|
                {"crawlTimeout": {"$gt": 0}},
 | 
						|
                {"scale": {"$gt": 1}},
 | 
						|
                {"schedule": {"$nin": ["", None]}},
 | 
						|
            ]
 | 
						|
        }
 | 
						|
        async for config_dict in crawl_configs.find(match_query):
 | 
						|
            config = CrawlConfig.from_dict(config_dict)
 | 
						|
            print(
 | 
						|
                f"Updating Crawl Config {config.id}: schedule: {config.schedule}, "
 | 
						|
                + f"timeout: {config.crawlTimeout}, scale: {config.scale}"
 | 
						|
            )
 | 
						|
            try:
 | 
						|
                await crawl_manager.update_scheduled_job(config)
 | 
						|
            # pylint: disable=broad-except
 | 
						|
            except Exception as exc:
 | 
						|
                print(
 | 
						|
                    "Skip crawl config migration due to error, likely missing config",
 | 
						|
                    exc,
 | 
						|
                )
 | 
						|
 | 
						|
        # Delete existing scheduled jobs from crawler namespace
 | 
						|
        print("Deleting cronjobs from crawler namespace")
 | 
						|
        await crawl_manager.batch_api.delete_collection_namespaced_cron_job(
 | 
						|
            namespace=crawl_manager.namespace
 | 
						|
        )
 | 
						|
        result = await crawl_manager.batch_api.list_namespaced_cron_job(
 | 
						|
            namespace=crawl_manager.namespace
 | 
						|
        )
 | 
						|
        assert len(result.items) == 0
 |
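Note the delete-then-list sequence at the end: it gives the migration a cheap sanity check that the old workflow-scoped cronjobs are really gone before the new operator-managed ones take over. For illustration only, a scheduled job creating a CrawlJob directly, with remaining settings queried from the db as the summary describes, might look roughly like the sketch below; the `btrix.cloud/v1` group/version, `crawljobs` plural, and spec fields are assumptions, not confirmed by this PR:

```python
# Hedged sketch: a scheduled job creating a CrawlJob custom resource directly
# instead of reading a workflow configmap. Group/version, plural, and spec
# fields are assumptions for illustration.
from kubernetes_asyncio import client


async def create_crawljob(cid: str, userid: str, namespace: str):
    crawljob = {
        "apiVersion": "btrix.cloud/v1",  # assumed group/version
        "kind": "CrawlJob",
        "metadata": {"name": f"crawljob-{cid}", "namespace": namespace},
        "spec": {
            "cid": cid,        # crawl config id
            "userid": userid,  # user who last modified the schedule, per this PR
        },
    }
    return await client.CustomObjectsApi().create_namespaced_custom_object(
        group="btrix.cloud",  # assumed
        version="v1",
        namespace=namespace,
        plural="crawljobs",   # assumed
        body=crawljob,
    )
```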