concurrent crawl limits: (addresses #866)

- support limits on concurrent crawls that can be run within a single org
- change 'waiting' state to 'waiting_org_limit' for the concurrent crawl limit and 'waiting_capacity' for capacity-based limits

orgs:
- add 'maxConcurrentCrawl' to new 'quotas' object on orgs
- add /quotas endpoint for updating quotas object (see model sketch below)

operator:
- add all crawljobs as related; they appear to be returned in creation order
- if a concurrent crawl limit is set, ensure the current job is in the first N crawljobs (as provided via the 'related' list of crawljob objects) before it can proceed to 'starting', otherwise set it to 'waiting_org_limit' (see gating sketch below)
- api: add org /quotas endpoint for configuring quotas
- remove 'new' state; always start with 'starting'
- crawljob: add 'oid' to crawljob spec and label for easier querying
- more stringent state transitions: add allowed_from to set_state() (see sketch below)
- ensure state transitions only happen from allowed states, while failed/canceled can happen from any state
- ensure finished and state are synced from db if a transition is not allowed
- add crawl indices by oid and cid (see index sketch below)

frontend:
- show the different waiting states as 'Waiting (Crawl Limit)' and 'Waiting (At Capacity)'
- add gear icon on orgs admin page and initial popup for setting org quotas, showing all properties from the org 'quotas' object

tests:
- add concurrent crawl limit nightly tests (see test sketch below)
- fix state waiting -> waiting_capacity
- ci: add logging of operator output on test failure
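A minimal sketch of what the org 'quotas' object and the /quotas update handler could look like. Only the 'quotas' and 'maxConcurrentCrawl' names come from the changes above; the Pydantic model, the motor-style collection, and the helper name are assumptions:

from uuid import UUID

from pydantic import BaseModel


class OrgQuotas(BaseModel):
    """sketch: per-org quota limits; 0 is assumed to mean 'no limit'"""

    maxConcurrentCrawl: int = 0


async def update_quotas(orgs, oid: UUID, quotas: OrgQuotas):
    """sketch of the POST /orgs/{oid}/quotas handler body: replace the
    org's 'quotas' object with the submitted values"""
    await orgs.find_one_and_update(
        {"_id": oid}, {"$set": {"quotas": quotas.dict(exclude_unset=True)}}
    )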
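To make the first-N check concrete: a sketch of how the operator could gate a crawljob on the org's concurrent crawl limit, assuming the 'related' list yields CrawlJob objects in creation order (the helper name and object shape are illustrative, not the actual operator code):

def can_proceed_to_starting(crawl_id, related_crawljobs, max_concur_crawls):
    """sketch: True if this job is within the first N crawljobs and may
    move to 'starting'; otherwise it should be set to 'waiting_org_limit'"""
    if not max_concur_crawls:
        # no concurrent crawl limit configured for this org
        return True

    first_n = related_crawljobs[:max_concur_crawls]
    return crawl_id in [job["metadata"]["name"] for job in first_n]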
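The stricter set_state() could look roughly like the sketch below, assuming crawls are stored in a motor collection keyed by id. If the transition is rejected, the caller re-syncs 'state' and 'finished' from the db, matching the behavior described above; all names here are assumptions:

async def set_state(crawls, crawl_id, state, allowed_from):
    """sketch: apply the transition only if the stored state is in
    allowed_from; failed/canceled pass an empty allowed_from so they
    can be set from any state"""
    query = {"_id": crawl_id}
    if allowed_from:
        query["state"] = {"$in": allowed_from}

    res = await crawls.find_one_and_update(query, {"$set": {"state": state}})
    # None means the crawl was not in an allowed state; the caller should
    # then reload 'state' and 'finished' from the db
    return res is not None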
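The new crawl indices are a small addition; a sketch with motor/pymongo, assuming the same mdb["crawls"] collection used by the cron job below:

from pymongo import ASCENDING


async def create_crawl_indices(crawls):
    # sketch: index crawls by org id and crawlconfig id for per-org queries
    await crawls.create_index([("oid", ASCENDING)])
    await crawls.create_index([("cid", ASCENDING)])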
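And a sketch of the general shape of the concurrent crawl limit nightly test; the endpoint paths, fixture names, and response shape here are assumptions, not the actual test code:

import requests


def test_concurrent_crawl_limit(api_prefix, org_id, admin_auth_headers, crawl_ids):
    # sketch: set the org limit to 1 via the new /quotas endpoint
    r = requests.post(
        f"{api_prefix}/orgs/{org_id}/quotas",
        headers=admin_auth_headers,
        json={"maxConcurrentCrawl": 1},
    )
    assert r.status_code == 200

    # with two crawls queued, the second should be held in the new state
    r = requests.get(
        f"{api_prefix}/orgs/{org_id}/crawls/{crawl_ids[1]}/json",
        headers=admin_auth_headers,
    )
    assert r.json()["state"] == "waiting_org_limit"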
| """ entrypoint for cron crawl job"""
 | |
| 
 | |
| import asyncio
 | |
| import os
 | |
| import uuid
 | |
| 
 | |
| from .k8sapi import K8sAPI
 | |
| from .db import init_db
 | |
| from .crawlconfigs import (
 | |
|     get_crawl_config,
 | |
|     inc_crawl_count,
 | |
|     set_config_current_crawl_info,
 | |
| )
 | |
| from .crawls import add_new_crawl
 | |
| from .utils import register_exit_handler
 | |
| 
 | |
| 
 | |
| # ============================================================================
 | |
| class ScheduledJob(K8sAPI):
 | |
|     """Schedulued Job APIs for starting CrawlJobs on schedule"""
 | |
| 
 | |
|     def __init__(self):
 | |
|         super().__init__()
 | |
|         self.cid = os.environ["CID"]
 | |
| 
 | |
|         _, mdb = init_db()
 | |
|         self.crawls = mdb["crawls"]
 | |
|         self.crawlconfigs = mdb["crawl_configs"]
 | |
| 
 | |
|     async def run(self):
 | |
|         """run crawl!"""
 | |
|         config_map = await self.core_api.read_namespaced_config_map(
 | |
|             name=f"crawl-config-{self.cid}", namespace=self.namespace
 | |
|         )
 | |
|         data = config_map.data
 | |
| 
 | |
|         userid = data["USER_ID"]
 | |
|         scale = int(data.get("INITIAL_SCALE", 0))
 | |
|         crawl_timeout = int(data.get("CRAWL_TIMEOUT", 0))
 | |
|         oid = data["ORG_ID"]
 | |
| 
 | |
|         crawlconfig = await get_crawl_config(self.crawlconfigs, uuid.UUID(self.cid))
 | |
| 
 | |
|         # k8s create
 | |
|         crawl_id = await self.new_crawl_job(
 | |
|             self.cid, userid, oid, scale, crawl_timeout, manual=False
 | |
|         )
 | |
| 
 | |
|         # db create
 | |
|         await inc_crawl_count(self.crawlconfigs, crawlconfig.id)
 | |
|         new_crawl = await add_new_crawl(
 | |
|             self.crawls, crawl_id, crawlconfig, uuid.UUID(userid), manual=False
 | |
|         )
 | |
|         # pylint: disable=duplicate-code
 | |
|         await set_config_current_crawl_info(
 | |
|             self.crawlconfigs.crawl_configs,
 | |
|             crawlconfig.id,
 | |
|             new_crawl["id"],
 | |
|             new_crawl["started"],
 | |
|         )
 | |
| 
 | |
|         print("Crawl Created: " + crawl_id)
 | |
| 
 | |
| 
 | |
| # ============================================================================
 | |
| def main():
 | |
|     """main entrypoint"""
 | |
|     job = ScheduledJob()
 | |
|     loop = asyncio.get_event_loop()
 | |
|     loop.run_until_complete(job.run())
 | |
| 
 | |
| 
 | |
| if __name__ == "__main__":
 | |
|     register_exit_handler()
 | |
|     main()
 |