fix issues that caused cronjob container to crash (#987)

- don't set CRAWL_TIMEOUT to "None" in configmap; if it is encountered, just set it to 0
- run register_exit_handler() after the run loop has been initialized
This commit is contained in:
Ilya Kreymer 2023-07-18 18:08:53 +02:00 committed by GitHub
parent c5b3be0680
commit a5312709bb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 9 additions and 3 deletions

View File

@ -105,7 +105,7 @@ class CrawlManager(K8sAPI):
STORAGE_NAME=storage_name, STORAGE_NAME=storage_name,
PROFILE_FILENAME=profile_filename, PROFILE_FILENAME=profile_filename,
INITIAL_SCALE=str(crawlconfig.scale), INITIAL_SCALE=str(crawlconfig.scale),
CRAWL_TIMEOUT=str(crawlconfig.crawlTimeout) CRAWL_TIMEOUT=str(crawlconfig.crawlTimeout or 0)
# REV=str(crawlconfig.rev), # REV=str(crawlconfig.rev),
) )

View File

@ -29,6 +29,8 @@ class ScheduledJob(K8sAPI):
async def run(self): async def run(self):
"""run crawl!""" """run crawl!"""
register_exit_handler()
config_map = await self.core_api.read_namespaced_config_map( config_map = await self.core_api.read_namespaced_config_map(
name=f"crawl-config-{self.cid}", namespace=self.namespace name=f"crawl-config-{self.cid}", namespace=self.namespace
) )
@ -36,7 +38,12 @@ class ScheduledJob(K8sAPI):
userid = data["USER_ID"] userid = data["USER_ID"]
scale = int(data.get("INITIAL_SCALE", 0)) scale = int(data.get("INITIAL_SCALE", 0))
try:
crawl_timeout = int(data.get("CRAWL_TIMEOUT", 0)) crawl_timeout = int(data.get("CRAWL_TIMEOUT", 0))
# pylint: disable=bare-except
except:
crawl_timeout = 0
oid = data["ORG_ID"] oid = data["ORG_ID"]
crawlconfig = await get_crawl_config(self.crawlconfigs, uuid.UUID(self.cid)) crawlconfig = await get_crawl_config(self.crawlconfigs, uuid.UUID(self.cid))
@ -71,5 +78,4 @@ def main():
if __name__ == "__main__": if __name__ == "__main__":
register_exit_handler()
main() main()