browsertrix/backend/btrixcloud/main_scheduled_job.py
Tessa Walsh 147bfd9d44
Add event webhook notifications system to backend (#1061)
Initial set of backend APIs for event webhook notifications, covering the following events (see the illustrative payload sketch after the list):
* Crawl started (including boolean indicating if crawl was scheduled)
* Crawl finished
* Upload finished
* Archived item added to collection
* Archived item removed from collection
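For illustration, a notification body for the first event might look roughly like the sketch below; the field names here are assumptions for illustration, not the actual schema shipped in this PR.

    # Hypothetical "crawl started" notification payload (field names assumed):
    crawl_started_body = {
        "event": "crawlStarted",
        "orgId": "<oid>",
        "itemId": "<crawl id>",
        "scheduled": True,  # boolean indicating the crawl was started on schedule
    }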

Configuration of URLs is done via /api/orgs/<oid>/event-webhook-urls. If a URL is configured for a given event, a webhook notification is added to the database and a send is attempted (up to 5 tries per overall attempt, with increasing backoff between tries, implemented via the backoff library, which supports async).
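A minimal sketch of that retry pattern, assuming aiohttp for delivery (the actual sender in the PR may differ):

    import aiohttp
    import backoff

    # Retry failed deliveries up to 5 times with exponential backoff between tries;
    # backoff natively supports decorating async functions.
    @backoff.on_exception(backoff.expo, aiohttp.ClientError, max_tries=5)
    async def send_webhook_notification(url: str, body: dict) -> None:
        async with aiohttp.ClientSession() as session:
            async with session.post(url, json=body) as resp:
                # raises aiohttp.ClientResponseError (a ClientError subclass)
                # on HTTP error status, triggering a retry
                resp.raise_for_status()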

Webhook notification status is available via /api/orgs/<oid>/webhooks.
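For example, a client could poll that endpoint roughly as follows; the base URL, auth header, and response shape are assumptions, only the endpoint path comes from this PR.

    import requests

    org_id = "<oid>"
    token = "<api token>"
    resp = requests.get(
        f"https://btrix.example.com/api/orgs/{org_id}/webhooks",
        headers={"Authorization": f"Bearer {token}"},
    )
    print(resp.json())  # stored webhook notifications and their send status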

(Additional testing + potential FastAPI integration left to separate follow-ups.)
Fixes #1041
2023-08-31 19:52:37 -07:00


""" entrypoint for cron crawl job"""
import asyncio
import os
import uuid
from .k8sapi import K8sAPI
from .db import init_db
from .crawlconfigs import (
get_crawl_config,
inc_crawl_count,
)
from .crawls import add_new_crawl
from .utils import register_exit_handler
# ============================================================================
class ScheduledJob(K8sAPI):
"""Schedulued Job APIs for starting CrawlJobs on schedule"""
def __init__(self):
super().__init__()
self.cid = os.environ["CID"]
_, mdb = init_db()
self.crawls = mdb["crawls"]
self.crawlconfigs = mdb["crawl_configs"]
async def run(self):
"""run crawl!"""
register_exit_handler()
config_map = await self.core_api.read_namespaced_config_map(
name=f"crawl-config-{self.cid}", namespace=self.namespace
)
data = config_map.data
userid = data["USER_ID"]
scale = int(data.get("INITIAL_SCALE", 0))
try:
crawl_timeout = int(data.get("CRAWL_TIMEOUT", 0))
# pylint: disable=bare-except
except:
crawl_timeout = 0
oid = data["ORG_ID"]
crawlconfig = await get_crawl_config(self.crawlconfigs, uuid.UUID(self.cid))
# k8s create
crawl_id = await self.new_crawl_job(
self.cid, userid, oid, scale, crawl_timeout, manual=False
)
# db create
await inc_crawl_count(self.crawlconfigs, crawlconfig.id)
await add_new_crawl(
self.crawls,
self.crawlconfigs,
crawl_id,
crawlconfig,
uuid.UUID(userid),
manual=False,
)
print("Crawl Created: " + crawl_id)
# ============================================================================
def main():
"""main entrypoint"""
job = ScheduledJob()
loop = asyncio.get_event_loop()
loop.run_until_complete(job.run())
if __name__ == "__main__":
main()