* backend: refactor swarm support to also support podman (#260)
  - implement podman support as a subclass of the swarm deployment
  - podman is used when the 'RUNTIME=podman' env var is set
  - the podman socket is mapped instead of the docker socket
  - podman-compose is used instead of docker-compose (docker-compose works with podman but does not support secrets, while podman-compose does)
  - separate cli utils into SwarmRunner and PodmanRunner, which extends it (see the sketch after this list)
  - use config.yaml and config.env, both copied from sample versions
  - simplify config: add docker-compose.podman.yml and docker-compose.swarm.yml, plus signing and debug configs, in ./configs
  - add {build,run,stop}-{swarm,podman}.sh in the scripts dir
  - add init-configs, which only copies configs if they don't exist
  - build the local image using the current version of podman, to support both podman 3.x and 4.x
  - additional fixes after testing podman on CentOS
  - docs: update Deployment.md to cover swarm, podman, and k8s deployment
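A minimal sketch of how the RUNTIME-based runner selection described above could look. Only the SwarmRunner/PodmanRunner split and the RUNTIME env var come from the change notes; socket_path, compose_cmd, run_compose, and get_runner are illustrative assumptions, not the project's actual code.

import os
import subprocess


class SwarmRunner:
    """ illustrative: run compose commands for a Docker Swarm deployment """

    # socket that would be mapped into the deployment (assumed default path)
    socket_path = "/var/run/docker.sock"
    compose_cmd = "docker-compose"

    def run_compose(self, *args):
        # shell out to the compose CLI; raises CalledProcessError on failure
        return subprocess.run([self.compose_cmd, *args], check=True)


class PodmanRunner(SwarmRunner):
    """ illustrative: same interface, but with the podman socket and podman-compose
        (podman-compose supports secrets; docker-compose with podman does not) """

    socket_path = "/run/podman/podman.sock"
    compose_cmd = "podman-compose"


def get_runner():
    """ pick the runner from the RUNTIME env var, per the change notes above """
    return PodmanRunner() if os.environ.get("RUNTIME") == "podman" else SwarmRunner()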
""" base k8s job driver """

import os
import asyncio
import sys

import yaml

from fastapi.templating import Jinja2Templates

from .utils import create_from_yaml, get_templates_dir
from .k8sapi import K8sAPI


# =============================================================================
# pylint: disable=too-many-instance-attributes,bare-except,broad-except
class K8SJobMixin(K8sAPI):
    """ Crawl Job State """

    def __init__(self):
        self.namespace = os.environ.get("CRAWL_NAMESPACE") or "crawlers"
        self.config_file = "/config/config.yaml"

        self.job_id = os.environ.get("JOB_ID")
        self.orig_job_id = self.job_id
        if self.job_id.startswith("job-"):
            self.job_id = self.job_id[4:]

        self.templates = Jinja2Templates(directory=get_templates_dir())
        super().__init__()

    async def init_job_objects(self, template, extra_params=None):
        """ init k8s objects from specified template with given extra_params """
        with open(self.config_file, encoding="utf-8") as fh_config:
            params = yaml.safe_load(fh_config)

        params["id"] = self.job_id

        if extra_params:
            params.update(extra_params)

        data = self.templates.env.get_template(template).render(params)

        await create_from_yaml(self.api_client, data, namespace=self.namespace)

    async def delete_job_objects(self, selector):
        """ delete crawl stateful sets, services and pvcs """
        kwargs = {
            "namespace": self.namespace,
            "label_selector": selector,
        }

        statefulsets = await self.apps_api.list_namespaced_stateful_set(**kwargs)

        for statefulset in statefulsets.items:
            print(f"Deleting service {statefulset.spec.service_name}")
            await self.core_api.delete_namespaced_service(
                name=statefulset.spec.service_name,
                namespace=self.namespace,
                propagation_policy="Foreground",
            )
            print(f"Deleting statefulset {statefulset.metadata.name}")
            await self.apps_api.delete_namespaced_stateful_set(
                name=statefulset.metadata.name,
                namespace=self.namespace,
                propagation_policy="Foreground",
            )

        # until delete policy is supported
        try:
            await self.core_api.delete_collection_namespaced_persistent_volume_claim(
                **kwargs
            )
        except Exception as exc:
            print("PVC Delete failed", exc, flush=True)

        # delete our own job!
        await self.batch_api.delete_namespaced_job(
            name=self.orig_job_id,
            namespace=self.namespace,
            grace_period_seconds=30,
            propagation_policy="Foreground",
        )

        asyncio.create_task(self.exit_soon(5))

    async def exit_soon(self, timeout):
        """ exit soon """
        print("k8s objects deleted, job complete, exiting", flush=True)
        await asyncio.sleep(timeout)
        sys.exit(0)