browsertrix/chart/templates/frontend.yaml
Ilya Kreymer 0c8a5a49b4 refactor to use docker swarm as a local alternative to k8s, instead of docker compose (#247):
- use python-on-whales to call the docker cli api directly, creating a docker stack for each crawl or profile browser
- configure storages via storages.yaml secret (see the sketch after the commit notes below)
- add crawl_job, profile_job, splitting into base and k8s/swarm implementations
- split manager into base crawlmanager and k8s/swarm implementations
- swarm: load initial scale from db to avoid modifying fixed configs; in k8s, load from configmap
- swarm: support scheduled jobs via swarm-cronjob service
- remove docker dependencies (aiodocker, apscheduler, scheduling)
- swarm: when using local minio, expose it via a /data/ route in nginx using an extra include (in k8s, the include dir is empty and routing is handled via ingress)
- k8s: cleanup minio chart: move init containers to minio.yaml
- swarm: stateful-set-like implementation to be consistent with k8s scaling:
  - don't use service replicas
  - create a unique service with '-N' appended and allocate a unique volume for each replica
  - this allows crawl containers to be restarted w/o losing data
- add volume pruning background service, as volumes can be deleted only after service shuts down fully
- watch: fully simplify routing, route via replica index instead of ip for both k8s and swarm
- rename network btrix-cloud-net -> btrix-net to avoid conflict with compose network
2022-06-05 10:37:17 -07:00
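
A minimal sketch of the kind of config the storages.yaml secret might hold -- the field names below (name, access_key, secret_key, endpoint_url) and the local-minio endpoint are illustrative assumptions, not the confirmed schema:

    # hypothetical storages.yaml sketch; all keys and values are assumptions
    storages:
      - name: "default"
        access_key: "ADMIN"
        secret_key: "PASSW0RD"
        endpoint_url: "http://local-minio.default:9000/btrix-data/"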


---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ .Values.name }}-frontend
  namespace: {{ .Release.Namespace }}

spec:
  selector:
    matchLabels:
      app: {{ .Values.name }}
      role: frontend

  replicas: {{ .Values.nginx_num_replicas | default 1 }}

  template:
    metadata:
      labels:
        app: {{ .Values.name }}
        role: frontend

      annotations:
        # force helm to update the deployment each time
        {{- if not .Values.backend_only }}
        "helm.update": {{ randAlphaNum 5 | quote }}
        {{- end }}
    spec:
      {{- if .Values.main_node_type }}
      nodeSelector:
        nodeType: {{ .Values.main_node_type }}
      {{- end }}

      volumes:
        - name: nginx-resolver
          emptyDir: {}

        - name: nginx-extra-includes
          emptyDir: {}
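      # write resolvers.conf at pod startup: the awk command pulls the cluster DNS
      # nameserver(s) out of /etc/resolv.conf so nginx can re-resolve crawler
      # service names at runtime (valid=30s) instead of caching IPs at startup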
      initContainers:
        - name: init-nginx
          image: {{ .Values.nginx_image }}
          command: ["/bin/sh"]
          args: ["-c", "echo resolver $(awk 'BEGIN{ORS=\" \"} $1==\"nameserver\" {print $2}' /etc/resolv.conf) valid=30s \";\" > /etc/nginx/resolvers/resolvers.conf"]
          volumeMounts:
            - name: nginx-resolver
              mountPath: /etc/nginx/resolvers/
      containers:
        - name: nginx
          image: {{ .Values.nginx_image }}
          imagePullPolicy: {{ .Values.nginx_pull_policy }}
          volumeMounts:
            - name: nginx-resolver
              mountPath: /etc/nginx/resolvers/
              readOnly: true

            # mount an empty dir over /etc/nginx/includes/ to override
            # locations.conf for now (in k8s the extra include is unused,
            # as routing is handled via ingress)
            - name: nginx-extra-includes
              mountPath: /etc/nginx/includes/
              readOnly: true
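          # consumed by the nginx config to address the backend and to build
          # per-crawl service hostnames, routing watch traffic by service name
          # and replica index rather than by pod IP (see commit notes above)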
          env:
            - name: BACKEND_HOST
              value: {{ .Values.name }}-backend

            - name: CRAWLER_FQDN_SUFFIX
              value: ".{{ .Values.crawler_namespace }}.svc.cluster.local"

            - name: CRAWLER_SVC_SUFFIX
              value: ".crawl-$crawl"
          resources:
            limits:
              cpu: {{ .Values.nginx_limits_cpu }}
              memory: {{ .Values.nginx_limits_memory }}

            requests:
              cpu: {{ .Values.nginx_requests_cpu }}
              memory: {{ .Values.nginx_requests_memory }}
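          # only route service traffic to this pod once nginx answers on port 80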
          readinessProbe:
            httpGet:
              path: /
              port: 80
---
apiVersion: v1
kind: Service
metadata:
  namespace: {{ .Release.Namespace }}
  name: {{ .Values.name }}-frontend
  labels:
    app: {{ .Values.name }}
    role: frontend

  {{- if .Values.service }}
  {{- if .Values.service.annotations }}
  annotations:
    {{- range $key, $val := .Values.service.annotations }}
    {{ $key }}: {{ $val | quote }}
    {{- end }}
  {{- end }}
  {{- end }}
spec:
  selector:
    app: {{ .Values.name }}
    role: frontend
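  # optionally override the Service type via values (e.g. NodePort or
  # LoadBalancer); defaults to ClusterIP when unset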
  {{- if .Values.service }}
  {{- if .Values.service.type }}
  type: {{ .Values.service.type | quote }}
  {{- end }}
  {{- end }}

  ports:
    - protocol: TCP
      port: 80
      name: frontend