* resource constraints (fixes #895):
  - for CPU, only set CPU requests
  - for memory, set memory requests == memory limits
  - add missing resource constraints for minio and the scheduled job
  - for the crawler, set memory and CPU constraints per browser, scaled by the number of browser instances per crawler
  - add comments in values.yaml noting that the crawler values are multiplied
  - default values: bump the crawler to 650 millicpu per browser instance, just in case

* cleanup: remove unused entries from the main backend configmap
This commit is contained in:
parent d8502da885
commit 7ea6d76f10
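For reference, the pattern this commit applies across the charts is sketched below, using the new backend defaults from values.yaml (backend_cpu: "25m", backend_memory: "384Mi") as example values; this is an illustration, not a line from the diff:

    resources:
      limits:
        memory: "384Mi"     # memory limit ...
      requests:
        cpu: "25m"          # CPU request only; no CPU limit is set
        memory: "384Mi"     # ... set equal to the memory request

Leaving the CPU limit unset avoids throttling while still giving the scheduler a request to place pods by; pinning the memory request to the memory limit keeps memory from being overcommitted and makes out-of-memory behavior predictable.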
@@ -149,12 +149,11 @@ spec:

           resources:
             limits:
-              cpu: {{ crawler_limits_cpu }}
-              memory: {{ crawler_limits_memory }}
+              memory: {{ crawler_memory }}

             requests:
-              cpu: {{ crawler_requests_cpu }}
-              memory: {{ crawler_requests_memory }}
+              cpu: {{ crawler_cpu }}
+              memory: {{ crawler_memory }}

         {% if crawler_liveness_port and crawler_liveness_port != '0' %}
           livenessProbe:
@@ -104,12 +104,11 @@ spec:

           resources:
             limits:
-              cpu: {{ redis_limits_cpu }}
-              memory: {{ redis_limits_memory }}
+              memory: {{ redis_memory }}

             requests:
-              cpu: {{ redis_requests_cpu }}
-              memory: {{ redis_requests_memory }}
+              cpu: {{ redis_cpu }}
+              memory: {{ redis_memory }}

           readinessProbe:
             exec:
@@ -55,12 +55,11 @@ spec:

           resources:
             limits:
-              cpu: {{ .Values.backend_limits_cpu }}
-              memory: {{ .Values.backend_limits_memory }}
+              memory: {{ .Values.backend_memory }}

             requests:
-              cpu: {{ .Values.backend_requests_cpu }}
-              memory: {{ .Values.backend_requests_memory }}
+              cpu: {{ .Values.backend_cpu }}
+              memory: {{ .Values.backend_memory }}

           startupProbe:
             httpGet:
@@ -121,12 +120,11 @@ spec:

           resources:
             limits:
-              cpu: {{ .Values.backend_limits_cpu }}
-              memory: {{ .Values.backend_limits_memory }}
+              memory: {{ .Values.backend_memory }}

             requests:
-              cpu: {{ .Values.backend_requests_cpu }}
-              memory: {{ .Values.backend_requests_memory }}
+              cpu: {{ .Values.backend_cpu }}
+              memory: {{ .Values.backend_memory }}

           startupProbe:
             httpGet:
@@ -11,22 +11,9 @@ data:
   CRON_NAMESPACE: {{ .Release.Namespace }}

   CRAWLER_NAMESPACE: {{ .Values.crawler_namespace }}
-  CRAWLER_IMAGE: {{ .Values.crawler_image }}
-  CRAWLER_PULL_POLICY: {{ .Values.crawler_pull_policy }}

   CRAWLER_FQDN_SUFFIX: ".{{ .Values.crawler_namespace }}.svc.cluster.local"
-
-  CRAWLER_TIMEOUT: "{{ .Values.crawl_timeout }}"
-  CRAWLER_RETRIES: "{{ .Values.crawl_retries }}"
-
-  CRAWLER_REQUESTS_CPU: "{{ .Values.crawler_requests_cpu }}"
-  CRAWLER_LIMITS_CPU: "{{ .Values.crawler_limits_cpu }}"
-
-  CRAWLER_REQUESTS_MEM: "{{ .Values.crawler_requests_memory }}"
-  CRAWLER_LIMITS_MEM: "{{ .Values.crawler_limits_memory }}"
-
-  CRAWLER_LIVENESS_PORT: "{{ .Values.crawler_liveness_port | default 0 }}"

   DEFAULT_ORG: "{{ .Values.default_org }}"

   INVITE_EXPIRE_SECONDS: "{{ .Values.invite_expire_seconds }}"
@@ -34,16 +21,6 @@ data:
   JOB_IMAGE: "{{ .Values.backend_image }}"
   JOB_PULL_POLICY: "{{ .Values.backend_pull_policy }}"

-  {{- if .Values.crawler_pv_claim }}
-  CRAWLER_PV_CLAIM: "{{ .Values.crawler_pv_claim }}"
-  {{- end }}
-
-  REDIS_URL: "{{ .Values.redis_url }}"
-
-  REDIS_CRAWLS_DONE_KEY: "crawls-done"
-
-  GRACE_PERIOD_SECS: "{{ .Values.grace_period_secs | default 600 }}"
-
   REGISTRATION_ENABLED: "{{ .Values.registration_enabled | default 0 }}"

   ALLOW_DUPE_INVITES: "{{ .Values.allow_dupe_invites | default 0 }}"
@@ -94,28 +71,24 @@ data:

     volume_storage_class: "{{ .Values.volume_storage_class }}"

-    requests_hd: "{{ .Values.crawler_requests_storage }}"
+    requests_hd: "{{ .Values.crawler_storage }}"

     # redis
     redis_image: {{ .Values.redis_image }}
     redis_image_pull_policy: {{ .Values.redis_pull_policy }}

-    redis_requests_cpu: "{{ .Values.redis_requests_cpu }}"
-    redis_limits_cpu: "{{ .Values.redis_limits_cpu }}"
+    redis_cpu: "{{ .Values.redis_cpu }}"

-    redis_requests_memory: "{{ .Values.redis_requests_memory }}"
-    redis_limits_memory: "{{ .Values.redis_limits_memory }}"
+    redis_memory: "{{ .Values.redis_memory }}"


     # crawler
     crawler_image: {{ .Values.crawler_image }}
     crawler_image_pull_policy: {{ .Values.crawler_pull_policy }}

-    crawler_requests_cpu: "{{ .Values.crawler_requests_cpu }}"
-    crawler_limits_cpu: "{{ .Values.crawler_limits_cpu }}"
+    crawler_cpu: "{{ mul .Values.crawler_cpu_per_browser .Values.crawler_browser_instances }}m"

-    crawler_requests_memory: "{{ .Values.crawler_requests_memory }}"
-    crawler_limits_memory: "{{ .Values.crawler_limits_memory }}"
+    crawler_memory: "{{ mul .Values.crawler_memory_per_browser .Values.crawler_browser_instances }}Mi"

     crawler_liveness_port: "{{ .Values.crawler_liveness_port | default 0 }}"

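With the defaults introduced later in this diff (crawler_cpu_per_browser: 650, crawler_memory_per_browser: 768, crawler_browser_instances: 2), the two Sprig `mul` expressions above would render to roughly:

    crawler_cpu: "1300m"
    crawler_memory: "1536Mi"

That is, the per-browser figures are multiplied by the number of browser instances in each crawler pod; the crawler job template then uses crawler_cpu as the CPU request and crawler_memory as both the memory request and the memory limit.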
@@ -57,12 +57,11 @@ spec:

           resources:
             limits:
-              cpu: {{ .Values.frontend_limits_cpu }}
-              memory: {{ .Values.frontend_limits_memory }}
+              memory: {{ .Values.frontend_memory }}

             requests:
-              cpu: {{ .Values.frontend_requests_cpu }}
-              memory: {{ .Values.frontend_requests_memory }}
+              cpu: {{ .Values.frontend_cpu }}
+              memory: {{ .Values.frontend_memory }}

           readinessProbe:
             httpGet:
@@ -66,6 +66,14 @@ spec:
               mountPath: /data
               subPath: minio

+          resources:
+            limits:
+              memory: {{ .Values.minio_memory }}
+
+            requests:
+              cpu: {{ .Values.minio_cpu }}
+              memory: {{ .Values.minio_memory }}
+
       containers:
         - name: minio
           image: {{ .Values.minio_image }}
@@ -80,6 +88,14 @@ spec:
               mountPath: /data
               subPath: minio

+          resources:
+            limits:
+              memory: {{ .Values.minio_memory }}
+
+            requests:
+              cpu: {{ .Values.minio_cpu }}
+              memory: {{ .Values.minio_memory }}
+
 ---
 apiVersion: v1
 kind: Service
@@ -94,12 +94,11 @@ spec:

           resources:
             limits:
-              cpu: {{ .Values.mongo_limits_cpu }}
-              memory: {{ .Values.mongo_limits_memory }}
+              memory: {{ .Values.mongo_memory }}

             requests:
-              cpu: {{ .Values.mongo_requests_cpu }}
-              memory: {{ .Values.mongo_requests_memory }}
+              cpu: {{ .Values.mongo_cpu }}
+              memory: {{ .Values.mongo_memory }}

           # should work with 6.0.x with longer timeout
           readinessProbe:
@@ -114,12 +114,11 @@ spec:

           resources:
             limits:
-              cpu: {{ .Values.signer_limits_cpu }}
-              memory: {{ .Values.signer_limits_memory }}
+              memory: {{ .Values.signer_memory }}

             requests:
-              cpu: {{ .Values.signer_requests_cpu }}
-              memory: {{ .Values.signer_requests_memory }}
+              cpu: {{ .Values.signer_cpu }}
+              memory: {{ .Values.signer_memory }}

 ---
 apiVersion: v1
@@ -99,11 +99,9 @@ backend_num_replicas: 1
 # number of workers per pod
 backend_workers: 2

-backend_requests_cpu: "10m"
-backend_limits_cpu: "768m"
+backend_cpu: "25m"

-backend_requests_memory: "100Mi"
-backend_limits_memory: "512Mi"
+backend_memory: "384Mi"

 # port for operator service
 opPort: 8756
@@ -119,11 +117,9 @@ profile_browser_idle_seconds: 60
 frontend_image: "docker.io/webrecorder/browsertrix-frontend:latest"
 frontend_pull_policy: "Always"

-frontend_requests_cpu: "3m"
-frontend_limits_cpu: "30m"
+frontend_cpu: "5m"

-frontend_requests_memory: "12Mi"
-frontend_limits_memory: "40Mi"
+frontend_memory: "36Mi"

 # if set, maps nginx to a fixed port on host machine
 # must be between 30000 - 32767
@@ -140,11 +136,9 @@ mongo_host: "local-mongo.default"
 mongo_image: "docker.io/library/mongo:6.0.5"
 mongo_pull_policy: "IfNotPresent"

-mongo_requests_cpu: "12m"
-mongo_limits_cpu: "128m"
+mongo_cpu: "12m"

-mongo_requests_memory: "96Mi"
-mongo_limits_memory: "512Mi"
+mongo_memory: "512Mi"


 mongo_auth:
@@ -165,12 +159,9 @@ redis_pull_policy: "IfNotPresent"

 redis_url: "redis://local-redis.default:6379/1"

-redis_requests_cpu: "3m"
-redis_limits_cpu: "48m"
+redis_cpu: "5m"

-redis_requests_memory: "10Mi"
-redis_limits_memory: "64Mi"
-
+redis_memory: "48Mi"


 # Crawler Image
@@ -190,15 +181,17 @@ crawl_retries: 1000

 crawler_browser_instances: 2

-crawler_requests_cpu: "800m"
-crawler_limits_cpu: "1200m"
+# note: the following values are multipled by 'crawler_browser_instances' to get final value

-crawler_requests_memory: "512Mi"
-crawler_limits_memory: "1024Mi"
+# this value is an integer in 'm' (millicpu) units, multiplied by 'crawler_browser_instances'
+crawler_cpu_per_browser: 650
+
+# this value is an integer in 'Mi' (Megabyte) units, multiplied by 'crawler_browser_instances'
+crawler_memory_per_browser: 768

 # minimum size allocated to each crawler
 # should be at least double crawl session size to ensure space for WACZ
-crawler_requests_storage: "22Gi"
+crawler_storage: "22Gi"

 # max size at which crawler will commit current crawl session
 crawler_session_size_limit_bytes: "10000000000"
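As a hypothetical override (not part of this commit), raising the browser count scales the per-pod totals linearly with these defaults:

    # example override: 4 browsers per crawler pod
    crawler_browser_instances: 4
    # each crawler pod would then request 4 x 650 = 2600 millicpu
    # and 4 x 768 = 3072Mi of memory (memory requests == limits)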
@@ -230,6 +223,9 @@ minio_pull_policy: "IfNotPresent"

 minio_local_bucket_name: &local_bucket_name "btrix-data"

+minio_cpu: "10m"
+minio_memory: "1024Mi"
+

 # Storage
 # =========================================
@@ -285,11 +281,9 @@ signer:
 # image_pull_policy: "IfNotPresent"
 # auth_token: <set to custom value>

-signer_requests_cpu: "3m"
-signer_limits_cpu: "32m"
+signer_cpu: "5m"

-signer_requests_memory: "36Mi"
-signer_limits_memory: "96Mi"
+signer_memory: "40Mi"


 # Optional: configure load balancing annotations