Move DO ansible playbook to new format (#1159)
* feat: move do_setup to new unified format at root of ansible/ dir to allow sharing roles, inventory with playbooks for other deployment types
* fix: pass ansible lint
* update do settings to current deployment:
  - bump main node params
  - add additional settings to helm values template

Co-authored-by: Ilya Kreymer <ikreymer@gmail.com>
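For reference, the unified layout introduced here (a sketch based only on the paths touched in this diff; the btrix/prereq and btrix/install roles referenced below already live under ansible/roles/) is roughly:

```
ansible/
├── do_setup.yml                          # entry-point playbook, previously playbooks/do_setup.yml
├── inventory/
│   └── digital_ocean/
│       ├── group_vars/main.yml           # DigitalOcean deployment settings
│       └── hosts.ini
└── roles/
    ├── btrix/deploy/main.yml             # shared helm deploy step
    └── digital_ocean/setup/tasks/main.yml
```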
parent e5cc70754e
commit 253a267830

.github/workflows/ansible-lint.yaml (2 changes)
@@ -32,4 +32,4 @@ jobs:
    - name: Lint
      run: |
        cd ansible
        pipenv run ansible-lint -c ./lint-cfg.yml ./playbooks/do_setup.yml
        pipenv run ansible-lint -c ./lint-cfg.yml ./do_setup.yml
ansible/do_setup.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
---
- name: deploy browsertrix cloud on digital ocean
  hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - inventory/digital_ocean/group_vars/main.yml
  roles:
    - role: digital_ocean/setup
    - role: btrix/deploy
@@ -28,3 +28,4 @@
  roles:
    - role: btrix/prereq # Only required if you wish to install & configure Helm / Kubectl
    - role: btrix/install
    - role: btrix/deploy
ansible/inventory/digital_ocean/group_vars/main.yml (new file, 52 lines)

@@ -0,0 +1,52 @@
---
project_name: "default"

main_node_size: "s-4vcpu-8gb"
crawl_node_size: "c-4"
droplet_region: "sfo3"

node_pools:
  - name=main-app;size={{ main_node_size }};label=nodeType=main;min-nodes=1;max-nodes=2;count=1
  - name=crawling;size={{ crawl_node_size }};label=nodeType=crawling;taint=nodeType=crawling:NoSchedule;auto-scale=true;min-nodes=1;max-nodes=3;count=1

enable_admin_addons: false

admin_node_pool:
  name: admin-app
  size: s-4vcpu-8gb
  label: nodeType=admin
  count: 1

db_name: "{{ project_name }}"
k8s_name: "{{ project_name }}"

bucket_name: "{{ project_name }}"
bucket_path: "crawls"

registry_name: "{{ project_name }}"

domain: "browsertrix.cloud"
subdomain: "{{ project_name }}"


configure_kubectl: false
use_do_registry: false
image_tag: "latest"

enable_signing: true
signing_host: "signing"

superuser_email: "dev@webrecorder.net"
superuser_password: "PassW0rd!"

org_name: "{{ project_name }}"

registration_enabled: false

cert_email: "{{ superuser_email }}"

smtp_port: ""
smtp_host: ""
sender_email: ""
reply_to_email: ""
sender_password: ""
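Each `node_pools` entry appears to be handed verbatim to `doctl kubernetes cluster create --node-pool "…"` by the `digital_ocean/setup` role further down, which is why the entries use doctl's semicolon-delimited `key=value` syntax instead of a YAML mapping.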
ansible/inventory/digital_ocean/hosts.ini (new file, 1 line)

@@ -0,0 +1 @@
127.0.0.1
@@ -54,7 +54,8 @@ skip_list:
  - skip_this_tag
  - name[casing]
  - yaml[line-length]
  - fqcn[canonical] # we're using community.aws.aws_s3_cors instead of community.aws.s3_cors, but using the other package made everything break
  - fqcn[canonical] # we're using community.aws.aws_s3_cors instead of community.aws.s3_cors, but using the other package made everything break
  - role-name[path]

# Ansible-lint does not automatically load rules that have the 'opt-in' tag.
# You must enable opt-in rules by listing each rule 'id' below.

@@ -67,7 +68,7 @@ enable_list:
  # add yaml here if you want to avoid ignoring yaml checks when yamllint is missing

# Report only a subset of tags and fully ignore any others
# tags:
# -
# -

# Ansible-lint does not fail on warnings from the rules or tags listed below
warn_list:
@@ -1,311 +0,0 @@
---
- name: deploy browsertrix cloud on digital ocean
  hosts: localhost
  connection: local
  gather_facts: false
  vars_files:
    - ../group_vars/do/main.yml

  tasks:

    # ===========================================
    # Init
    - name: d_ocean | init | install doctl and helm
      ansible.builtin.package:
        name: "{{ item }}"
        state: present
      loop:
        - helm
        - doctl

    - name: d_ocean | init | set full domain
      ansible.builtin.set_fact:
        full_domain: "{{ subdomain + '.' + domain if subdomain else domain }}"

    # MongoDB
    # ===========================================
    - name: d_ocean | db | test for existing mongodb
      ansible.builtin.command: doctl db list -o json
      changed_when: false
      failed_when: false
      register: db_check

    - name: d_ocean | db | create mongodb database
      ansible.builtin.command: doctl databases create {{ db_name }} --region {{ droplet_region }} --engine mongodb --version 6 --output json
      async: 1800
      poll: 60
      register: db_create
      when: "db_check.stdout | from_json | json_query(name_query) | length < 1"
      changed_when: true
      vars:
        name_query: '[?name==`{{ db_name }}`]'

    - name: d_ocean | db | set db id
      ansible.builtin.set_fact:
        db_uuid: "{{ db_create.stdout if db_create.changed else db_check.stdout | from_json | json_query(name_query) | json_query('[0].id') }}"
      vars:
        name_query: '[?name==`{{ db_name }}`]'

    - name: d_ocean | db | reset db user password
      ansible.builtin.command: doctl databases user reset {{ db_uuid }} doadmin -o json
      register: db_user
      when: not db_create.changed
      changed_when: true

    - name: d_ocean | db | set db config
      ansible.builtin.set_fact:
        db_uuid: "{{ db_create.stdout if db_create.changed else db_check.stdout | from_json | json_query(name_query) | json_query('[0].id') }}"
      vars:
        name_query: '[?name==`{{ db_name }}`]'

    - name: d_ocean | db | set db config
      ansible.builtin.set_fact:
        db_url: "{{ db_check.stdout | from_json | json_query(name_query) | json_query('[0].private_connection.uri') | replace(old, new) }}"
      when: not db_create.changed
      vars:
        name_query: '[?name==`{{ db_name }}`]'
        old: ":@"
        new: ":{{ db_user.stdout | from_json | json_query('[0].password') }}@"

    # Storage (Space)
    # ===========================================
    - name: d_ocean | space | create new
      community.digitalocean.digital_ocean_spaces:
        name: "{{ bucket_name }}"
        state: present
        oauth_token: "{{ lookup('env', 'DO_API_TOKEN') }}"
        aws_access_key_id: "{{ lookup('env', 'DO_AWS_ACCESS_KEY') }}"
        aws_secret_access_key: "{{ lookup('env', 'DO_AWS_SECRET_KEY') }}"
        region: "{{ droplet_region }}"
      register: db_space_result
      ignore_errors: true

    - name: d_ocean | space | set endpoint urls
      ansible.builtin.set_fact:
        endpoint_url: "{{ db_space_result.data.space.endpoint_url }}/"
        bucket_endpoint_url: "{{ db_space_result.data.space.endpoint_url }}/{{ db_space_result.data.space.name }}/{{ bucket_path }}/"

    - name: d_ocean | space | set bucket cors
      community.aws.aws_s3_cors:
        name: "{{ bucket_name }}"
        aws_access_key: "{{ lookup('env', 'DO_AWS_ACCESS_KEY') }}"
        aws_secret_key: "{{ lookup('env', 'DO_AWS_SECRET_KEY') }}"
        endpoint_url: "{{ endpoint_url }}"
        region: "{{ droplet_region }}"
        state: present
        rules:
          - allowed_origins:
              - "https://{{ full_domain }}"
            allowed_methods:
              - GET
              - HEAD
            allowed_headers:
              - "*"
            expose_headers:
              - Content-Range
              - Content-Encoding
              - Content-Length

    # K8S
    # ===========================================
    - name: d_ocean | k8s | test for existing k8s cluster
      ansible.builtin.command: doctl k8s cluster list
      changed_when: false
      failed_when: false
      register: cluster_check

    - name: d_ocean | k8s | create a kubernetes cluster in {{ droplet_region }}
      # skip_ansible_lint
      ansible.builtin.command: >-
        doctl kubernetes cluster create {{ k8s_name }} --1-clicks ingress-nginx,cert-manager --node-pool
        "{{ node_pools | select('match', 'name=*') | list | join(',') }}"
        --region={{ droplet_region }}
      async: 1800
      poll: 60
      changed_when: false
      when: cluster_check.stdout.find(k8s_name) == -1

    - name: d_ocean | k8s | test for existing k8s cluster's node pools
      ansible.builtin.command: doctl kubernetes cluster node-pool list {{ k8s_name }}
      changed_when: false
      failed_when: false
      register: cluster_nodepools_check

    - name: d_ocean | k8s | create a admin node pool in the cluster {{ k8s_name }}
      # skip_ansible_lint
      ansible.builtin.command: >-
        doctl kubernetes cluster node-pool create {{ k8s_name }}
        --name {{ admin_node_pool.name }} --size {{ admin_node_pool.size }}
        --label {{ admin_node_pool.label }} --count {{ admin_node_pool.count }}
      async: 1800
      poll: 60
      changed_when: false
      when: enable_admin_addons and cluster_nodepools_check.stdout.find(admin_node_pool.name) == -1

    - name: d_ocean | k8s | remove a admin node pool from the cluster {{ k8s_name }}
      # skip_ansible_lint
      ansible.builtin.command: >-
        echo "yes" | doctl kubernetes cluster node-pool delete {{ k8s_name }} {{ admin_node_pool.name }}
      async: 1800
      poll: 60
      changed_when: false
      when: not enable_admin_addons and cluster_nodepools_check.stdout.find(admin_node_pool.name) >= 0

    - name: d_ocean | k8s | Get information about our cluster
      community.digitalocean.digital_ocean_kubernetes_info:
        oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}"
        name: "{{ k8s_name }}"
        return_kubeconfig: true
      register: my_cluster

    - name: d_ocean | k8s | print information about an existing DigitalOcean Kubernetes cluster
      ansible.builtin.debug:
        msg: Cluster name is {{ my_cluster.data.name }}, ID is {{ my_cluster.data.id }}
      failed_when: not my_cluster

    - name: d_ocean | k8s | save kubectl config to kube_dir
      ansible.builtin.command: doctl kubernetes cluster kubeconfig save {{ my_cluster.data.id }}
      changed_when: false

    - name: d_ocean | k8s | get loadbalancer info from doctl
      ansible.builtin.command: doctl k8s cluster list-associated-resources {{ my_cluster.data.id }} --format LoadBalancers --output json
      register: lb_id_result
      retries: 100
      delay: 5
      changed_when: lb_id_result.stdout | from_json | json_query('load_balancers') | length > 0
      until: lb_id_result.stdout | from_json | json_query('load_balancers') | length > 0

    - name: d_ocean | k8s | parse lb id
      ansible.builtin.set_fact:
        lb_id: "{{ lb_id_result.stdout | from_json | json_query('load_balancers[0].id') }}"

    # FIREWALL
    # ===========================================
    - name: d_ocean | db | get db firewall list
      ansible.builtin.command: doctl db firewalls list {{ db_uuid }} -o json
      register: db_firewalls
      changed_when: true

    - name: d_ocean | db | configure firewall for mongoDB
      ansible.builtin.command: doctl db firewalls append {{ db_uuid }} --rule k8s:{{ my_cluster.data.id }}
      when: "db_firewalls.stdout == []"
      changed_when: true # if it runs, it's changed

    # DNS
    # ===========================================
    - name: d_ocean | dns | grab loadbalancer ip using doctl
      ansible.builtin.command: doctl compute load-balancer get --format IP "{{ lb_id }}"
      register: loadbalancer_ip_result
      retries: 100
      delay: 5
      changed_when: loadbalancer_ip_result.stdout_lines | length > 1
      until: loadbalancer_ip_result.stdout_lines | length > 1

    - name: d_ocean | dns | parse ip
      ansible.builtin.set_fact:
        lb_ip: "{{ loadbalancer_ip_result.stdout_lines[1] }}"

    - name: d_ocean | dns | register the dns for browsertrix cloud
      ansible.builtin.command: >-
        doctl compute domain records create --record-type A --record-name "{{ subdomain if subdomain else '@' }}" --record-data "{{ lb_ip }}" "{{ domain }}"
      changed_when: dns_result.rc == 0
      register: dns_result


    # Signing + DNS
    # ===========================================
    - name: d_ocean | signing | set signing domain + authtoken
      ansible.builtin.set_fact:
        full_signing_domain: "{{ signing_host }}.{{ full_domain }}"
        signing_subdomain: "{{ signing_host + '.' + subdomain if subdomain else signing_host }}"
        signing_authtoken: "{{ 99999999 | random | to_uuid }}"
      when: enable_signing
      changed_when: true

    - name: d_ocean | signing | check signing record exists
      ansible.builtin.command: doctl compute domain records ls {{ domain }} -o json
      register: check_domain
      changed_when: true

    - name: d_ocean | signing | register the dns for signing subdomain
      ansible.builtin.command: >-
        doctl compute domain records create --record-type A --record-name "{{ signing_subdomain }}" --record-data "{{ lb_ip }}" "{{ domain }}"
      register: signing_dns_result
      when: "check_domain.stdout | from_json | json_query(name_query) | length < 1"
      changed_when: true # if it ran, it changed
      vars:
        name_query: '[?name==`{{ signing_subdomain }}`]'


    # Registry
    # ===========================================
    - name: d_ocean | registry | get endpoint, if using registry
      ansible.builtin.command: doctl registry get --format Endpoint
      register: do_registry_result
      when: use_do_registry
      changed_when: true
      failed_when: do_registry_result.stdout_lines | length < 2
      ignore_errors: true

    - name: d_ocean | registry | store original registry endpoint
      ansible.builtin.set_fact:
        registry_endpoint: "{{ do_registry_result.stdout_lines[1] }}"
      when: use_do_registry and (do_registry_result.stdout_lines | length >= 2)
      changed_when: true

    - name: d_ocean | registry | create registry endpoint
      ansible.builtin.command: doctl registry create {{ registry_name }} --region {{ droplet_region }}
      when: use_do_registry and (do_registry_result.stdout_lines | length < 2)
      changed_when: true

    - name: d_ocean | registry | update registry endpoint
      ansible.builtin.command: doctl registry get --format Endpoint
      register: do_registry_result
      when: use_do_registry and (do_registry_result.stdout_lines | length < 2)
      changed_when: true

    - name: d_ocean | registry | store registry endpoint
      ansible.builtin.set_fact:
        registry_endpoint: "{{ do_registry_result.stdout_lines[1] }}"
      when: use_do_registry and (not do_registry_result.skipped)
      changed_when: true

    - name: d_ocean | registry | configure kubectl
      ansible.builtin.command: doctl k8s cluster kubeconfig save {{ my_cluster.data.id }}
      when: configure_kubectl and use_do_registry
      changed_when: true

    - name: d_ocean | registry | add to new k8s cluster
      ansible.builtin.shell: set -o pipefail && doctl registry kubernetes-manifest | kubectl apply -f -
      when: use_do_registry
      changed_when: true

    # Addons
    # ===========================================
    - name: d_ocean | addons | run all admin bookstrap scripts
      ansible.builtin.command: >-
        ../../chart/admin/logging/scripts/eck_install.sh
      register: addons_init
      when: enable_admin_addons
      changed_when: true

    # Helm Output + Deploy
    # ===========================================
    - name: d_ocean | helm | output values yaml
      ansible.builtin.template:
        src: ../group_vars/do/do-values.template.yaml
        dest: ../deploys/{{ project_name }}-values.yaml
        mode: u+rw

    - name: d_ocean | helm | deploy btrix
      ansible.builtin.command: helm upgrade --install -f ../../chart/values.yaml -f ../deploys/{{ project_name }}-values.yaml btrix ../../chart/
      register: helm_result
      changed_when: helm_result.rc == 0
      tags: helm_upgrade
ansible/roles/btrix/deploy/main.yml (new file, 8 lines)

@@ -0,0 +1,8 @@
---
- name: d_ocean | helm | deploy btrix
  ansible.builtin.command: helm upgrade --install -f ../chart/values.yaml -f ../chart/{{ project_name }}-values.yaml btrix ../chart/
  register: helm_result
  changed_when: helm_result.rc == 0
  environment:
    KUBECONFIG: "/home/{{ ansible_user }}/.kube/config"
  tags: helm_upgrade
@@ -45,11 +45,3 @@
    src: k8s-manifest.yaml.j2
    dest: ../chart/{{ project_name }}-values.yaml
    mode: u+rw

- name: d_ocean | helm | deploy btrix
  ansible.builtin.command: helm upgrade --install -f ../chart/values.yaml -f ../chart/{{ project_name }}-values.yaml btrix ../chart/
  register: helm_result
  changed_when: helm_result.rc == 0
  environment:
    KUBECONFIG: "/home/{{ ansible_user }}/.kube/config"
  tags: helm_upgrade
ansible/roles/digital_ocean/setup/tasks/main.yml (new file, 299 lines)

@@ -0,0 +1,299 @@
---
# Prereq
# ===========================================
# - name: d_ocean | init | install doctl
#   ansible.builtin.package:
#     name: "{{ item }}"
#     state: present
#   loop:
#     - doctl

- name: d_ocean | init | set full domain
  ansible.builtin.set_fact:
    full_domain: "{{ subdomain + '.' + domain if subdomain else domain }}"

# MongoDB
# ===========================================
- name: d_ocean | db | test for existing mongodb
  ansible.builtin.command: doctl db list -o json
  changed_when: false
  failed_when: false
  register: db_check

- name: d_ocean | db | create mongodb database
  ansible.builtin.command: doctl databases create {{ db_name }} --region {{ droplet_region }} --engine mongodb --version 6 --output json
  async: 1800
  poll: 60
  register: db_create
  when: "db_check.stdout | from_json | json_query(name_query) | length < 1"
  changed_when: true
  vars:
    name_query: '[?name==`{{ db_name }}`]'

- name: d_ocean | db | set db id
  ansible.builtin.set_fact:
    db_uuid: "{{ db_create.stdout if db_create.changed else db_check.stdout | from_json | json_query(name_query) | json_query('[0].id') }}"
  vars:
    name_query: '[?name==`{{ db_name }}`]'

- name: d_ocean | db | reset db user password
  ansible.builtin.command: doctl databases user reset {{ db_uuid }} doadmin -o json
  register: db_user
  when: not db_create.changed
  changed_when: true

- name: d_ocean | db | set db config
  ansible.builtin.set_fact:
    db_uuid: "{{ db_create.stdout if db_create.changed else db_check.stdout | from_json | json_query(name_query) | json_query('[0].id') }}"
  vars:
    name_query: '[?name==`{{ db_name }}`]'

- name: d_ocean | db | set db config
  ansible.builtin.set_fact:
    db_url: "{{ db_check.stdout | from_json | json_query(name_query) | json_query('[0].private_connection.uri') | replace(old, new) }}"
  when: not db_create.changed
  vars:
    name_query: '[?name==`{{ db_name }}`]'
    old: ":@"
    new: ":{{ db_user.stdout | from_json | json_query('[0].password') }}@"

# Storage (Space)
# ===========================================
- name: d_ocean | space | create new
  community.digitalocean.digital_ocean_spaces:
    name: "{{ bucket_name }}"
    state: present
    oauth_token: "{{ lookup('env', 'DO_API_TOKEN') }}"
    aws_access_key_id: "{{ lookup('env', 'DO_AWS_ACCESS_KEY') }}"
    aws_secret_access_key: "{{ lookup('env', 'DO_AWS_SECRET_KEY') }}"
    region: "{{ droplet_region }}"
  register: db_space_result
  ignore_errors: true

- name: d_ocean | space | set endpoint urls
  ansible.builtin.set_fact:
    endpoint_url: "{{ db_space_result.data.space.endpoint_url }}/"
    bucket_endpoint_url: "{{ db_space_result.data.space.endpoint_url }}/{{ db_space_result.data.space.name }}/{{ bucket_path }}/"

- name: d_ocean | space | set bucket cors
  community.aws.aws_s3_cors:
    name: "{{ bucket_name }}"
    aws_access_key: "{{ lookup('env', 'DO_AWS_ACCESS_KEY') }}"
    aws_secret_key: "{{ lookup('env', 'DO_AWS_SECRET_KEY') }}"
    endpoint_url: "{{ endpoint_url }}"
    region: "{{ droplet_region }}"
    state: present
    rules:
      - allowed_origins:
          - "https://{{ full_domain }}"
        allowed_methods:
          - GET
          - HEAD
        allowed_headers:
          - "*"
        expose_headers:
          - Content-Range
          - Content-Encoding
          - Content-Length

# K8S
# ===========================================
- name: d_ocean | k8s | test for existing k8s cluster
  ansible.builtin.command: doctl k8s cluster list
  changed_when: false
  failed_when: false
  register: cluster_check

- name: d_ocean | k8s | create a kubernetes cluster in {{ droplet_region }}
  # skip_ansible_lint
  ansible.builtin.command: >-
    doctl kubernetes cluster create {{ k8s_name }} --1-clicks ingress-nginx,cert-manager --node-pool
    "{{ node_pools | select('match', 'name=*') | list | join(',') }}"
    --region={{ droplet_region }}
  async: 1800
  poll: 60
  changed_when: false
  when: cluster_check.stdout.find(k8s_name) == -1

- name: d_ocean | k8s | test for existing k8s cluster's node pools
  ansible.builtin.command: doctl kubernetes cluster node-pool list {{ k8s_name }}
  changed_when: false
  failed_when: false
  register: cluster_nodepools_check

- name: d_ocean | k8s | create a admin node pool in the cluster {{ k8s_name }}
  # skip_ansible_lint
  ansible.builtin.command: >-
    doctl kubernetes cluster node-pool create {{ k8s_name }}
    --name {{ admin_node_pool.name }} --size {{ admin_node_pool.size }}
    --label {{ admin_node_pool.label }} --count {{ admin_node_pool.count }}
  async: 1800
  poll: 60
  changed_when: false
  when: enable_admin_addons and cluster_nodepools_check.stdout.find(admin_node_pool.name) == -1

- name: d_ocean | k8s | remove a admin node pool from the cluster {{ k8s_name }}
  # skip_ansible_lint
  ansible.builtin.command: >-
    echo "yes" | doctl kubernetes cluster node-pool delete {{ k8s_name }} {{ admin_node_pool.name }}
  async: 1800
  poll: 60
  changed_when: false
  when: not enable_admin_addons and cluster_nodepools_check.stdout.find(admin_node_pool.name) >= 0

- name: d_ocean | k8s | Get information about our cluster
  community.digitalocean.digital_ocean_kubernetes_info:
    oauth_token: "{{ lookup('ansible.builtin.env', 'DO_API_TOKEN') }}"
    name: "{{ k8s_name }}"
    return_kubeconfig: true
  register: my_cluster

- name: d_ocean | k8s | print information about an existing DigitalOcean Kubernetes cluster
  ansible.builtin.debug:
    msg: Cluster name is {{ my_cluster.data.name }}, ID is {{ my_cluster.data.id }}
  failed_when: not my_cluster

- name: d_ocean | k8s | save kubectl config to kube_dir
  ansible.builtin.command: doctl kubernetes cluster kubeconfig save {{ my_cluster.data.id }}
  changed_when: false

- name: d_ocean | k8s | get loadbalancer info from doctl
  ansible.builtin.command: doctl k8s cluster list-associated-resources {{ my_cluster.data.id }} --format LoadBalancers --output json
  register: lb_id_result
  retries: 100
  delay: 5
  changed_when: lb_id_result.stdout | from_json | json_query('load_balancers') | length > 0
  until: lb_id_result.stdout | from_json | json_query('load_balancers') | length > 0

- name: d_ocean | k8s | parse lb id
  ansible.builtin.set_fact:
    lb_id: "{{ lb_id_result.stdout | from_json | json_query('load_balancers[0].id') }}"

# FIREWALL
# ===========================================
- name: d_ocean | db | get db firewall list
  ansible.builtin.command: doctl db firewalls list {{ db_uuid }} -o json
  register: db_firewalls
  changed_when: true

- name: d_ocean | db | configure firewall for mongoDB
  ansible.builtin.command: doctl db firewalls append {{ db_uuid }} --rule k8s:{{ my_cluster.data.id }}
  when: "db_firewalls.stdout == []"
  changed_when: true # if it runs, it's changed

# DNS
# ===========================================
- name: d_ocean | dns | grab loadbalancer ip using doctl
  ansible.builtin.command: doctl compute load-balancer get --format IP "{{ lb_id }}"
  register: loadbalancer_ip_result
  retries: 100
  delay: 5
  changed_when: loadbalancer_ip_result.stdout_lines | length > 1
  until: loadbalancer_ip_result.stdout_lines | length > 1

- name: d_ocean | dns | parse ip
  ansible.builtin.set_fact:
    lb_ip: "{{ loadbalancer_ip_result.stdout_lines[1] }}"

- name: d_ocean | dns | register the dns for browsertrix cloud
  ansible.builtin.command: >-
    doctl compute domain records create --record-type A --record-name "{{ subdomain if subdomain else '@' }}" --record-data "{{ lb_ip }}" "{{ domain }}"
  changed_when: dns_result.rc == 0
  register: dns_result


# Signing + DNS
# ===========================================
- name: d_ocean | signing | set signing domain + authtoken
  ansible.builtin.set_fact:
    full_signing_domain: "{{ signing_host }}.{{ full_domain }}"
    signing_subdomain: "{{ signing_host + '.' + subdomain if subdomain else signing_host }}"
    signing_authtoken: "{{ 99999999 | random | to_uuid }}"
  when: enable_signing
  changed_when: true

- name: d_ocean | signing | check signing record exists
  ansible.builtin.command: doctl compute domain records ls {{ domain }} -o json
  register: check_domain
  changed_when: true

- name: d_ocean | signing | register the dns for signing subdomain
  ansible.builtin.command: >-
    doctl compute domain records create --record-type A --record-name "{{ signing_subdomain }}" --record-data "{{ lb_ip }}" "{{ domain }}"
  register: signing_dns_result
  when: "check_domain.stdout | from_json | json_query(name_query) | length < 1"
  changed_when: true # if it ran, it changed
  vars:
    name_query: '[?name==`{{ signing_subdomain }}`]'

# Registry
# ===========================================
- name: d_ocean | registry | get endpoint, if using registry
  ansible.builtin.command: doctl registry get --format Endpoint
  register: do_registry_result
  when: use_do_registry
  changed_when: true
  failed_when: do_registry_result.stdout_lines | length < 2
  ignore_errors: true

- name: d_ocean | registry | store original registry endpoint
  ansible.builtin.set_fact:
    registry_endpoint: "{{ do_registry_result.stdout_lines[1] }}"
  when: use_do_registry and (do_registry_result.stdout_lines | length >= 2)
  changed_when: true

- name: d_ocean | registry | create registry endpoint
  ansible.builtin.command: doctl registry create {{ registry_name }} --region {{ droplet_region }}
  when: use_do_registry and (do_registry_result.stdout_lines | length < 2)
  changed_when: true

- name: d_ocean | registry | update registry endpoint
  ansible.builtin.command: doctl registry get --format Endpoint
  register: do_registry_result
  when: use_do_registry and (do_registry_result.stdout_lines | length < 2)
  changed_when: true

- name: d_ocean | registry | store registry endpoint
  ansible.builtin.set_fact:
    registry_endpoint: "{{ do_registry_result.stdout_lines[1] }}"
  when: use_do_registry and (not do_registry_result.skipped)
  changed_when: true

- name: d_ocean | registry | configure kubectl
  ansible.builtin.command: doctl k8s cluster kubeconfig save {{ my_cluster.data.id }}
  when: configure_kubectl and use_do_registry
  changed_when: true

- name: d_ocean | registry | add to new k8s cluster
  ansible.builtin.shell: set -o pipefail && doctl registry kubernetes-manifest | kubectl apply -f -
  when: use_do_registry
  changed_when: true

# Addons
# ===========================================
- name: d_ocean | addons | run all admin bookstrap scripts
  ansible.builtin.command: >-
    ../chart/admin/logging/scripts/eck_install.sh
  register: addons_init
  when: enable_admin_addons
  changed_when: true

- name: d_ocean | addons | install metrics server
  ansible.builtin.shell: |
    helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
    helm upgrade --install metrics-server metrics-server/metrics-server
  changed_when: false

# Helm Output
# ===========================================
- name: d_ocean | helm | output values yaml
  ansible.builtin.template:
    src: do-values.template.yaml
    dest: ../chart/{{ project_name }}-values.yaml
    mode: u+rw
@@ -4,13 +4,12 @@ main_node_type: main
crawler_node_type: crawling
redis_node_type: crawling

crawler_requests_cpu: "1200m"
crawler_limits_cpu: "2200m"
crawler_requests_memory: "1200Mi"
crawler_limits_memory: "3200Mi"

crawler_pull_policy: "Always"

crawler_browser_instances: 4

crawler_storage: "220Gi"

# Registry
{% if use_do_registry %}
backend_image: "{{ registry_endpoint }}/webrecorder/browsertrix-backend:{{ image_tag }}"

@@ -39,7 +38,6 @@ storages:
ingress:
  host: {{ full_domain }}
  cert_email: {{ cert_email }}
  scheme: "https"
  tls: true
@@ -31,20 +31,22 @@ pipenv install
pipenv shell
```

3. [Look at the configuration options](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/group_vars/do/main.yml) and modify them or pass them as extra variables as shown below. If you haven't configured `kubectl`, please enable the `configure_kube` option
3. [Look at the configuration options](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/inventory/digital_ocean/group_vars/main.yml) and modify them or pass them as extra variables as shown below. If you haven't configured `kubectl`, please enable the `configure_kube` option

4. Run the playbook:
```zsh
ansible-playbook playbooks/do_setup.yml -e project_name="your-project" -e superuser_email="you@yourdomain.com" -e domain="yourdomain.com"
ansible-playbook do_setup.yml -e project_name="your-project" -e superuser_email="you@yourdomain.com" -e domain="yourdomain.com"
```

You may optionally configure these command line parameters through the [group_vars file](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/inventory/digital_ocean/group_vars/main.yml)
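For instance, a minimal sketch of those settings applied directly in `inventory/digital_ocean/group_vars/main.yml` (keys taken from the file added above; the values are placeholders) might be:

```yaml
# inventory/digital_ocean/group_vars/main.yml (excerpt)
project_name: "your-project"
domain: "yourdomain.com"
subdomain: ""                   # empty subdomain means the bare domain is used
superuser_email: "you@yourdomain.com"
```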

#### Upgrading

1. Run `git pull`

2. Run the playbook:
```zsh
ansible-playbook playbooks/do_setup.yml -e project_name="your-project" -e superuser_email="you@yourdomain.com" -e domain_name="yourdomain.com" -t helm_upgrade
ansible-playbook do_setup.yml -e project_name="your-project" -e superuser_email="you@yourdomain.com" -e domain="yourdomain.com" -t helm_upgrade
```

### Uninstall
@@ -29,14 +29,9 @@ cp -r ansible/inventory/sample-k3s ansible/inventory/my-deployment

2. Change the [hosts IP address](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/inventory/sample-k3s/hosts.ini) in your just created inventory

3. Copy the playbook into the root ansible directory:
```zsh
cp ansible/playbooks/install_k3s.yml ansible/install_k3s.yml
```
4. You may need to make modifications to the playbook itself based on your configuration. The playbook lists sections that can be removed or changed based on whether you'd like to install a multi-node or single-node k3s installation for your Browsertrix Cloud deployment. By default the playbook assumes you'll run in a single-node environment deploying directly to `localhost`
5. You may need to make modifications to the playbook itself based on your configuration. The playbook lists sections that can be removed or changed based on whether you'd like to install a multi-node or single-node k3s installation for your Browsertrix Cloud deployment. By default the playbook assumes you'll run in a single-node environment deploying directly to `localhost`

6. Run the playbook:
5. Run the playbook:
```zsh
ansible-playbook -i inventory/my-deployment/hosts.ini install_k3s.yml
```
@@ -47,5 +42,5 @@ ansible-playbook -i inventory/my-deployment/hosts.ini install_k3s.yml

2. Run the playbook:
```zsh
ansible-playbook -i inventory/hosts playbooks/install_k3s.yml -t helm_upgrade
ansible-playbook -i inventory/hosts install_k3s.yml -t helm_upgrade
```