feat: k3s ansible playbook (#1071)

Restructures the Ansible playbook directory layout to follow Ansible
best practices, using roles and a real inventory file.

Co-authored-by: Ilya Kreymer <ikreymer@users.noreply.github.com>
Anish Lakhwara 2023-09-05 14:50:18 -07:00 committed by GitHub
parent 7d0cfa93e2
commit 00eddd548d
17 changed files with 501 additions and 0 deletions


@ -0,0 +1,10 @@
---
k3s_version: v1.22.3+k3s1
ansible_user: debian
systemd_dir: /etc/systemd/system
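# Resolves to the ansible_host of the first host in the [controller] group,
# falling back to that host's inventory name if ansible_host is not set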
controller_ip: "{{ hostvars[groups['controller'][0]]['ansible_host'] | default(groups['controller'][0]) }}"
extra_server_args: "--disable traefik"
extra_agent_args: ""
project_name: browsertrix-cloud
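# Set these to the domain your DNS A record points at and the email used for Let's Encrypt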
domain: my-domain.example.com
email: test@example.com


@ -0,0 +1,12 @@
[controller]
# Set to the IP address of the k3s host node
127.0.0.1
# Uncomment for multi-node deployment
# [node]
# 192.168.1.2
[k3s_cluster:children]
controller
# Uncomment for multi-node deployment
# node


@ -0,0 +1,30 @@
---
# Installs k3s; can be skipped if k3s is already installed
- hosts: k3s_cluster
gather_facts: yes
connection: local # Comment if deploying to remote host
become: yes
roles:
- role: prereq
- role: download
# Configures the master k3s node; can be skipped if k3s is already configured
- hosts: controller
connection: local # Comment if deploying to remote host
become: yes
roles:
- role: k3s/master
# Uncomment for multi-node deployment
# - hosts: node
# roles:
# - role: k3s/node
# Ansible controller to install browsertrix cloud
- hosts: 127.0.0.1
connection: local
become: yes # Can be removed if not using the btrix/prereq role
roles:
- role: btrix/prereq # Only required if you wish to install & configure Helm / Kubectl
- role: btrix/install


@ -0,0 +1,55 @@
---
- name: Create directory .kube
file:
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Check whether kube config exists
stat:
path: ~/.kube/config
register: kubeconfig_result
- name: Get k3s config
ansible.posix.synchronize:
src: rsync://{{ controller_ip }}/home/{{ ansible_user }}/.kube/config
dest: ~/.kube/config
when: not kubeconfig_result.stat.exists
- name: Check whether CRDs installed
ansible.builtin.command: kubectl get crd
register: crd_register
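# Only run the bootstrap script when `kubectl get crd` returned essentially nothing,
# i.e. the cluster has no CRDs installed yet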
- name: Run all admin bootstrap scripts
ansible.builtin.command: >-
../chart/admin/logging/scripts/eck_install.sh
register: addons_init
when: "crd_register.stdout | length < 16"
changed_when: true
- name: Install Cert-Manager
ansible.builtin.command: kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.0/cert-manager.yaml
changed_when: true
- name: Install ingress-nginx
ansible.builtin.command: helm upgrade --install ingress-nginx ingress-nginx --repo https://kubernetes.github.io/ingress-nginx --namespace ingress-nginx --create-namespace
- name: Install Metrics Server
ansible.builtin.shell: |
helm repo add metrics-server https://kubernetes-sigs.github.io/metrics-server/
helm upgrade --install metrics-server metrics-server/metrics-server
- name: Output values yaml
ansible.builtin.template:
src: k8s-manifest.yaml.j2
dest: ../chart/{{ project_name }}-values.yaml
mode: u+rw
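# Deploy the chart, layering the generated values file on top of the default chart values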
- name: btrix | helm | deploy btrix
ansible.builtin.command: helm upgrade --install -f ../chart/values.yaml -f ../chart/{{ project_name }}-values.yaml btrix ../chart/
register: helm_result
changed_when: helm_result.rc == 0
environment:
KUBECONFIG: "/home/{{ ansible_user }}/.kube/config"
tags: helm_upgrade


@ -0,0 +1,26 @@
---
- name: Gather installed Helm version, if any
ansible.builtin.shell: helm version
register: helm_result
failed_when: helm_result.rc != 0 and helm_result.rc != 127
# Since this is a reporting task it should never report a change,
# and it should run and register a result even in check mode
changed_when: false
check_mode: false
- name: Install Helm
ansible.builtin.shell: |
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
when: helm_result.rc != 0
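# Downloads the latest stable kubectl release for linux/amd64 and installs it into /usr/local/bin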
- name: Install kubectl
ansible.builtin.shell: |
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: Install jq
ansible.builtin.package:
name: jq
state: present


@ -0,0 +1,11 @@
ingress_class: "nginx"
mongo_auth:
username: root
password: example
ingress:
host: "{{ domain }}"
cert_email: "{{ email }}"
scheme: "https"
tls: true


@ -0,0 +1,36 @@
---
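# Fetch the k3s release binary that matches the host architecture and verify it
# against the published sha256 checksum file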
- name: Download k3s binary x64
get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
when: ansible_facts.architecture == "x86_64"
- name: Download k3s binary arm64
get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
when:
- ( ansible_facts.architecture is search("arm") and
ansible_facts.userspace_bits == "64" ) or
ansible_facts.architecture is search("aarch64")
- name: Download k3s binary armhf
get_url:
url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf
checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt
dest: /usr/local/bin/k3s
owner: root
group: root
mode: 0755
when:
- ansible_facts.architecture is search("arm")
- ansible_facts.userspace_bits == "32"


@ -0,0 +1,2 @@
---
k3s_server_location: /var/lib/rancher/k3s


@ -0,0 +1,91 @@
---
- name: Copy K3s service file
register: k3s_service
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s.service"
owner: root
group: root
mode: 0644
- name: Enable and check K3s service
systemd:
name: k3s
daemon_reload: yes
state: started
enabled: yes
- name: Wait for node-token
wait_for:
path: "{{ k3s_server_location }}/server/node-token"
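# Record the node-token file mode, temporarily open it up so it can be read,
# then restore the original permissions once the token has been captured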
- name: Register node-token file access mode
stat:
path: "{{ k3s_server_location }}/server/node-token"
register: p
- name: Change node-token file access
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "g+rx,o+rx"
- name: Read node-token from master
slurp:
path: "{{ k3s_server_location }}/server/node-token"
register: node_token
- name: Store Master node-token
set_fact:
token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}"
- name: Restore node-token file access
file:
path: "{{ k3s_server_location }}/server/node-token"
mode: "{{ p.stat.mode }}"
- name: Create directory .kube
file:
path: ~{{ ansible_user }}/.kube
state: directory
owner: "{{ ansible_user }}"
mode: "u=rwx,g=rx,o="
- name: Copy config file to user home directory
copy:
src: /etc/rancher/k3s/k3s.yaml
dest: ~{{ ansible_user }}/.kube/config
remote_src: yes
owner: "{{ ansible_user }}"
mode: "u=rw,g=,o="
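# The kubeconfig written by k3s points at https://localhost:6443; rewrite the
# server address so the config also works from outside the controller node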
- name: Replace https://localhost:6443 with https://controller-ip:6443
command: >-
k3s kubectl config set-cluster default
--server=https://{{ controller_ip }}:6443
--kubeconfig ~{{ ansible_user }}/.kube/config
changed_when: true
- name: Check that the kubectl binary exists
stat:
path: /usr/local/bin/kubectl
register: kubectl_result
- name: Check that the crictl binary exists
stat:
path: /usr/local/bin/crictl
register: crictl_result
- name: Create kubectl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/kubectl
state: link
when: not kubectl_result.stat.exists
- name: Create crictl symlink
file:
src: /usr/local/bin/k3s
dest: /usr/local/bin/crictl
state: link
when: not crictl_result.stat.exists


@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target


@ -0,0 +1,16 @@
---
- name: Copy K3s service file
template:
src: "k3s.service.j2"
dest: "{{ systemd_dir }}/k3s-node.service"
owner: root
group: root
mode: 0755
- name: Enable and check K3s service
systemd:
name: k3s-node
daemon_reload: yes
state: restarted
enabled: yes


@ -0,0 +1,24 @@
[Unit]
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ controller_ip }}:6443 --token {{ hostvars[groups['controller'][0]]['token'] }} {{ extra_agent_args | default("") }}
KillMode=process
Delegate=yes
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=0
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target


@ -0,0 +1,54 @@
---
- name: Set SELinux to disabled state
selinux:
state: disabled
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux', 'Rocky Linux']
- name: Enable IPv4 forwarding
sysctl:
name: net.ipv4.ip_forward
value: "1"
state: present
reload: yes
- name: Enable IPv6 forwarding
sysctl:
name: net.ipv6.conf.all.forwarding
value: "1"
state: present
reload: yes
when: ansible_all_ipv6_addresses
- name: Add br_netfilter to /etc/modules-load.d/
copy:
content: "br_netfilter"
dest: /etc/modules-load.d/br_netfilter.conf
mode: "u=rw,g=,o="
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux', 'Rocky Linux']
- name: Load br_netfilter
modprobe:
name: br_netfilter
state: present
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux', 'Rocky Linux']
- name: Set bridge-nf-call-iptables (just to be sure)
sysctl:
name: "{{ item }}"
value: "1"
state: present
reload: yes
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux', 'Rocky Linux']
loop:
- net.bridge.bridge-nf-call-iptables
- net.bridge.bridge-nf-call-ip6tables
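# Ensures binaries installed under /usr/local/bin (k3s, kubectl) are found when running via sudo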
- name: Add /usr/local/bin to sudo secure_path
lineinfile:
line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin'
regexp: "Defaults(\\s)*secure_path(\\s)*="
state: present
insertafter: EOF
path: /etc/sudoers
validate: 'visudo -cf %s'
when: ansible_distribution in ['CentOS', 'Red Hat Enterprise Linux', 'Rocky Linux']


@ -0,0 +1,42 @@
---
- name: Disable services
systemd:
name: "{{ item }}"
state: stopped
enabled: no
failed_when: false
with_items:
- k3s
- k3s-node
- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
register: pkill_containerd_shim_runc
command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc"
changed_when: "pkill_containerd_shim_runc.rc == 0"
failed_when: false
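# Unmount every k3s-related mountpoint together with any child mounts
# (umount_with_children.yml enumerates them and unmounts deepest-first)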
- name: Umount k3s filesystems
include_tasks: umount_with_children.yml
with_items:
- /run/k3s
- /var/lib/kubelet
- /run/netns
- /var/lib/rancher/k3s
loop_control:
loop_var: mounted_fs
- name: Remove service files, binaries and data
file:
name: "{{ item }}"
state: absent
with_items:
- /usr/local/bin/k3s
- "{{ systemd_dir }}/k3s.service"
- "{{ systemd_dir }}/k3s-node.service"
- /etc/rancher/k3s
- /var/lib/kubelet
- /var/lib/rancher/k3s
- name: daemon_reload
systemd:
daemon_reload: yes


@ -0,0 +1,16 @@
---
- name: Get the list of mounted filesystems
shell: set -o pipefail && cat /proc/mounts | awk '{ print $2}' | grep -E "^{{ mounted_fs }}"
register: get_mounted_filesystems
args:
executable: /bin/bash
failed_when: false
changed_when: get_mounted_filesystems.stdout | length > 0
check_mode: false
- name: Umount filesystem
mount:
path: "{{ item }}"
state: unmounted
with_items:
"{{ get_mounted_filesystems.stdout_lines | reverse | list }}"


@ -0,0 +1,51 @@
# K3S
*Playbook Path: [ansible/playbooks/install_k3s.yml](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/playbooks/install_k3s.yml)*
This playbook provides an easy way to install Browsertrix Cloud on a Linux box (tested on Rocky Linux 9). It automatically sets up Browsertrix with Let's Encrypt certificates.
### Requirements
To run this Ansible playbook, you need to:
* Have a server / VPS where Browsertrix will run.
* Configure a DNS A Record to point at your server's IP address.
* Make sure you can SSH to it with a sudo-capable user: `ssh <your-user>@<your-domain>`
* Install Ansible on your local machine (the control machine).
1. Clone the repo:
```zsh
git clone https://github.com/webrecorder/browsertrix-cloud.git
cd browsertrix-cloud
```
2. Optional: Create a copy of the inventory directory and name it what you like (alternatively, edit the sample files in place)
```zsh
cp -r ansible/inventory/sample-k3s ansible/inventory/my-deployment
```
3. [Look at the configuration options](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/inventory/sample-k3s/group_vars/all.yml) and modify them to match your setup (see the example after this list)
4. Change the [hosts IP address](https://github.com/webrecorder/browsertrix-cloud/blob/main/ansible/inventory/sample-k3s/hosts.ini) in your newly created inventory
5. Copy the playbook into the root ansible directory:
```zsh
cp ansible/playbooks/install_k3s.yml ansible/install_k3s.yml
```
6. You may need to modify the playbook itself based on your configuration. The playbook marks the sections that can be removed or changed depending on whether you want a multi-node or single-node k3s installation for your Browsertrix Cloud deployment. By default, the playbook assumes a single-node environment deploying directly to `localhost`.
7. Run the playbook:
```zsh
ansible-playbook -i inventory/my-deployment/hosts.ini install_k3s.yml
```
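As referenced in step 3, here is a minimal sketch of the values most deployments override in `group_vars/all.yml` before running the playbook; the domain, email, and user shown are placeholders, not required defaults:
```yaml
# ansible/inventory/my-deployment/group_vars/all.yml (illustrative values)
ansible_user: debian                    # the user you SSH in as on the k3s host
domain: btrix.example.org               # must match the DNS A record for your server
email: admin@example.org                # used for the Let's Encrypt certificate
extra_server_args: "--disable traefik"  # traefik stays disabled; ingress-nginx is installed by the playbook
```
For step 4, the `hosts.ini` change is simply replacing `127.0.0.1` under `[controller]` with your server's address.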
#### Upgrading
1. Run `git pull`
2. Run the playbook:
```zsh
ansible-playbook -i inventory/my-deployment/hosts.ini install_k3s.yml -t helm_upgrade
```


@ -75,3 +75,4 @@ Currently, we provide playbooks for the following tested environments:
- [DigitalOcean](ansible/digitalocean.md)
- [Microk8s](ansible/microk8s.md)
- [k3s](ansible/k3s.md)