rke2
@@ -0,0 +1,6 @@
---
collections:
  - name: ansible.utils
  - name: community.general
  - name: ansible.posix
  - name: kubernetes.core

@@ -0,0 +1,18 @@
os: "linux"
arch: "amd64"

kube_vip_version: "v0.8.0"
vip_interface: eth0
vip: 192.168.3.50

metallb_version: v0.13.12
lb_range: 192.168.3.80-192.168.3.90
lb_pool_name: first-pool

rke2_version: "v1.29.4+rke2r1"
rke2_install_dir: "/usr/local/bin"
rke2_binary_url: "https://github.com/rancher/rke2/releases/download/{{ rke2_version }}/rke2.linux-amd64"

ansible_user: ubuntu
ansible_become: true
ansible_become_method: sudo
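
The os and arch variables above are not yet consumed by rke2_binary_url, which hard-codes the linux-amd64 asset name. A minimal sketch of driving the URL from those variables, assuming the release assets keep the rke2.<os>-<arch> naming seen above:

# Sketch: build the download URL from os/arch instead of hard-coding linux-amd64
rke2_binary_url: "https://github.com/rancher/rke2/releases/download/{{ rke2_version }}/rke2.{{ os }}-{{ arch }}"
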
@@ -0,0 +1,11 @@
# Make sure the Ansible control host has SSH access to these machines
# Good idea to snapshot all machines and deploy using cloud-init

[servers]
server1 ansible_host=192.168.3.21
server2 ansible_host=192.168.3.22
server3 ansible_host=192.168.3.23

[agents]
agent1 ansible_host=192.168.3.24
agent2 ansible_host=192.168.3.25
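
As the first comment notes, the control host needs SSH and sudo access to every machine before the playbook runs. A small optional pre-flight play, sketched here against this inventory (the play itself is illustrative, not part of the repository):

- name: Pre-flight connectivity check (illustrative)
  hosts: servers,agents
  gather_facts: false
  tasks:
    - name: Confirm SSH access to every node
      ansible.builtin.ping:
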
@@ -0,0 +1,17 @@
# Copy the agent config (which pulls the join token from server1) to all agents
- name: Deploy RKE2 Agent Configuration
  ansible.builtin.template:
    src: templates/rke2-agent-config.j2
    dest: /etc/rancher/rke2/config.yaml
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname in groups['agents']

# Restart the agents so they pick up the new config
- name: Ensure RKE2 agents are enabled and running
  ansible.builtin.systemd:
    name: rke2-agent
    enabled: true
    state: restarted
    daemon_reload: true
@@ -0,0 +1,5 @@
write-kubeconfig-mode: "0644"
token: {{ hostvars['server1']['token'] }}
server: https://{{ hostvars['server1']['ansible_host'] }}:9345
node-label:
  - "agent=true"
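
For reference, with the sample inventory above this template would render roughly as follows on an agent (the token value is a placeholder; the real value is the node-token slurped from server1):

write-kubeconfig-mode: "0644"
token: <token slurped from server1>
server: https://192.168.3.21:9345
node-label:
  - "agent=true"
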
@@ -0,0 +1,53 @@
# Copy the server config with the token to all servers except server 1 (which already has its token)
- name: Deploy RKE2 server Configuration
  ansible.builtin.template:
    src: templates/rke2-server-config.j2
    dest: /etc/rancher/rke2/config.yaml
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname != groups['servers'][0]

# Keep checking the cluster API until it's functioning (deployed)
- name: Wait for cluster API to be ready (can take 5-10 mins depending on internet/hardware)
  ansible.builtin.command:
    cmd: "kubectl get nodes"
  register: kubectl_output
  until: "'connection refused' not in kubectl_output.stderr"
  retries: 120
  delay: 10
  changed_when: true
  become_user: "{{ ansible_user }}"
  when: inventory_hostname == groups['servers'][0]

# Use kubectl to deploy yaml. Perhaps this can be added to the manifest folder initially
- name: Apply kube-vip RBAC manifest
  ansible.builtin.command:
    cmd: kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml apply -f https://kube-vip.io/manifests/rbac.yaml
  changed_when: true
  when: inventory_hostname == groups['servers'][0]

# Apply the kube-vip cloud provider configuration. Perhaps this can be added to the manifest folder initially
- name: Apply kube-vip cloud controller manifest
  ansible.builtin.command:
    cmd: kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
  changed_when: true
  when: inventory_hostname == groups['servers'][0]

# Restart the additional servers so they pick up the new config
- name: Ensure additional RKE2 servers are enabled and running
  ansible.builtin.systemd:
    name: rke2-server
    enabled: true
    state: restarted
    daemon_reload: true
  when: inventory_hostname != groups['servers'][0]

# Enable the additional servers
- name: Ensure RKE2 server is enabled and running
  ansible.builtin.systemd:
    name: rke2-server
    enabled: true
    state: restarted
    daemon_reload: true
  when: inventory_hostname != groups['servers'][0]
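
The TODO list in site.yaml mentions using the Kubernetes Ansible plugins more sensibly; the kubectl-apply tasks above are natural candidates. A sketch of the RBAC step using the kubernetes.core collection already listed in the requirements (it assumes the kubernetes Python package is available on server1, and the /tmp path is only illustrative):

# Sketch: replace one kubectl-apply command with kubernetes.core.k8s
- name: Download kube-vip RBAC manifest (sketch)
  ansible.builtin.get_url:
    url: https://kube-vip.io/manifests/rbac.yaml
    dest: /tmp/kube-vip-rbac.yaml
    mode: '0644'
  when: inventory_hostname == groups['servers'][0]

- name: Apply kube-vip RBAC manifest with kubernetes.core.k8s (sketch)
  kubernetes.core.k8s:
    state: present
    src: /tmp/kube-vip-rbac.yaml
    kubeconfig: /etc/rancher/rke2/rke2.yaml
  when: inventory_hostname == groups['servers'][0]
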
@@ -0,0 +1,10 @@
write-kubeconfig-mode: "0644"
token: {{ hostvars['server1']['token'] }}
server: https://{{ hostvars['server1']['ansible_host'] }}:9345
tls-san:
  - {{ vip }}
  - {{ hostvars['server1']['ansible_host'] }}
  - {{ hostvars['server2']['ansible_host'] }}
  - {{ hostvars['server3']['ansible_host'] }}
node-label:
  - server=true
@@ -0,0 +1,60 @@
# Wait for server 1 to be ready before continuing with the metallb deployment
- name: Wait for k8s nodes with node label 'server=true' to be ready, otherwise we cannot start metallb deployment
  ansible.builtin.command:
    cmd: "kubectl wait --for=condition=Ready nodes --selector server=true --timeout=600s"
  register: nodes_ready
  retries: 120
  delay: 10
  changed_when: true
  become_user: "{{ ansible_user }}"
  when: inventory_hostname == groups['servers'][0]

# Create namespace so that we can deploy metallb
- name: Apply metallb namespace
  ansible.builtin.command:
    cmd: kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
  become_user: "{{ ansible_user }}"
  changed_when: true
  when: inventory_hostname == groups['servers'][0]

# Apply metallb manifest
- name: Apply metallb manifest
  ansible.builtin.command:
    cmd: kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/{{ metallb_version }}/config/manifests/metallb-native.yaml
  become_user: "{{ ansible_user }}"
  changed_when: true
  when: inventory_hostname == groups['servers'][0]

# Wait for the metallb controller pod to be ready before applying the metallb custom resources
- name: Wait for metallb pods to be ready, otherwise we cannot apply the metallb resources
  ansible.builtin.command:
    cmd: "kubectl wait --namespace metallb-system --for=condition=ready pod --selector=component=controller --timeout=1800s"
  changed_when: true
  become_user: "{{ ansible_user }}"
  when: inventory_hostname == groups['servers'][0]

# Apply L2 Advertisement for metallb
- name: Apply metallb L2 Advertisement
  ansible.builtin.command:
    cmd: kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
  become_user: "{{ ansible_user }}"
  changed_when: true
  when: inventory_hostname == groups['servers'][0]

# Deploy the metallb IP pool manifest to server 1
- name: Copy metallb IPPool to server 1
  ansible.builtin.template:
    src: templates/metallb-ippool.j2
    dest: /home/{{ ansible_user }}/ippool.yaml
    owner: "{{ ansible_user }}"
    group: "{{ ansible_user }}"
    mode: '0755'
  when: inventory_hostname == groups['servers'][0]

# This may not work while nodes still carry NoExecute taints; the agents may need to join first
- name: Apply metallb ippool
  ansible.builtin.command:
    cmd: kubectl apply -f /home/{{ ansible_user }}/ippool.yaml
  become_user: "{{ ansible_user }}"
  changed_when: true
  when: inventory_hostname == groups['servers'][0]
@@ -0,0 +1,8 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: {{ lb_pool_name }}
  namespace: metallb-system
spec:
  addresses:
    - {{ lb_range }}
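
The l2Advertisement.yaml fetched from the external repository in the apply-manifests role presumably pairs this pool with an L2Advertisement. A minimal equivalent, templated against the same variables (the resource name is illustrative), would look something like:

apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: {{ lb_pool_name }}-l2
  namespace: metallb-system
spec:
  ipAddressPools:
    - {{ lb_pool_name }}
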
@@ -0,0 +1,17 @@
# Create directory to deploy kube-vip manifest
- name: Create directory for Kube VIP Manifest
  ansible.builtin.file:
    path: "/var/lib/rancher/rke2/server/manifests"
    state: directory
    mode: '0644'
  when: inventory_hostname in groups['servers']

# Copy kube-vip to server 1 manifest folder for auto deployment at bootstrap
- name: Deploy Kube VIP Configuration
  ansible.builtin.template:
    src: templates/kube-vip-config.j2
    dest: /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname == groups['servers'][0]
@@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  creationTimestamp: null
  labels:
    app.kubernetes.io/name: kube-vip-ds
    app.kubernetes.io/version: {{ kube_vip_version }}
  name: kube-vip-ds
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: kube-vip-ds
  template:
    metadata:
      creationTimestamp: null
      labels:
        app.kubernetes.io/name: kube-vip-ds
        app.kubernetes.io/version: {{ kube_vip_version }}
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: node-role.kubernetes.io/master
                    operator: Exists
              - matchExpressions:
                  - key: node-role.kubernetes.io/control-plane
                    operator: Exists
      containers:
        - args:
            - manager
          env:
            - name: vip_arp
              value: "true"
            - name: port
              value: "6443"
            - name: vip_interface
              value: {{ vip_interface }}
            - name: vip_cidr
              value: "32"
            - name: cp_enable
              value: "true"
            - name: cp_namespace
              value: kube-system
            - name: vip_ddns
              value: "false"
            - name: svc_enable
              value: "false"
            - name: svc_leasename
              value: plndr-svcs-lock
            - name: vip_leaderelection
              value: "true"
            - name: vip_leasename
              value: plndr-cp-lock
            - name: vip_leaseduration
              value: "5"
            - name: vip_renewdeadline
              value: "3"
            - name: vip_retryperiod
              value: "1"
            - name: address
              value: {{ vip }}
            - name: prometheus_server
              value: :2112
          image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_version }}
          imagePullPolicy: Always
          name: kube-vip
          resources: {}
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
      hostNetwork: true
      serviceAccountName: kube-vip
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - effect: NoExecute
          operator: Exists
  updateStrategy: {}
status:
  currentNumberScheduled: 0
  desiredNumberScheduled: 0
  numberMisscheduled: 0
  numberReady: 0
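
The DaemonSet above runs under serviceAccountName: kube-vip, which this template does not create; the rbac.yaml applied later in the add-server role presumably provides it. For orientation, a minimal ServiceAccount of that shape would be:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-vip
  namespace: kube-system
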
@@ -0,0 +1,15 @@
- name: Enable IPv4 forwarding
  ansible.posix.sysctl:
    name: net.ipv4.ip_forward
    value: "1"
    state: present
    reload: true
  tags: sysctl

- name: Enable IPv6 forwarding
  ansible.posix.sysctl:
    name: net.ipv6.conf.all.forwarding
    value: "1"
    state: present
    reload: true
  tags: sysctl
@@ -0,0 +1,20 @@
# Create a directory to download the RKE2 binary to
- name: Create directory for RKE2 binary
  ansible.builtin.file:
    path: "{{ rke2_install_dir }}"
    state: directory
    mode: '0755'

# Download the RKE2 binary
- name: Download RKE2 binary
  ansible.builtin.get_url:
    url: "{{ rke2_binary_url }}"
    dest: "{{ rke2_install_dir }}/rke2"
    mode: '0755'

# Set permissions on the RKE2 binary
- name: Set executable permissions on the RKE2 binary
  ansible.builtin.file:
    path: "{{ rke2_install_dir }}/rke2"
    mode: '0755'
    state: file
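
get_url can also verify the download. A sketch with a pinned digest (the sha256 value below is a placeholder, not the real digest for v1.29.4+rke2r1):

- name: Download RKE2 binary with checksum verification (sketch)
  ansible.builtin.get_url:
    url: "{{ rke2_binary_url }}"
    dest: "{{ rke2_install_dir }}/rke2"
    mode: '0755'
    checksum: "sha256:0000000000000000000000000000000000000000000000000000000000000000"  # placeholder digest
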
@@ -0,0 +1,134 @@
- name: Create directory for RKE2 config
  ansible.builtin.file:
    path: "/etc/rancher/rke2"
    state: directory
    mode: '0644'

- name: Create directory for RKE2 token
  ansible.builtin.file:
    path: "/var/lib/rancher/rke2/server"
    state: directory
    mode: '0644'

# Copy the server config to all servers for bootstrap - server 2 & 3 are updated later with the token
- name: Deploy RKE2 server Configuration
  ansible.builtin.template:
    src: templates/rke2-server-config.j2
    dest: /etc/rancher/rke2/config.yaml
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname in groups['servers']

- name: Create systemd service file for RKE2 server
  ansible.builtin.template:
    src: templates/rke2-server.service.j2
    dest: /etc/systemd/system/rke2-server.service
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname in groups['servers']

- name: Create systemd service file for RKE2 agent
  ansible.builtin.template:
    src: templates/rke2-agent.service.j2
    dest: /etc/systemd/system/rke2-agent.service
    owner: root
    group: root
    mode: '0644'
  when: inventory_hostname in groups['agents']

# We start the first server so it generates the token etc.; this is copied to the other nodes afterwards
- name: Ensure RKE2 server is enabled and running
  ansible.builtin.systemd:
    name: rke2-server
    enabled: true
    state: restarted
    daemon_reload: true
  when: inventory_hostname == groups['servers'][0]

# Wait for the node token to be available so that we can copy it; we need this to join the other nodes
- name: Wait for node-token
  ansible.builtin.wait_for:
    path: /var/lib/rancher/rke2/server/node-token
  when: inventory_hostname == groups['servers'][0]

# Wait for kubectl to be downloaded, part of the RKE2 installation
- name: Wait for kubectl
  ansible.builtin.wait_for:
    path: /var/lib/rancher/rke2/bin/kubectl
  when: inventory_hostname == groups['servers'][0]

# Copy kubectl to /usr/local/bin so that all users can run kubectl commands
- name: Copy kubectl to user bin
  ansible.builtin.copy:
    src: /var/lib/rancher/rke2/bin/kubectl
    dest: /usr/local/bin/kubectl
    mode: '0755'
    remote_src: true
  become: true
  when: inventory_hostname == groups['servers'][0]

# Wait for the kubectl copy to complete
- name: Wait for kubectl
  ansible.builtin.wait_for:
    path: /usr/local/bin/kubectl
  when: inventory_hostname == groups['servers'][0]

# Modify token access
- name: Register node-token file access mode
  ansible.builtin.stat:
    path: /var/lib/rancher/rke2/server
  register: p

- name: Change file access for node-token
  ansible.builtin.file:
    path: /var/lib/rancher/rke2/server
    mode: "g+rx,o+rx"
  when: inventory_hostname == groups['servers'][0]

# Save the token as a variable
- name: Fetch the token from the first server node
  ansible.builtin.slurp:
    src: /var/lib/rancher/rke2/server/token
  register: rke2_token
  when: inventory_hostname == groups['servers'][0]
  run_once: true

# Convert the token to a fact
- name: Save Master node-token for later
  ansible.builtin.set_fact:
    token: "{{ rke2_token.content | b64decode | regex_replace('\n', '') }}"

# Revert token file access
- name: Restore node-token file access
  ansible.builtin.file:
    path: /var/lib/rancher/rke2/server
    mode: "{{ p.stat.mode }}"
  when: inventory_hostname == groups['servers'][0]

# Check the .kube folder exists so that we can use kubectl (the config resides here)
- name: Ensure .kube directory exists in user's home
  ansible.builtin.file:
    path: "/home/{{ ansible_user }}/.kube"
    state: directory
    mode: '0755'
  become: true

# Copy the kubectl config file to the .kube folder
- name: Copy config file to user home directory
  ansible.builtin.copy:
    src: /etc/rancher/rke2/rke2.yaml
    dest: "/home/{{ ansible_user }}/.kube/config"
    remote_src: true
    owner: "{{ ansible_user }}"
    mode: "u=rw,g=,o="
  when: inventory_hostname == groups['servers'][0]

# Change the API address from localhost to server 1's IP
- name: Replace IP address with server1
  ansible.builtin.replace:
    path: /home/{{ ansible_user }}/.kube/config
    regexp: '127.0.0.1'
    replace: "{{ hostvars['server1']['ansible_host'] }}"
  when: inventory_hostname == groups['servers'][0]
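
The rke2-server-config.j2 and rke2-agent-config.j2 templates used by the add-server and add-agent roles rely on the token fact set above being visible via hostvars['server1']. A small optional check, sketched here, would make a missing token fail loudly before any template renders:

- name: Confirm the join token was captured from server1 (illustrative check)
  ansible.builtin.assert:
    that:
      - hostvars['server1']['token'] is defined
    fail_msg: "node-token was not captured from server1; the server/agent config templates would fail to render"
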
@@ -0,0 +1,13 @@
# rke2-agent.service.j2
[Unit]
Description=RKE2 Agent
After=network.target

[Service]
ExecStart=/usr/local/bin/rke2 agent
KillMode=process
Restart=on-failure
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,10 @@
write-kubeconfig-mode: "0644"
tls-san:
  - {{ vip }}
  - {{ hostvars['server1']['ansible_host'] }}
  - {{ hostvars['server2']['ansible_host'] }}
  - {{ hostvars['server3']['ansible_host'] }}
node-label:
  - server=true
disable:
  - rke2-ingress-nginx
@@ -0,0 +1,13 @@
# rke2-server.service.j2
[Unit]
Description=RKE2 server
After=network.target

[Service]
ExecStart=/usr/local/bin/rke2 server
KillMode=process
Restart=on-failure
RestartSec=5s

[Install]
WantedBy=multi-user.target
@@ -0,0 +1,61 @@
# Hello, thanks for using my playbook, hopefully you can help to improve it.
# Things that need adding: (there are many more)
# 1) Support different OSes & architectures
# 2) Support multiple CNIs
# 3) Improve the wait logic
# 4) Use the kubernetes Ansible plugins more sensibly
# 5) Optimise flow logic
# 6) Clean up

###############################################################
# MAKE SURE YOU CHANGE group_vars/all.yaml VARIABLES!!!!!!!!!!!
###############################################################

# Enables IPv4/IPv6 forwarding on every node before installation
- name: Prepare all nodes
  hosts: servers,agents
  gather_facts: true # enables us to gather lots of useful variables: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html
  roles:
    - prepare-nodes

# Creates the download directory, then downloads RKE2 and sets permissions
- name: Download RKE2
  hosts: servers,agents
  gather_facts: true
  roles:
    - rke2-download

# Creates the RKE2 bootstrap manifests folder and copies the kube-vip template over (configured with variables)
- name: Deploy Kube VIP
  hosts: servers
  gather_facts: true
  roles:
    - kube-vip

# Bootstraps the first server, copies configs to the nodes, and saves the token for later use
- name: Prepare RKE2 on Servers and Agents
  hosts: servers,agents
  gather_facts: true
  roles:
    - rke2-prepare

# Adds the additional servers using the token from the previous play
- name: Add additional RKE2 Servers
  hosts: servers
  gather_facts: true
  roles:
    - add-server

# Adds the agents to the cluster
- name: Add additional RKE2 Agents
  hosts: agents
  gather_facts: true
  roles:
    - add-agent

# Finish kube-vip, add metallb
- name: Apply manifests after cluster is created
  hosts: servers
  gather_facts: true
  roles:
    - apply-manifests