This commit is contained in:
Ira Abramov 2025-03-26 15:08:40 +02:00 committed by GitHub
commit dd467ab7d3
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 235 additions and 101 deletions

55
.editorconfig Normal file
View File

@ -0,0 +1,55 @@
# EditorConfig is awesome: http://EditorConfig.org

# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
indent_style = tab
indent_size = 4

# Matches multiple files with brace expansion notation
[*.{c,h,cpp}]

# Set default charset
[*.{js,py}]
charset = utf-8

# Tab indentation, displayed 4 columns wide
[*.py]
indent_style = tab
indent_size = 4

# Tab indentation (no size specified)
[Makefile*]
indent_style = tab

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = tab
indent_size = 2

[.gitconfig]
indent_style = tab
indent_size = 2

[Vagrantfile]
indent_style = tab
indent_size = 2

# Matches the exact files either package.json or .travis.yml
# NOTE: the spec only accepts "tab" or "space" here — "spaces" is
# silently ignored by editors, so these sections previously had no effect.
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2

# Standards I like
[{*.groovy,*.rb,*.sh,.bash*}]
indent_style = tab
indent_size = 4

# Standards I'm forced to...
[{*.md,*.MD,*.yaml,*.yml}]
indent_style = space
indent_size = 2

View File

@ -1,13 +1,52 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
---
# fail_fast: true
minimum_pre_commit_version: 1.18.1
# exclude: docs/_build/
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      # NOTE(review): check-symlinks and destroyed-symlinks were each listed
      # twice in this repo block; the duplicates have been removed.
      - id: detect-aws-credentials
        args: [--allow-missing-credentials]
      - id: trailing-whitespace # trims trailing whitespace.
      - id: requirements-txt-fixer
        exclude: ".(md|rst)$"
      - id: end-of-file-fixer # ensures that a file is either empty, or ends with one newline.
      - id: check-ast # simply checks whether the files parse as valid python.
      - id: check-merge-conflict # checks for files that contain merge conflict strings.
      - id: check-symlinks # checks for symlinks which do not point to anything.
      - id: check-added-large-files # prevents giant files from being committed.
        args: ["--maxkb=4096"]
      - id: check-builtin-literals
      - id: check-case-conflict
      - id: check-toml # checks toml files for parseable syntax.
      - id: check-docstring-first # checks a common error of defining a docstring after code.
      - id: check-executables-have-shebangs # ensures that (non-binary) executables have a shebang.
      - id: check-shebang-scripts-are-executable
      - id: check-yaml # checks yaml files for parseable syntax.
      - id: debug-statements # checks for debugger imports and py37+ `breakpoint()` calls in python source.
      - id: destroyed-symlinks # detects symlinks which are changed to regular files with a content of a path which that symlink was pointing to.
      - id: detect-private-key # detects the presence of private keys.
      - id: mixed-line-ending # replaces or checks mixed line ending.
        args: ["--fix=lf"]
  - repo: https://github.com/IamTheFij/docker-pre-commit
    rev: v3.0.1
    hooks:
      - id: docker-compose-check
  - repo: https://github.com/pre-commit/pre-commit
    rev: v3.5.0
    hooks:
      - id: validate_manifest
  - repo: https://github.com/jumanjihouse/pre-commit-hooks
    rev: 2.1.5
    hooks:
      # - id: git-check # Configure in .gitattributes
      - id: shellcheck
      # exclude: ".bats$"
      - id: shfmt
      # exclude: ".bats$"

238
Kubernetes/RKE2/rke2.sh Normal file → Executable file
View File

@ -15,48 +15,51 @@ echo -e " \033[36;5m \
echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m"
echo -e " \033[32;5m \033[0m"
#############################################
# YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
#############################################
# Version of Kube-VIP to deploy
KVVERSION="v0.6.3"
export KVVERSION="v0.7.2"
# The domain name of your cluster, inherit env by default
DOMAIN=${DOMAIN:-my.org}
# Set the IP addresses of the admin, masters, and workers nodes
admin=192.168.3.5
master1=192.168.3.21
master2=192.168.3.22
master3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25
# "admin" is your desktop machine from which you will be running the ops,
# just for this run, make sure you have sshd
# running and accessible here!
admin=192.168.60.22
master1=192.168.60.37
master2=192.168.60.38
master3=192.168.60.39
# Array of worker nodes
workers=(192.168.60.26 192.168.60.83)
# User of remote machines
user=ubuntu
remoteuser=ubuntu
# Interface used on remotes
interface=eth0
# Set the virtual IP address (VIP)
vip=192.168.3.50
vip=192.168.60.171
# Array of extra master nodes
extramasters=("$master2" "$master3")
# Array of all master nodes
allmasters=($master1 $master2 $master3)
# Array of master nodes
masters=($master2 $master3)
# Array of worker nodes
workers=($worker1 $worker2)
# Array of all
all=($master1 $master2 $master3 $worker1 $worker2)
allmasters=("$master1" "${extramasters[@]}")
# Array of all minus master1
allnomaster1=($master2 $master3 $worker1 $worker2)
allnomaster1=("${extramasters[@]}" "${workers[@]}")
# Array of all
all=("$master1" "${allnomaster1[@]}")
#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80
lbrange=192.168.60.171-192.168.60.189
#ssh certificate name variable
certName=id_rsa
@ -64,23 +67,42 @@ certName=id_rsa
#############################################
# DO NOT EDIT BELOW #
#############################################
# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on
# Move SSH certs to ~/.ssh and change permissions
cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
chmod 600 /home/$user/.ssh/$certName
chmod 644 /home/$user/.ssh/$certName.pub
#fail immediately on errors
set -e
# For testing purposes - in case time is wrong due to VM snapshots
if hash timedatectl 2>/dev/null; then
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on
fi
# Create a directory for the SSH certs
mkdir -p ~/.ssh
# Generate SSH certs if missing
if [ ! -f "$HOME"/.ssh/$certName ]; then
if [ -f "$HOME"/$certName ]; then
# Move SSH certs to ~/.ssh and change permissions
cp "$HOME"/$certName{,.pub} "$HOME"/.ssh
chmod 400 "$HOME"/.ssh/*
chmod 700 "$HOME"/.ssh
else
ssh-keygen -t rsa -f "$HOME"/.ssh/$certName -N ""
fi
fi
# Install Kubectl if not already present
if ! command -v kubectl version &> /dev/null
then
echo -e " \033[31;5mKubectl not found, installing\033[0m"
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
if ! command -v kubectl version &>/dev/null; then
if [ "$OS" == "Darwin" ]; then
brew install kubernetes-cli
else # assume Linux?
echo -e " \033[31;5mKubectl not found, installing\033[0m"
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(uname -m)/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
fi
else
echo -e " \033[32;5mKubectl already installed\033[0m"
echo -e " \033[32;5mKubectl already installed\033[0m"
fi
# Create SSH Config file to ignore checking (don't use in production!)
@ -88,73 +110,86 @@ sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config
#add ssh keys for all nodes
for node in "${all[@]}"; do
ssh-copy-id $user@$node
ssh-copy-id "$remoteuser@$node"
done
# Step 1: Create Kube VIP
# create RKE2's self-installing manifest dir
sudo mkdir -p /var/lib/rancher/rke2/server/manifests
# Install the kube-vip deployment into rke2's self-installing manifest folder
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip
cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
# shellcheck disable=SC2016
curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip |
sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' >~/kube-vip.yaml
# Find/Replace all k3s entries to represent rke2
sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
# copy kube-vip.yaml to home directory
sudo cp /var/lib/rancher/rke2/server/manifests/kube-vip.yaml ~/kube-vip.yaml
# change owner
sudo chown $user:$user kube-vip.yaml
sed -i 's/k3s/rke2/g' ~/kube-vip.yaml
sudo cp ~/kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
# make kube folder to run kubectl later
mkdir ~/.kube
mkdir -p ~/.kube
# create the rke2 config file
sudo mkdir -p /etc/rancher/rke2
touch config.yaml
echo "tls-san:" >> config.yaml
echo " - $vip" >> config.yaml
echo " - $master1" >> config.yaml
echo " - $master2" >> config.yaml
echo " - $master3" >> config.yaml
echo "write-kubeconfig-mode: 0644" >> config.yaml
echo "disable:" >> config.yaml
echo " - rke2-ingress-nginx" >> config.yaml
echo >~/config.yaml
{
echo "tls-san:"
echo " - $vip"
echo " - $master1"
echo " - $master2"
echo " - $master3"
echo "write-kubeconfig-mode: 0644"
echo "disable:"
echo " - rke2-ingress-nginx"
} >>~/config.yaml
# copy config.yaml to rancher directory
sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml
# update path with rke2-binaries
echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ;
{
# update path with rke2-binaries
echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml'
# shellcheck disable=SC2016
echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin'
echo 'alias k=kubectl'
} >>~/.bashrc
# shellcheck disable=SC1090
source ~/.bashrc
# Step 2: Copy kube-vip.yaml and certs to all masters
for newnode in "${allmasters[@]}"; do
scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml
scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml
scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh
echo -e " \033[32;5mCopied successfully!\033[0m"
scp -i ~/.ssh/$certName ~/kube-vip.yaml "$remoteuser@$newnode":~/kube-vip.yaml
scp -i ~/.ssh/$certName ~/config.yaml "$remoteuser@$newnode":~/config.yaml
scp -i ~/.ssh/$certName ~/.ssh/$certName{,.pub} "$remoteuser@$newnode":~/.ssh
echo -e " \033[32;5mCopied successfully!\033[0m"
done
# Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. We then use the token to bootstrap additional masternodes
ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su <<EOF
# shellcheck disable=SC2087
ssh -tt $remoteuser@$master1 -i ~/.ssh/$certName sudo su <<EOF
mkdir -p /var/lib/rancher/rke2/server/manifests
mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
mkdir -p /etc/rancher/rke2
mv config.yaml /etc/rancher/rke2/config.yaml
echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ;
{
echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml'
echo 'export PATH=\${PATH}:/var/lib/rancher/rke2/bin'
echo 'alias k=kubectl'
} >> ~/.bashrc
source ~/.bashrc
curl -sfL https://get.rke2.io | sh -
systemctl enable rke2-server.service
systemctl start rke2-server.service
echo "StrictHostKeyChecking no" > ~/.ssh/config
ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin
scp -i /home/$user/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token
scp -i /home/$user/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml
ssh-copy-id -i ~/.ssh/$certName $USER@$admin
scp -i ~/.ssh/$certName /var/lib/rancher/rke2/server/token $USER@$admin:~/token
scp -i ~/.ssh/$certName /etc/rancher/rke2/rke2.yaml $USER@$admin:~/.kube/rke2.yaml
exit
EOF
echo -e " \033[32;5mMaster1 Completed\033[0m"
# Step 4: Set variable to the token we just extracted, set kube config location
token=`cat token`
sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' > $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
token=$(cat ~/token)
sed 's/127.0.0.1/'$master1'/g' <~/.kube/rke2.yaml >~/.kube/config
sudo chown "$(id -u):$(id -g)" ~/.kube/config
export KUBECONFIG=${HOME}/.kube/config
sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml
kubectl get nodes
@ -164,32 +199,36 @@ kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml
# Step 6: Add other Masternodes, note we import the token we extracted from step 3
for newnode in "${masters[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
for newnode in "${extramasters[@]}"; do
# shellcheck disable=SC2087
ssh -tt "$remoteuser@$newnode" -i ~/.ssh/$certName sudo su <<EOF
mkdir -p /etc/rancher/rke2
touch /etc/rancher/rke2/config.yaml
echo "token: $token" >> /etc/rancher/rke2/config.yaml
echo "server: https://$master1:9345" >> /etc/rancher/rke2/config.yaml
echo "tls-san:" >> /etc/rancher/rke2/config.yaml
echo " - $vip" >> /etc/rancher/rke2/config.yaml
echo " - $master1" >> /etc/rancher/rke2/config.yaml
echo " - $master2" >> /etc/rancher/rke2/config.yaml
echo " - $master3" >> /etc/rancher/rke2/config.yaml
echo > /etc/rancher/rke2/config.yaml
{
echo "token: $token"
echo "server: https://$master1:9345"
echo "tls-san:"
echo " - $vip"
echo " - $master1"
echo " - $master2"
echo " - $master3"
} >> /etc/rancher/rke2/config.yaml
curl -sfL https://get.rke2.io | sh -
systemctl enable rke2-server.service
systemctl start rke2-server.service
time systemctl start rke2-server.service
exit
EOF
echo -e " \033[32;5mMaster node joined successfully!\033[0m"
echo -e " \033[32;5mMaster node joined successfully!\033[0m"
done
kubectl get nodes
# Step 7: Add Workers
for newnode in "${workers[@]}"; do
ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
# shellcheck disable=SC2087
ssh -tt "$remoteuser@$newnode" -i ~/.ssh/$certName sudo su <<EOF
mkdir -p /etc/rancher/rke2
touch /etc/rancher/rke2/config.yaml
echo > /etc/rancher/rke2/config.yaml
echo "token: $token" >> /etc/rancher/rke2/config.yaml
echo "server: https://$vip:9345" >> /etc/rancher/rke2/config.yaml
echo "node-label:" >> /etc/rancher/rke2/config.yaml
@ -197,10 +236,10 @@ for newnode in "${workers[@]}"; do
echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml
curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh -
systemctl enable rke2-agent.service
systemctl start rke2-agent.service
time systemctl start rke2-agent.service
exit
EOF
echo -e " \033[32;5mWorker node joined successfully!\033[0m"
echo -e " \033[32;5mWorker node joined successfully!\033[0m"
done
kubectl get nodes
@ -210,16 +249,17 @@ echo -e " \033[32;5mDeploying Metallb\033[0m"
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
# Download ipAddressPool and configure using lbrange above
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool
cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
# shellcheck disable=SC2016
curl -s https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool |
sed 's/$lbrange/'$lbrange'/g' >~/ipAddressPool.yaml
# Step 9: Deploy IP Pools and l2Advertisement
echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m"
kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=component=controller \
--timeout=1800s
kubectl apply -f ipAddressPool.yaml
--for=condition=ready pod \
--selector=component=controller \
--timeout=1800s
kubectl apply -f ~/ipAddressPool.yaml
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
# Step 10: Install Rancher (Optional - Delete if not required)
@ -239,17 +279,17 @@ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager \
--namespace cert-manager \
--create-namespace \
--version v1.13.2
--namespace cert-manager \
--create-namespace \
--version v1.13.2
kubectl get pods --namespace cert-manager
# Install Rancher
echo -e " \033[32;5mDeploying Rancher\033[0m"
helm install rancher rancher-latest/rancher \
--namespace cattle-system \
--set hostname=rancher.my.org \
--set bootstrapPassword=admin
--namespace cattle-system \
--set hostname="rancher.$DOMAIN" \
--set bootstrapPassword=admin
kubectl -n cattle-system rollout status deploy/rancher
kubectl -n cattle-system get deploy rancher
@ -257,8 +297,8 @@ kubectl -n cattle-system get deploy rancher
kubectl get svc -n cattle-system
kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system
while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.type=="Pending")].status}') = "True" ]]; do
sleep 5
echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m"
sleep 5
echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m"
done
kubectl get svc -n cattle-system