fix runs of whitespace, shfmt, indentation

Ira Abramov 2024-04-04 16:06:12 +03:00
parent 39883fa083
commit 50c7274c95
1 changed file with 42 additions and 41 deletions

Kubernetes/RKE2/rke2.sh Normal file → Executable file

@@ -15,7 +15,6 @@ echo -e " \033[36;5m \
 echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m"
 echo -e " \033[32;5m \033[0m"
-
 
 #############################################
 # YOU SHOULD ONLY NEED TO EDIT THIS SECTION #
 #############################################
@@ -73,17 +72,16 @@ sudo timedatectl set-ntp on
 
 # Move SSH certs to ~/.ssh and change permissions
 cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh
 chmod 600 /home/$user/.ssh/$certName
 chmod 644 /home/$user/.ssh/$certName.pub
 
 # Install Kubectl if not already present
-if ! command -v kubectl version &> /dev/null
-then
-    echo -e " \033[31;5mKubectl not found, installing\033[0m"
-    curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
-    sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
+if ! command -v kubectl version &>/dev/null; then
+  echo -e " \033[31;5mKubectl not found, installing\033[0m"
+  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+  sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
 else
   echo -e " \033[32;5mKubectl already installed\033[0m"
 fi
 
 # Create SSH Config file to ignore checking (don't use in production!)
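A note on this hunk: both sides of the diff pass two words to `command -v`, and since no `version` executable exists on the PATH, bash reports failure even when kubectl is installed, so the install branch can run every time. A minimal corrected sketch (not part of the commit):

# Probe for the kubectl binary alone; 'command -v' takes command names,
# so the stray "version" argument makes the committed test unreliable
if ! command -v kubectl >/dev/null 2>&1; then
  echo "kubectl not found, would install here"
fi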
@@ -91,7 +89,7 @@ sed -i '1s/^/StrictHostKeyChecking no\n/' ~/.ssh/config
 
 #add ssh keys for all nodes
 for node in "${all[@]}"; do
   ssh-copy-id $user@$node
 done
 
 # Step 1: Create Kube VIP
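With no flags, `ssh-copy-id` pushes the default identity, while the rest of the script authenticates with `~/.ssh/$certName`. A hedged variant that pins the same key the script uses elsewhere (same $all, $user, and $certName variables as the script):

for node in "${all[@]}"; do
  # -i selects the exact public key to install on each node
  ssh-copy-id -i ~/.ssh/$certName.pub $user@$node
done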
@@ -99,7 +97,7 @@ done
 sudo mkdir -p /var/lib/rancher/rke2/server/manifests
 # Install the kube-vip deployment into rke2's self-installing manifest folder
 curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip
-cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml
+cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' >$HOME/kube-vip.yaml
 sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml
 
 # Find/Replace all k3s entries to represent rke2
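The sed call splices $interface and $vip into the expression unquoted, so values containing spaces would split the argument. A sketch of a slightly safer form with the expansion kept inside double quotes (same placeholders as the fetched kube-vip template; values containing "/" or "&" would still need a different sed delimiter):

# Double quotes keep the whole sed expression a single argument
sed "s/\$interface/$interface/g; s/\$vip/$vip/g" kube-vip >"$HOME/kube-vip.yaml"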
@@ -114,26 +112,29 @@ mkdir ~/.kube
 # create the rke2 config file
 sudo mkdir -p /etc/rancher/rke2
 touch config.yaml
-echo "tls-san:" >> config.yaml
-echo " - $vip" >> config.yaml
-echo " - $master1" >> config.yaml
-echo " - $master2" >> config.yaml
-echo " - $master3" >> config.yaml
-echo "write-kubeconfig-mode: 0644" >> config.yaml
-echo "disable:" >> config.yaml
-echo " - rke2-ingress-nginx" >> config.yaml
+echo "tls-san:" >>config.yaml
+echo " - $vip" >>config.yaml
+echo " - $master1" >>config.yaml
+echo " - $master2" >>config.yaml
+echo " - $master3" >>config.yaml
+echo "write-kubeconfig-mode: 0644" >>config.yaml
+echo "disable:" >>config.yaml
+echo " - rke2-ingress-nginx" >>config.yaml
 
 # copy config.yaml to rancher directory
 sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml
 
 # update path with rke2-binaries
-echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ;
+echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >>~/.bashrc
+echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >>~/.bashrc
+echo 'alias k=kubectl' >>~/.bashrc
+source ~/.bashrc
 
 # Step 2: Copy kube-vip.yaml and certs to all masters
 for newnode in "${allmasters[@]}"; do
   scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml
   scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml
   scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh
   echo -e " \033[32;5mCopied successfully!\033[0m"
 done
 # Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. We then use the token to bootstrap additional masternodes
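The eight `echo ... >>config.yaml` lines could equally be written as one heredoc, which makes the resulting YAML visible at a glance. A sketch assuming the same $vip and $master1..3 variables:

# One heredoc writes the same config.yaml as the eight echo lines above
cat >config.yaml <<EOF
tls-san:
 - $vip
 - $master1
 - $master2
 - $master3
write-kubeconfig-mode: 0644
disable:
 - rke2-ingress-nginx
EOF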
@@ -155,8 +156,8 @@ EOF
 echo -e " \033[32;5mMaster1 Completed\033[0m"
 
 # Step 4: Set variable to the token we just extracted, set kube config location
-token=`cat token`
-sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' > $HOME/.kube/config
+token=$(cat token)
+sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' >$HOME/.kube/config
 sudo chown $(id -u):$(id -g) $HOME/.kube/config
 export KUBECONFIG=${HOME}/.kube/config
 sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml
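The `$(...)` form on the new side behaves identically to the old backticks but nests and reads better. For reference, a sketch of the same step without the cat round-trips (bash-only; quoting guards against unusual values in $master1):

# $(<file) reads the token file directly; sed can take the input filename itself
token=$(<token)
sudo sed "s/127.0.0.1/$master1/g" ~/.kube/rke2.yaml >"$HOME/.kube/config"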
@@ -168,7 +169,7 @@ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provi
 
 # Step 6: Add other Masternodes, note we import the token we extracted from step 3
 for newnode in "${masters[@]}"; do
   ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
 mkdir -p /etc/rancher/rke2
 touch /etc/rancher/rke2/config.yaml
 echo "token: $token" >> /etc/rancher/rke2/config.yaml
@@ -183,14 +184,14 @@ for newnode in "${masters[@]}"; do
 systemctl start rke2-server.service
 exit
 EOF
   echo -e " \033[32;5mMaster node joined successfully!\033[0m"
 done
 
 kubectl get nodes
 
 # Step 7: Add Workers
 for newnode in "${workers[@]}"; do
   ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <<EOF
 mkdir -p /etc/rancher/rke2
 touch /etc/rancher/rke2/config.yaml
 echo "token: $token" >> /etc/rancher/rke2/config.yaml
@@ -203,7 +204,7 @@ for newnode in "${workers[@]}"; do
 systemctl start rke2-agent.service
 exit
 EOF
   echo -e " \033[32;5mWorker node joined successfully!\033[0m"
 done
 
 kubectl get nodes
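`kubectl get nodes` only snapshots state, so freshly joined nodes may still show NotReady. If a hard synchronization point is wanted here, the `kubectl wait` pattern the script already uses for MetalLB would work; a sketch:

# Block up to five minutes until every node reports the Ready condition
kubectl wait --for=condition=Ready nodes --all --timeout=300s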
@@ -214,14 +215,14 @@ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manif
 kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
 
 # Download ipAddressPool and configure using lbrange above
 curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool
-cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
+cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' >$HOME/ipAddressPool.yaml
 
 # Step 9: Deploy IP Pools and l2Advertisement
 echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m"
 kubectl wait --namespace metallb-system \
   --for=condition=ready pod \
   --selector=component=controller \
   --timeout=1800s
 kubectl apply -f ipAddressPool.yaml
 kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml
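For readers unfamiliar with the fetched template: once $lbrange is substituted, the result should be a standard MetalLB v0.13+ IPAddressPool. A hand-written equivalent with a hypothetical pool name and address range (the actual template may differ):

# Example of the manifest shape the sed templating above produces
kubectl apply -f - <<EOF
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: first-pool
  namespace: metallb-system
spec:
  addresses:
    - 192.168.3.60-192.168.3.80
EOF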
@@ -242,17 +243,17 @@ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/
 helm repo add jetstack https://charts.jetstack.io
 helm repo update
 helm install cert-manager jetstack/cert-manager \
   --namespace cert-manager \
   --create-namespace \
   --version v1.13.2
 
 kubectl get pods --namespace cert-manager
 
 # Install Rancher
 echo -e " \033[32;5mDeploying Rancher\033[0m"
 helm install rancher rancher-latest/rancher \
   --namespace cattle-system \
   --set hostname=rancher.${DOMAIN} \
   --set bootstrapPassword=admin
 kubectl -n cattle-system rollout status deploy/rancher
 kubectl -n cattle-system get deploy rancher
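`helm install rancher rancher-latest/rancher` assumes the rancher-latest chart repo is already registered; that step is not visible in this hunk, so presumably it happens elsewhere in the script. For completeness, the standard registration is:

# Register the Rancher "latest" chart repo before installing
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
helm repo update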
@@ -260,8 +261,8 @@ kubectl -n cattle-system get deploy rancher
 kubectl get svc -n cattle-system
 kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system
 while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.type=="Pending")].status}') = "True" ]]; do
   sleep 5
   echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m"
 done
 
 kubectl get svc -n cattle-system
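On both sides of this diff the jsonpath expression is passed as a positional argument rather than via `-o`, so kubectl treats it as a resource name, the substitution never yields "True", and the loop exits immediately. A corrected sketch that polls the rancher-lb service created above until it is assigned an external address:

# Loop until the LoadBalancer service gets an ingress IP
while [[ -z $(kubectl get svc rancher-lb -n cattle-system -o 'jsonpath={.status.loadBalancer.ingress[0].ip}' 2>/dev/null) ]]; do
  sleep 5
  echo "Waiting for LoadBalancer to come online"
done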