Merge pull request #3 from theycallmeloki/theycallmeloki-patch-2

Swap interfaces + username dynamically alongside management ip
Authored by theycallmeloki on 2024-07-18 11:48:14 +05:30; committed by GitHub
commit 73613bc44d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 99 additions and 59 deletions
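The change replaces the fixed master1/worker1 IP variables (which all shared one SSH user and one interface) with a single associative array in which each node carries its own ip, user, interface, type, optional labels, and auth settings as a comma-separated string; small grep helpers pull the fields back out wherever the script needs them. A minimal standalone sketch of that lookup pattern (hypothetical node name and values, and a generic get_field helper rather than the script's per-field functions):

#!/bin/bash
# Each node is one comma-separated key=value string keyed by node name
declare -A nodes=(
  ["node-a"]="ip=10.0.0.10,user=alice,interface=eth0,type=worker"
)
# Pull a single key=value field out of a node's definition string (requires GNU grep -P)
get_field() {
  echo "${nodes[$1]}" | grep -oP "${2}=\K[^,]+"
}
get_field node-a ip          # prints 10.0.0.10
get_field node-a interface   # prints eth0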


@@ -30,44 +30,67 @@ KVVERSION="v0.6.3"
 # K3S Version
 k3sVersion="v1.26.10+k3s2"
 
-# Set the IP addresses of the master and work nodes
-master1=192.168.3.21
-master2=192.168.3.22
-master3=192.168.3.23
-worker1=192.168.3.24
-worker2=192.168.3.25
-worker3=192.168.3.26
-
-# User of remote machines
-user=ubuntu
-
-# Interface used on remotes
-interface=eth0
+# Define node types and their properties
+declare -A nodes=(
+  ["master1"]="ip=192.168.2.132,user=laneone,interface=eth0,type=master"
+  ["master2"]="ip=192.168.2.133,user=laneone,interface=eth0,type=master"
+  ["master3"]="ip=192.168.2.134,user=laneone,interface=eth0,type=master"
+  ["worker1"]="ip=192.168.2.129,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
+  ["worker2"]="ip=192.168.2.130,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
+  ["worker3"]="ip=192.168.2.131,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
+  ["worker4"]="ip=192.168.2.125,user=laneone,interface=enp34s0,type=worker,labels=worker=true,auth=password,password=l"
+)
 
 # Set the virtual IP address (VIP)
-vip=192.168.3.50
-
-# Array of master nodes
-masters=($master2 $master3)
-
-# Array of worker nodes
-workers=($worker1 $worker2 $worker3)
-
-# Array of all
-all=($master1 $master2 $master3 $worker1 $worker2 $worker3)
-
-# Array of all minus master
-allnomaster1=($master2 $master3 $worker1 $worker2 $worker3)
+vip=192.168.2.50
 
 #Loadbalancer IP range
-lbrange=192.168.3.60-192.168.3.80
+lbrange=192.168.2.60-192.168.2.100
 
 #ssh certificate name variable
 certName=id_rsa
+
+# Additional k3s flags for metrics
+common_extra_args="--kubelet-arg containerd=/run/k3s/containerd/containerd.sock"
+server_extra_args="--no-deploy servicelb --no-deploy traefik --kube-controller-manager-arg bind-address=0.0.0.0 --kube-proxy-arg metrics-bind-address=0.0.0.0 --kube-scheduler-arg bind-address=0.0.0.0 --etcd-expose-metrics true"
+agent_extra_args="--node-label worker=true"
+
+#############################################
+# HELPER FUNCTIONS #
+#############################################
+get_node_ip() {
+  echo "${nodes[$1]}" | grep -oP 'ip=\K[^,]+'
+}
+get_node_user() {
+  echo "${nodes[$1]}" | grep -oP 'user=\K[^,]+'
+}
+get_node_interface() {
+  echo "${nodes[$1]}" | grep -oP 'interface=\K[^,]+'
+}
+get_node_type() {
+  echo "${nodes[$1]}" | grep -oP 'type=\K[^,]+'
+}
+get_node_labels() {
+  echo "${nodes[$1]}" | grep -oP 'labels=\K[^,]*' | tr ',' ' '
+}
+get_node_auth() {
+  echo "${nodes[$1]}" | grep -oP 'auth=\K[^,]*'
+}
+get_node_password() {
+  echo "${nodes[$1]}" | grep -oP 'password=\K[^,]*'
+}
 
 #############################################
 # DO NOT EDIT BELOW #
 #############################################
 # For testing purposes - in case time is wrong due to VM snapshots
 sudo timedatectl set-ntp off
 sudo timedatectl set-ntp on
@@ -115,19 +138,21 @@ EOF
 done
 
 # Step 1: Bootstrap First k3s Node
-mkdir ~/.kube
+mkdir -p ~/.kube
+first_master=$(echo "${!nodes[@]}" | tr ' ' '\n' | grep "master" | head -n1)
 k3sup install \
-  --ip $master1 \
-  --user $user \
+  --ip $(get_node_ip $first_master) \
+  --user $(get_node_user $first_master) \
   --tls-san $vip \
   --cluster \
   --k3s-version $k3sVersion \
-  --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule --kube-controller-manager-arg bind-address=0.0.0.0 --kube-proxy-arg metrics-bind-address=0.0.0.0 --kube-scheduler-arg bind-address=0.0.0.0 --etcd-expose-metrics true --kubelet-arg containerd=/run/k3s/containerd/containerd.sock" \
+  --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$(get_node_interface $first_master) --node-ip=$(get_node_ip $first_master) --node-taint node-role.kubernetes.io/master=true:NoSchedule $common_extra_args $server_extra_args" \
   --merge \
   --sudo \
   --local-path $HOME/.kube/config \
   --ssh-key $HOME/.ssh/$certName \
   --context k3s-ha
 echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m"
 
 # Step 2: Install Kube-VIP for HA
@@ -148,31 +173,48 @@ ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
 EOF
 
 # Step 6: Add new master nodes (servers) & workers
-for newnode in "${masters[@]}"; do
-  k3sup join \
-    --ip $newnode \
-    --user $user \
-    --sudo \
-    --k3s-version $k3sVersion \
-    --server \
-    --server-ip $master1 \
-    --ssh-key $HOME/.ssh/$certName \
-    --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule --kube-controller-manager-arg bind-address=0.0.0.0 --kube-proxy-arg metrics-bind-address=0.0.0.0 --kube-scheduler-arg bind-address=0.0.0.0 --etcd-expose-metrics true --kubelet-arg containerd=/run/k3s/containerd/containerd.sock" \
-    --server-user $user
-  echo -e " \033[32;5mMaster node joined successfully!\033[0m"
-done
-
-# add workers
-for newagent in "${workers[@]}"; do
-  k3sup join \
-    --ip $newagent \
-    --user $user \
-    --sudo \
-    --k3s-version $k3sVersion \
-    --server-ip $master1 \
-    --ssh-key $HOME/.ssh/$certName \
-    --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\" --kube-proxy-arg metrics-bind-address=0.0.0.0 --kubelet-arg containerd=/run/k3s/containerd/containerd.sock"
-  echo -e " \033[32;5mAgent node joined successfully!\033[0m"
+for node in "${!nodes[@]}"; do
+  if [ "$(get_node_type $node)" == "master" ] && [ "$node" != "$first_master" ]; then
+    k3sup join \
+      --ip $(get_node_ip $node) \
+      --user $(get_node_user $node) \
+      --sudo \
+      --k3s-version $k3sVersion \
+      --server \
+      --server-ip $(get_node_ip $first_master) \
+      --ssh-key $HOME/.ssh/$certName \
+      --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$(get_node_interface $node) --node-ip=$(get_node_ip $node) --node-taint node-role.kubernetes.io/master=true:NoSchedule $common_extra_args $server_extra_args" \
+      --server-user $(get_node_user $first_master)
+    echo -e " \033[32;5mMaster node $node joined successfully!\033[0m"
+  elif [ "$(get_node_type $node)" == "worker" ]; then
+    labels=$(get_node_labels $node)
+    label_args=""
+    if [ ! -z "$labels" ]; then
+      label_args="--node-label \"$labels\""
+    fi
+    auth_method=$(get_node_auth $node)
+    if [ "$auth_method" == "password" ]; then
+      password=$(get_node_password $node)
+      sshpass -p "$password" k3sup join \
+        --ip $(get_node_ip $node) \
+        --user $(get_node_user $node) \
+        --sudo \
+        --k3s-version $k3sVersion \
+        --server-ip $(get_node_ip $first_master) \
+        --k3s-extra-args "$label_args $common_extra_args $agent_extra_args" \
+        --ssh-key $HOME/.ssh/$certName
+    else
+      k3sup join \
+        --ip $(get_node_ip $node) \
+        --user $(get_node_user $node) \
+        --sudo \
+        --k3s-version $k3sVersion \
+        --server-ip $(get_node_ip $first_master) \
+        --ssh-key $HOME/.ssh/$certName \
+        --k3s-extra-args "$label_args $common_extra_args $agent_extra_args"
+    fi
+    echo -e " \033[32;5mWorker node $node joined successfully!\033[0m"
+  fi
 done
@@ -184,8 +226,6 @@ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/conf
 # Download ipAddressPool and configure using lbrange above
 curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool
 cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
-# Apply the ip address pool
-kubectl apply -f $HOME/ipAddressPool.yaml
 
 # Step 9: Test with Nginx
 kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
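
Because interface, user, and auth method now travel with each node entry, a machine that differs from the rest only needs its own line in the nodes array; for example, a hypothetical entry for a box on a wireless NIC with password-based SSH:

  ["worker5"]="ip=192.168.2.126,user=pi,interface=wlan0,type=worker,labels=worker=true,auth=password,password=changeme"

The bootstrap and master joins feed the per-node interface into --flannel-iface, every k3sup call takes --user from the node's entry, and worker entries with auth=password are joined through sshpass, so that tool has to be installed on the machine driving the deployment.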