theycallmeloki 2025-04-15 01:36:00 +00:00 committed by GitHub
commit 104edf90a7
1 changed file with 258 additions and 60 deletions


@@ -30,44 +30,74 @@ KVVERSION="v0.6.3"
# K3S Version
k3sVersion="v1.28.7+k3s1"
# Set the IP addresses of the master and work nodes
master1=192.168.3.21
master2=192.168.3.22
master3=192.168.3.23
worker1=192.168.3.24
worker2=192.168.3.25
worker3=192.168.3.26
# User of remote machines
user=ubuntu
# Interface used on remotes
interface=eth0
# Define node types and their properties
declare -A nodes=(
["master1"]="ip=192.168.2.132,user=laneone,interface=eth0,type=master"
["master2"]="ip=192.168.2.133,user=laneone,interface=eth0,type=master"
["master3"]="ip=192.168.2.134,user=laneone,interface=eth0,type=master"
["worker1"]="ip=192.168.2.129,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
["worker2"]="ip=192.168.2.130,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
["worker3"]="ip=192.168.2.131,user=laneone,interface=eth0,type=worker,labels=longhorn=true,worker=true"
["worker4"]="ip=192.168.2.125,user=laneone,interface=enp34s0,type=worker,labels=worker=true,auth=password,password=l"
["worker5"]="ip=192.168.2.104,user=laneone,interface=enp104s0,type=worker,labels=worker=true,auth=password,password=l"
)
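# Each entry is a comma-separated list of key=value fields. Keys parsed by the helper
# functions below: ip, user, interface, type (master|worker), labels, auth and password.
# Nodes without auth=password are reached with the ssh key named in certName.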
# Set the virtual IP address (VIP)
vip=192.168.3.50
# Array of master nodes
masters=($master2 $master3)
# Array of worker nodes
workers=($worker1 $worker2 $worker3)
# Array of all
all=($master1 $master2 $master3 $worker1 $worker2 $worker3)
# Array of all minus master
allnomaster1=($master2 $master3 $worker1 $worker2 $worker3)
vip=192.168.2.50
#Loadbalancer IP range
lbrange=192.168.3.60-192.168.3.80
lbrange=192.168.2.60-192.168.2.100
#ssh certificate name variable
certName=id_rsa
# Additional k3s flags for metrics
common_extra_args="--kubelet-arg containerd=/run/k3s/containerd/containerd.sock"
server_extra_args="--no-deploy servicelb --no-deploy traefik --kube-controller-manager-arg bind-address=0.0.0.0 --kube-proxy-arg metrics-bind-address=0.0.0.0 --kube-scheduler-arg bind-address=0.0.0.0 --etcd-expose-metrics true"
agent_extra_args="--node-label worker=true"
# Create Grafana admin credentials
grafana_user="adminuser" # desired grafana username
grafana_password="adminpassword" # desired grafana password
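# Optional sketch (not enabled): generate a random 12-character password instead of the
# fixed value above; it is echoed later when the Grafana secret is created.
# grafana_password="$(openssl rand -hex 6)"   # 12 hex characters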
#############################################
# HELPER FUNCTIONS #
#############################################
get_node_ip() {
echo "${nodes[$1]}" | grep -oP 'ip=\K[^,]+'
}
get_node_user() {
echo "${nodes[$1]}" | grep -oP 'user=\K[^,]+'
}
get_node_interface() {
echo "${nodes[$1]}" | grep -oP 'interface=\K[^,]+'
}
get_node_type() {
echo "${nodes[$1]}" | grep -oP 'type=\K[^,]+'
}
get_node_labels() {
# labels may themselves contain commas (e.g. longhorn=true,worker=true), so capture up to the next known key or the end of the entry
echo "${nodes[$1]}" | grep -oP 'labels=\K.*?(?=,(?:ip|user|interface|type|auth|password)=|$)' | tr ',' ' '
}
get_node_auth() {
echo "${nodes[$1]}" | grep -oP 'auth=\K[^,]*'
}
get_node_password() {
echo "${nodes[$1]}" | grep -oP 'password=\K[^,]*'
}
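# Illustration only (not executed): given the worker4 entry defined above, the helpers return
#   get_node_ip worker4          -> 192.168.2.125
#   get_node_interface worker4   -> enp34s0
#   get_node_type worker4        -> worker
#   get_node_auth worker4        -> password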
#############################################
# DO NOT EDIT BELOW #
#############################################
# For testing purposes - in case time is wrong due to VM snapshots
sudo timedatectl set-ntp off
sudo timedatectl set-ntp on
@@ -115,19 +145,21 @@ EOF
done
# Step 1: Bootstrap First k3s Node
mkdir ~/.kube
mkdir -p ~/.kube
# pick the first master deterministically (bash does not guarantee associative array key order)
first_master=$(echo "${!nodes[@]}" | tr ' ' '\n' | grep "master" | sort | head -n1)
k3sup install \
--ip $master1 \
--user $user \
--ip $(get_node_ip $first_master) \
--user $(get_node_user $first_master) \
--tls-san $vip \
--cluster \
--k3s-version $k3sVersion \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$(get_node_interface $first_master) --node-ip=$(get_node_ip $first_master) --node-taint node-role.kubernetes.io/master=true:NoSchedule $common_extra_args $server_extra_args" \
--merge \
--sudo \
--local-path $HOME/.kube/config \
--ssh-key $HOME/.ssh/$certName \
--context k3s-ha
echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m"
# Step 2: Install Kube-VIP for HA
@@ -140,39 +172,74 @@ cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-v
# Step 4: Copy kube-vip.yaml to master1
scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml
# Step 5: Connect to Master1 and move kube-vip.yaml
ssh $user@$master1 -i ~/.ssh/$certName <<- EOF
sudo mkdir -p /var/lib/rancher/k3s/server/manifests
sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml
EOF
# Step 6: Add new master nodes (servers) & workers
for newnode in "${masters[@]}"; do
k3sup join \
--ip $newnode \
--user $user \
--sudo \
--k3s-version $k3sVersion \
--server \
--server-ip $master1 \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \
--server-user $user
echo -e " \033[32;5mMaster node joined successfully!\033[0m"
done
# add workers
for newagent in "${workers[@]}"; do
k3sup join \
--ip $newagent \
--user $user \
--sudo \
--k3s-version $k3sVersion \
--server-ip $master1 \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\""
echo -e " \033[32;5mAgent node joined successfully!\033[0m"
done
# Function to set up passwordless sudo
setup_passwordless_sudo() {
local node=$1
local user=$(get_node_user $node)
local ip=$(get_node_ip $node)
local auth_method=$(get_node_auth $node)
local password=$(get_node_password $node)
echo "Setting up passwordless sudo for $user on $ip"
if [ "$auth_method" == "password" ]; then
sshpass -p "$password" ssh -o StrictHostKeyChecking=no $user@$ip "echo '$password' | sudo -S sh -c 'echo \"$user ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/$user && chmod 0440 /etc/sudoers.d/$user'"
else
ssh -i $HOME/.ssh/$certName -o StrictHostKeyChecking=no $user@$ip "sudo sh -c 'echo \"$user ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/$user && chmod 0440 /etc/sudoers.d/$user'"
fi
}
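# Sketch: the password-auth branch above relies on sshpass on the machine running this
# script; warn early if it is missing (the install hint assumes a Debian/Ubuntu host).
if ! command -v sshpass &> /dev/null; then
echo -e " \033[31;5msshpass not found - nodes with auth=password cannot be provisioned. Install it first, e.g.: sudo apt-get install -y sshpass\033[0m"
fi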
# Step 6: Add new master nodes (servers) & workers
for node in "${!nodes[@]}"; do
setup_passwordless_sudo $node
if [ "$(get_node_type $node)" == "master" ] && [ "$node" != "$first_master" ]; then
k3sup join \
--ip $(get_node_ip $node) \
--user $(get_node_user $node) \
--sudo \
--k3s-version $k3sVersion \
--server \
--server-ip $(get_node_ip $first_master) \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$(get_node_interface $node) --node-ip=$(get_node_ip $node) --node-taint node-role.kubernetes.io/master=true:NoSchedule $common_extra_args $server_extra_args" \
--server-user $(get_node_user $first_master)
echo -e " \033[32;5mMaster node $node joined successfully!\033[0m"
elif [ "$(get_node_type $node)" == "worker" ]; then
labels=$(get_node_labels $node)
label_args=""
# build one --node-label flag per label so multi-label nodes (e.g. longhorn=true worker=true) are labelled correctly
for label in $labels; do
label_args+="--node-label $label "
done
auth_method=$(get_node_auth $node)
if [ "$auth_method" == "password" ]; then
password=$(get_node_password $node)
sshpass -p "$password" k3sup join \
--ip $(get_node_ip $node) \
--user $(get_node_user $node) \
--sudo \
--k3s-version $k3sVersion \
--server-ip $(get_node_ip $first_master) \
--k3s-extra-args "$label_args $common_extra_args $agent_extra_args" \
--ssh-key $HOME/.ssh/$certName
else
k3sup join \
--ip $(get_node_ip $node) \
--user $(get_node_user $node) \
--sudo \
--k3s-version $k3sVersion \
--server-ip $(get_node_ip $first_master) \
--ssh-key $HOME/.ssh/$certName \
--k3s-extra-args "$label_args $common_extra_args $agent_extra_args"
fi
echo -e " \033[32;5mWorker node $node joined successfully!\033[0m"
fi
done
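# Optional sanity check (sketch): list what has registered so far under the merged
# k3s-ha context; every master and worker defined in the nodes array should appear.
kubectl --context k3s-ha get nodes -o wide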
# Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider
@@ -183,7 +250,7 @@ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manif
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
# Download ipAddressPool and configure using lbrange above
curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool
cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml
cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > ipAddressPool.yaml
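# Illustration (assumption about the downloaded template, including the pool name): after the
# sed above, ipAddressPool.yaml should describe a MetalLB IPAddressPool covering lbrange, roughly:
#   apiVersion: metallb.io/v1beta1
#   kind: IPAddressPool
#   metadata:
#     name: first-pool
#     namespace: metallb-system
#   spec:
#     addresses:
#     - 192.168.2.60-192.168.2.100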
# Step 9: Test with Nginx
kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default
@@ -249,8 +316,18 @@ echo -e " \033[32;5mBe patient as it downloads and configures a number of pods i
echo -e " \033[32;5mInstalling Longhorn - It can take a while for all pods to deploy...\033[0m"
kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml
kubectl get pods \
--namespace longhorn-system \
--watch
--namespace longhorn-system
echo "Waiting for Longhorn UI deployment to be fully ready..."
while ! (kubectl wait --for=condition=available deployment/longhorn-driver-deployer -n longhorn-system --timeout=600s && \
kubectl wait --for=condition=available deployment/longhorn-ui -n longhorn-system --timeout=600s && \
kubectl wait --for=condition=available deployment/csi-attacher -n longhorn-system --timeout=600s && \
kubectl wait --for=condition=available deployment/csi-provisioner -n longhorn-system --timeout=600s && \
kubectl wait --for=condition=available deployment/csi-resizer -n longhorn-system --timeout=600s && \
kubectl wait --for=condition=available deployment/csi-snapshotter -n longhorn-system --timeout=600s); do
echo "Waiting for Longhorn UI deployment to be fully ready..."
sleep 1
done
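# Alternative (sketch): the per-deployment waits above can be collapsed into a single call
# against every deployment in the namespace.
# kubectl wait --for=condition=available deployment --all -n longhorn-system --timeout=600s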
# Step 17: Print out confirmation
@@ -258,3 +335,124 @@ kubectl get nodes
kubectl get svc -n longhorn-system
echo -e " \033[32;5mHappy Kubing! Access Longhorn through Rancher UI\033[0m"
# Step 18: Download and modify values.yaml for Prometheus
# Ensure yq is installed
if ! command -v yq &> /dev/null; then
echo "yq is not installed. Installing yq..."
sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq
fi
echo -e " \033[32;5mSetting up Prometheus...\033[0m"
# Download values.yaml
wget https://raw.githubusercontent.com/techno-tim/launchpad/master/kubernetes/kube-prometheus-stack/values.yml -O values.yaml
# Get master node IPs
master_ips=$(for node in "${!nodes[@]}"; do
if [ "$(get_node_type $node)" == "master" ]; then
echo "$(get_node_ip $node)"
fi
done | sort -u)
echo '------'
echo 'Master IPs: '
echo $master_ips
echo '------'
# Function to update endpoints in values.yaml
update_endpoints() {
local component=$1
echo "Updating endpoints for $component"
# Create the new endpoints content
local new_endpoints=""
for ip in $master_ips; do
new_endpoints+=" - $ip\n"
done
# Use awk to replace the endpoints section
awk -v component="$component" -v new_endpoints="$new_endpoints" '
$0 ~ "^" component ":" {
print $0
in_component = 1
next
}
in_component && /^[a-z]/ {
in_component = 0
}
in_component && /^ *endpoints:/ {
print " endpoints:"
print new_endpoints
skip = 1
next
}
skip && /^[^ ]/ {
skip = 0
}
!skip { print }
' values.yaml > values.yaml.tmp && mv values.yaml.tmp values.yaml
echo "Updated $component endpoints"
}
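# Illustration (assumes the layout of the upstream values.yml): after update_endpoints runs,
# each component block should list the master IPs gathered above, e.g.
#   kubeEtcd:
#     endpoints:
#       - 192.168.2.132
#       - 192.168.2.133
#       - 192.168.2.134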
# Update endpoints for different components
components=("kubeControllerManager" "kubeEtcd" "kubeScheduler" "kubeProxy")
for component in "${components[@]}"; do
update_endpoints "$component"
done
# Create Grafana admin credentials
echo -e " \033[32;5mCreating Grafana admin credentials...\033[0m"
# Create Kubernetes secret for Grafana
kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f -
kubectl create secret generic grafana-admin-credentials \
--from-literal=admin-user=$grafana_user \
--from-literal=admin-password=$grafana_password \
-n monitoring \
--dry-run=client -o yaml | kubectl apply -f -
echo -e " \033[32;5mGrafana admin credentials created. Username: $grafana_user, Password: $grafana_password\033[0m"
echo -e " \033[32;5mPlease make note of these credentials and store them securely.\033[0m"
# Update Grafana admin credentials in values.yaml
yq eval '.grafana.admin.existingSecret = "grafana-admin-credentials"' -i values.yaml
yq eval '.grafana.admin.userKey = "admin-user"' -i values.yaml
yq eval '.grafana.admin.passwordKey = "admin-password"' -i values.yaml
# Verify the changes
for component in "${components[@]}"; do
echo "Endpoints for ${component}:"
yq eval ".${component}.endpoints" values.yaml
done
echo -e " \033[32;5mvalues.yaml has been updated with master node IPs\033[0m"
# Step 19: Install Prometheus using Helm
echo -e " \033[32;5mInstalling Prometheus...\033[0m"
# Add prometheus-community helm repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
# Install Prometheus stack
helm install prometheus prometheus-community/kube-prometheus-stack \
-f values.yaml \
--namespace monitoring \
--create-namespace
# Wait for the Grafana deployment to be ready
kubectl -n monitoring rollout status deploy/grafana
echo "Changing Grafana service to LoadBalancer type..."
kubectl patch svc grafana -n monitoring -p '{"spec": {"type": "LoadBalancer"}}'
echo -e " \033[32;5mPrometheus has been installed!\033[0m"
# Show external ip on which to access grafana
kubectl get svc/grafana -n monitoring
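# Optional (sketch): poll until MetalLB assigns the service an external IP, then print it
# (assumes the grafana service listens on port 80).
external_ip=""
while [ -z "$external_ip" ]; do
sleep 2
external_ip=$(kubectl get svc grafana -n monitoring -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null)
done
echo -e " \033[32;5mGrafana should be reachable at http://$external_ip\033[0m"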
echo -e " \033[32;5m Happy Charting! \033[0m"