From 14c4f795a61be56cbf48898b2b28a862ac86ebba Mon Sep 17 00:00:00 2001 From: Peter Fodor Date: Sun, 5 Nov 2023 18:39:30 +0000 Subject: [PATCH 01/67] Add create-vms.sh script with a short readme. --- Kubernetes/Create-VMS/create-vms.sh | 301 ++++++++++++++++++++++++++++ Kubernetes/Create-VMS/readme.md | 9 + 2 files changed, 310 insertions(+) create mode 100644 Kubernetes/Create-VMS/create-vms.sh create mode 100644 Kubernetes/Create-VMS/readme.md diff --git a/Kubernetes/Create-VMS/create-vms.sh b/Kubernetes/Create-VMS/create-vms.sh new file mode 100644 index 0000000..5983bf4 --- /dev/null +++ b/Kubernetes/Create-VMS/create-vms.sh @@ -0,0 +1,301 @@ +#!/bin/bash + +# for Debian this must be installed for Longhorn to work +# sudo apt-get install -y open-iscsi + +########################### +# DEFAULT VALUES # +########################### +os_options=("Debian" "Ubuntu") +os="Debian" +# Proxmox path to the template folder +template_path="/var/lib/vz/template" +# Proxmox certificate path +cert_path="/root/.ssh" +# Number of VMs to be created +vm_number=3 +# The first VM id, smallest id is 100 +id=121 +# Name prefix of the first VM +name=k3s + +drive_name=local-zfs +agent=0 # TODO: Implement User Option for it +disk_size=20G +memory=2048 +core=2 + +# IP for the first VM +ip=192.168.0.21 +gateway=192.168.0.1 + +# ssh certificate name variable +cert_name=id_rsa + +# User settings +user=$USER +password=password + +ubuntu_url=https://cloud-images.ubuntu.com/lunar/current/lunar-server-cloudimg-amd64.img +ubuntu_filename=lunar-server-cloudimg-amd64.img + +debian_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2 +debian_filename=debian-12-genericcloud-amd64.qcow2 + +os_url=https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-genericcloud-amd64.qcow2 +os_filename=debian-12-genericcloud-amd64.qcow2 + +################## +# Functions # +################## +function run() { + get_user_variables + print_info # Prints information about what will be created based on defaults/user inputs + setup # Do not worry it asks for confirmation before the setup/installation starts + start_vms # You can choose to start all VMs if you want + #qemu_agent # Not implemented yet, you can choose to add qemu-agent to the installation image +} + +function get_user_variables() { + echo -e -n "\e[36mWhich OS cloud image would you like to use?\n\e[0m" + PS3="" + select option in "${os_options[@]}"; do + # Check if the user selected an option + if [[ -n "$option" ]]; then + # Do something with the selected option + case $option in + "Debian") ;; + "Ubuntu") ;; + *) + echo -e "\e[31mInvalid option selected. Exiting...\e[0m" + exit + ;; + esac + else + # No option was selected + echo -e "\e[31mNo option was selected. Exiting...\e[0m" + exit + fi + # Set the selected Operating system + os=$option + # Exit the select loop + break + done + echo -e "\e[36mHow many VM do you want to create? \e[0m" + read -e -p "" -i "$vm_number" vm_number + echo -e "\e[36mFirst VM ID? (minimum 100)\e[0m" + read -e -p "" -i $id id + echo -e "\e[36mVM name prefix? \e[0m" + read -e -p "" -i $name name + echo -e "\e[36mIP address? \e[0m" + read -e -p "" -i $ip ip + + # Split the IP address into its parts using the '.' character as the delimiter. + ip_address_parts=(${ip//./ }) + octet1=${ip_address_parts[0]} + octet2=${ip_address_parts[1]} + octet3=${ip_address_parts[2]} + octet4=${ip_address_parts[3]} + + echo -e "\e[36mGateway? \e[0m" + read -e -p "" -i $gateway gateway + echo -e "\e[36mDisk Size? 
\e[0m" + read -e -p "" -i $disk_size disk_size + echo -e "\e[36mMemory Size? \e[0m" + read -e -p "" -i $memory memory + echo -e "\e[36mNumber of processor cores? \e[0m" + read -e -p "" -i $core core + echo -e "\e[36mUser name? \e[0m" + read -e -p "" -i $user user + echo -e "\e[36mUser password? \e[0m" + read -e -p "" -i $password password + echo -e "\e[36mCertification name? \e[0m" + read -e -p "" -i $cert_name cert_name + echo -e "\e[36mDrive name to store images? \e[0m" + read -e -p "" -i $drive_name drive_name +} + +# +function qemu_agent() { + yesno=n + echo -e "\e[36mDo you want to add qemu agent to the VM images? (y/n) \e[0m" + read -e -p "" -i $yesno yesno + case $yesno in + [Yy]*) + # Install qemu agent packages for each VM + echo -e "\e[32mInstalling qemu agent packages.\e[0m" + + for ((i = 1; i <= $vm_number; i++)); do + if [[ $i -le 9 ]]; then + idx="0$i" + else + idx=$i + fi + + # TODO: ssh into all VMs one by one and intalll the necessary qemu agent packages + done + ;; + [Nn]*) + echo -e "\e[33mSkipping qemu agent installation.\e[0m" + ;; + *) ;; + esac +} + +function print_info() { + echo -e "\e[36m\nThe following Virtual Machines will be created:\e[0m" + for ((i = 1; i <= $vm_number; i++)); do + if [[ $i -le 9 ]]; then + idx="0$i" + else + idx=$i + fi + echo -e "\e[32mVM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))\e[0m" + done + echo -e "\e[36m\nCommon VM parameters:\e[0m" + echo -e "\e[32mOS cloud image:\e[0m" "$os" + echo -e "\e[32mPublic Proxmox Certificate:\e[0m" "$cert_path/$cert_name.pub\n" + echo -e "\e[32mGateway:\e[0m" "$gateway" + echo -e "\e[32mDisk size:\e[0m" "$disk_size""B" + echo -e "\e[32mMemory size:\e[0m" "$memory""GB" + echo -e "\e[32mCPU cores:\e[0m" "$core" + echo -e "\e[32mDrive name:\e[0m" "$drive_name" +} + +function setup() { + yesno=n + echo -e "\e[36mDo you want to proceed with the setup? (y/n) \e[0m" + read -e -p "" -i $yesno yesno + case $yesno in + [Yy]*) + get_os_image + create_vms + ;; + [Nn]*) + echo -e "\e[31mInstallation aborted by user. No changes were made.\e[0m" + exit + ;; + *) ;; + esac +} + +function start_vms() { + yesno=n + echo -e "\e[36mDo you want to start up the Virtual Machines now? (y/n) \e[0m" + read -e -p "" -i $yesno yesno + case $yesno in + [Yy]*) + # Start VMs + for ((i = 1; i <= $vm_number; i++)); do + if [[ $i -le 9 ]]; then + idx="0$i" + else + idx=$i + fi + echo -e "\e[33mStarting Virtual Machine $idx\e[0m" + qm start $(($id + $i - 1)) + done + # Print VMs statuses + for ((i = 1; i <= $vm_number; i++)); do + if [[ $i -le 9 ]]; then + idx="0$i" + else + idx=$i + fi + echo -e "\e[33mVirtual Machine $idx status: \e[0m" + qm status $(($id + $i - 1)) + done + ;; + [Nn]*) + exit + ;; + *) ;; + esac +} + +function get_os_image() { + case $os in + "Debian") + os_url=$debian_url + os_filename=$debian_filename + # Check if the directory exists. + if [ ! -d "$template_path/qcow" ]; then + mkdir $template_path/qcow + fi + cd $template_path/qcow + ;; + "Ubuntu") + os_url=$ubuntu_url + os_filename=$ubuntu_filename + # Check if the directory exists. + if [ ! -d "$template_path/iso" ]; then + mkdir $template_path/iso + fi + cd $template_path/iso + ;; + *) + echo -e "\e[31Invalid option.\e[0m" + ;; + esac + + # Check if the os image file already exists. + # If not then download it. + if [ ! 
-f "$os_filename" ]; then + # Download the selected os cloud image + echo -e "\e[33mDownloading $os cloud image ...\e[0m" + wget $os_url + fi + +} + +# Only runs if you uncomment the function in `create_vms`. Please be careful +function destroy_existing_vms() { + # Stop and destroy Virtual Machine if it already exists + # TODO: Put loop and confirmation before doing anything + qm stop $(($id + $i - 1)) + qm destroy $(($id + $i - 1)) --destroy-unreferenced-disks --purge +} + +function create_vms() { + for ((i = 1; i <= $vm_number; i++)); do + # Stop and destroy Virtual Machine if it already exists. + # Be really careful with this only uncomment if you know what are you doing. !!! + # + # destroy_existing_vms + # + # ############################# + # Create VM from the cloud image + if [[ $i -le 9 ]]; then + idx="0$i" + else + idx=$i + fi + echo -e "\e[33mCreating Virtual Machine: $idx\e[0m" + echo "VM ID: $(($id + $i - 1)), Name: $name-$idx, IP address: $octet1.$octet2.$octet3.$(($octet4 + $i - 1))" + qm create $(($id + $i - 1)) \ + --memory $memory \ + --core $core \ + --numa 1 \ + --name $name-$idx \ + --net0 virtio,bridge=vmbr0 \ + --balloon 0 \ + --ipconfig0 gw=$gateway,ip=$octet1.$octet2.$octet3.$(($octet4 + $i - 1))/24 \ + --cipassword $password \ + --ciuser $user \ + --ciupgrade 1 \ + --sshkeys $cert_path/$cert_name.pub \ + --agent=$agent + + qm importdisk $(($id + $i - 1)) $os_filename $drive_name + qm set $(($id + $i - 1)) --scsihw virtio-scsi-pci --scsi0 $drive_name:vm-$(($id + $i - 1))-disk-0 + qm disk resize $(($id + $i - 1)) scsi0 $disk_size + qm set $(($id + $i - 1)) --ide2 $drive_name:cloudinit + qm set $(($id + $i - 1)) --boot c --bootdisk scsi0 + qm set $(($id + $i - 1)) --serial0 socket --vga serial0 + done +} + +######################### +# Run the script # +######################### +run diff --git a/Kubernetes/Create-VMS/readme.md b/Kubernetes/Create-VMS/readme.md new file mode 100644 index 0000000..d156491 --- /dev/null +++ b/Kubernetes/Create-VMS/readme.md @@ -0,0 +1,9 @@ +# Simple script to create multiple Virtual Machines automatically + +1. It will ask you some questions about your wished Virtual Machines. + - You can select Debian or Ubuntu image +2. Prints a detailed info with about the VMs going tyo be created. +3. Let you confirm if You want to continue +4. 
You can chose to start all VMs at the end 🚀 + +Enjoy 🙂 From 59a6fbb5b5785844c0c98b5af3318794041c09f4 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:20:35 +0000 Subject: [PATCH 02/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index 73f7151..d5e2634 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -190,6 +190,10 @@ while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.t done # Step 10: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s kubectl apply -f ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml From 1ed2dcb049afc30750817c2c7c3b4cf7084066b4 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:26:48 +0000 Subject: [PATCH 03/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index d5e2634..7c9c3ba 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -130,14 +130,13 @@ kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml # Step 3: Download kube-vip -curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip.yaml # Step 4: Copy kube-vip.yaml to master1 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml # Step 5: Connect to Master1 and move kube-vip.yaml - ssh $user@$master1 -i ~/.ssh/$certName <<- EOF sudo mkdir -p /var/lib/rancher/k3s/server/manifests sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml From 52ed6b908fc7fe71736ba581e7e4f6141994e283 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:34:08 +0000 Subject: [PATCH 04/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 2 +- Kubernetes/K3S-Deploy/kube-vip.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index 7c9c3ba..985e3d9 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -182,7 +182,7 @@ cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default -echo -e " \033[32;5mWaiting 20s for K3S to sync and LoadBalancer to come online\033[0m" +echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do sleep 1 diff --git a/Kubernetes/K3S-Deploy/kube-vip.yaml b/Kubernetes/K3S-Deploy/kube-vip.yaml index e703687..83dcbc5 100644 --- a/Kubernetes/K3S-Deploy/kube-vip.yaml +++ b/Kubernetes/K3S-Deploy/kube-vip.yaml @@ -47,7 +47,7 @@ spec: - name: vip_ddns 
value: "false" - name: svc_enable - value: "true" + value: "false" - name: svc_leasename value: plndr-svcs-lock - name: vip_leaderelection From a757f239dae495bd92f888c72b25c908c83c1ee2 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:42:26 +0000 Subject: [PATCH 05/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index 985e3d9..baa3b7b 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -130,7 +130,8 @@ kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml # Step 3: Download kube-vip -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml # Step 4: Copy kube-vip.yaml to master1 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml From 72e350505c9d211b79f266f0421d2b587a7e1f43 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:44:10 +0000 Subject: [PATCH 06/67] update --- Kubernetes/K3S-Deploy/{kube-vip.yaml => kube-vip} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename Kubernetes/K3S-Deploy/{kube-vip.yaml => kube-vip} (100%) diff --git a/Kubernetes/K3S-Deploy/kube-vip.yaml b/Kubernetes/K3S-Deploy/kube-vip similarity index 100% rename from Kubernetes/K3S-Deploy/kube-vip.yaml rename to Kubernetes/K3S-Deploy/kube-vip From 85abace9315e130118e63d428f932531d5404d38 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:52:43 +0000 Subject: [PATCH 07/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index baa3b7b..d99e45c 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -126,7 +126,6 @@ k3sup install \ echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m" # Step 2: Install Kube-VIP for HA -kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml # Step 3: Download kube-vip From 7d2b9b34cb448cbfe7ffba3dc5749828b20bdc05 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 09:57:38 +0000 Subject: [PATCH 08/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index d99e45c..b9ff68c 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -142,7 +142,7 @@ ssh $user@$master1 -i ~/.ssh/$certName <<- EOF sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml EOF -# Step 6: Add new master nodes (servers) +# Step 6: Add new master nodes (servers) & workers for newnode in "${masters[@]}"; do k3sup join \ --ip $newnode \ @@ -157,6 +157,7 @@ for newnode in "${masters[@]}"; do echo -e " \033[32;5mMaster node joined successfully!\033[0m" done +# add workers for newagent in "${workers[@]}"; do k3sup join \ --ip $newagent \ @@ -164,7 +165,8 @@ for newagent in "${workers[@]}"; do --sudo \ --k3s-version $k3sVersion \ --server-ip $master1 \ - --ssh-key 
$HOME/.ssh/$certName + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done From 78701624c4701db9d4f3f06067dde416a73e997a Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 10:00:00 +0000 Subject: [PATCH 09/67] update --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh index b9ff68c..24eaf9a 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh @@ -166,7 +166,7 @@ for newagent in "${workers[@]}"; do --k3s-version $k3sVersion \ --server-ip $master1 \ --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" + --k3s-extra-args "--node-label \"longhorn=true\",\"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done From 9af9a88fc378ff3db61f37dc5414f693188f727c Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 11:24:05 +0000 Subject: [PATCH 10/67] update --- Kubernetes/K3S-Deploy/k3s.sh | 58 +++--- Kubernetes/RKE2/rke2-test-do-not-use.sh | 244 ++++++++++++++++++++++++ 2 files changed, 270 insertions(+), 32 deletions(-) create mode 100644 Kubernetes/RKE2/rke2-test-do-not-use.sh diff --git a/Kubernetes/K3S-Deploy/k3s.sh b/Kubernetes/K3S-Deploy/k3s.sh index e6876d4..d5fb684 100644 --- a/Kubernetes/K3S-Deploy/k3s.sh +++ b/Kubernetes/K3S-Deploy/k3s.sh @@ -40,7 +40,7 @@ user=ubuntu interface=eth0 # Set the virtual IP address (VIP) -vip=192.168.1.50 +vip=192.168.3.50 # Array of master nodes masters=($master2 $master3) @@ -55,7 +55,7 @@ all=($master1 $master2 $master3 $worker1 $worker2) allnomaster1=($master2 $master3 $worker1 $worker2) #Loadbalancer IP range -lbrange=192.168.1.61-192.168.1.79 +lbrange=192.168.3.60-192.168.3.80 #ssh certificate name variable certName=id_rsa @@ -92,17 +92,6 @@ else echo -e " \033[32;5mKubectl already installed\033[0m" fi -# Install Docker to generate manifest and daemonset if not already present -if ! command -v docker version &> /dev/null -then - echo -e " \033[31;5mDocker not found, installing\033[0m" - curl -fsSL https://get.docker.com -o get-docker.sh - sudo sh get-docker.sh - wait $! -else - echo -e " \033[32;5mDocker already installed\033[0m" -fi - # Create SSH Config file to ignore checking (don't use in production!) 
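# A possible hardening (sketch, not part of the original script): instead of turning
# host-key checking off, the node keys could be pre-seeded into known_hosts before the
# ssh-copy-id loop below, e.g.:
#   for node in "${all[@]}"; do ssh-keyscan -H "$node" >> ~/.ssh/known_hosts; done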
echo "StrictHostKeyChecking no" > ~/.ssh/config @@ -137,32 +126,23 @@ k3sup install \ echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m" # Step 2: Install Kube-VIP for HA -kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml -# Step 3: Generate Daemonset with Docker -sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION manifest daemonset \ - --interface $interface \ - --address $vip \ - --inCluster \ - --taint \ - --controlplane \ - --services \ - --arp \ - --leaderElection | tee $HOME/kube-vip.yaml +# Step 3: Download kube-vip +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml # Step 4: Copy kube-vip.yaml to master1 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml # Step 5: Connect to Master1 and move kube-vip.yaml - ssh $user@$master1 -i ~/.ssh/$certName <<- EOF sudo mkdir -p /var/lib/rancher/k3s/server/manifests sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml EOF -# Step 6: Add new master nodes (servers) +# Step 6: Add new master nodes (servers) & workers for newnode in "${masters[@]}"; do k3sup join \ --ip $newnode \ @@ -177,6 +157,7 @@ for newnode in "${masters[@]}"; do echo -e " \033[32;5mMaster node joined successfully!\033[0m" done +# add workers for newagent in "${workers[@]}"; do k3sup join \ --ip $newagent \ @@ -184,28 +165,41 @@ for newagent in "${workers[@]}"; do --sudo \ --k3s-version $k3sVersion \ --server-ip $master1 \ - --ssh-key $HOME/.ssh/$certName + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--node-label \"longhorn=true\",\"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done # Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -#IP range for loadbalancer services to use -kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange +# Step 8: Install Metallb +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml -# Step 8: Test with Nginx +# Step 9: Test with Nginx kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default -echo -e " \033[32;5mWaiting 20s for K3S to sync and LoadBalancer to come online\033[0m" +echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do sleep 1 done +# Step 10: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml + kubectl 
get nodes kubectl get svc kubectl get pods --all-namespaces -o wide -echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m" +echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m" \ No newline at end of file diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh new file mode 100644 index 0000000..957e136 --- /dev/null +++ b/Kubernetes/RKE2/rke2-test-do-not-use.sh @@ -0,0 +1,244 @@ +#!/bin/bash + +echo -e " \033[33;5m __ _ _ ___ \033[0m" +echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" +echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" +echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" +echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" +echo -e " \033[33;5m |___/ \033[0m" + +echo -e " \033[36;5m ___ _ _____ ___ \033[0m" +echo -e " \033[36;5m | _ \ |/ / __|_ ) \033[0m" +echo -e " \033[36;5m | / ' <| _| / / \033[0m" +echo -e " \033[36;5m |_|_\_|\_\___/___| \033[0m" +echo -e " \033[36;5m \033[0m" +echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" +echo -e " \033[32;5m \033[0m" + + +############################################# +# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # +############################################# + +# Version of Kube-VIP to deploy +KVVERSION="v0.6.3" + +# Set the IP addresses of the admin, masters, and workers nodes +admin=192.168.3.5 +master1=192.168.3.21 +master2=192.168.3.22 +master3=192.168.3.23 +worker1=192.168.3.24 +worker2=192.168.3.25 + +# User of remote machines +user=ubuntu + +# Interface used on remotes +interface=eth0 + +# Set the virtual IP address (VIP) +vip=192.168.3.50 + +# Array of all master nodes +allmasters=($master1 $master2 $master3) + +# Array of master nodes +masters=($master2 $master3) + +# Array of worker nodes +workers=($worker1 $worker2) + +# Array of all +all=($master1 $master2 $master3 $worker1 $worker2) + +# Array of all minus master1 +allnomaster1=($master2 $master3 $worker1 $worker2) + +#Loadbalancer IP range +lbrange=192.168.3.60-192.168.3.80 + +#ssh certificate name variable +certName=id_rsa + +############################################# +# DO NOT EDIT BELOW # +############################################# +# For testing purposes - in case time is wrong due to VM snapshots +sudo timedatectl set-ntp off +sudo timedatectl set-ntp on + +# Move SSH certs to ~/.ssh and change permissions +cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh +chmod 600 /home/$user/.ssh/$certName +chmod 644 /home/$user/.ssh/$certName.pub + +# Install Kubectl if not already present +if ! command -v kubectl version &> /dev/null +then + echo -e " \033[31;5mKubectl not found, installing\033[0m" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +else + echo -e " \033[32;5mKubectl already installed\033[0m" +fi + +# Create SSH Config file to ignore checking (don't use in production!) 
+echo "StrictHostKeyChecking no" > ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Step 1: Create Kube VIP +# create RKE2's self-installing manifest dir +sudo mkdir -p /var/lib/rancher/rke2/server/manifests +# Install the kube-vip deployment into rke2's self-installing manifest folder +curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/k3s | vipAddress=$vip vipInterface=$interface sh | sudo tee /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +# Find/Replace all k3s entries to represent rke2 +sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +# copy kube-vip.yaml to home directory +sudo cp /var/lib/rancher/rke2/server/manifests/kube-vip.yaml ~/kube-vip.yaml +# change owner +sudo chown $user:$user kube-vip.yaml +# make kube folder to run kubectl later +mkdir ~/.kube + +# create the rke2 config file +sudo mkdir -p /etc/rancher/rke2 +touch config.yaml +echo "tls-san:" >> config.yaml +echo " - $vip" >> config.yaml +echo " - $master1" >> config.yaml +echo " - $master2" >> config.yaml +echo " - $master3" >> config.yaml +echo "write-kubeconfig-mode: 0644" >> config.yaml +echo "disable:" >> config.yaml +echo " - rke2-ingress-nginx" >> config.yaml +# copy config.yaml to rancher directory +sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml + +# update path with rke2-binaries +echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; + +# Step 2: Copy kube-vip.yaml and certs to all masters +for newnode in "${allmasters[@]}"; do + scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml + scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml + scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh + echo -e " \033[32;5mCopied successfully!\033[0m" +done + +# Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. 
We then use the token to bootstrap additional masternodes +ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su <> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; +curl -sfL https://get.rke2.io | sh - +systemctl enable rke2-server.service +systemctl start rke2-server.service +echo "StrictHostKeyChecking no" > ~/.ssh/config +ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin +scp -i /home/$user/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token +scp -i /home/$user/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml +exit +EOF +echo -e " \033[32;5mMaster1 Completed\033[0m" + +# Step 4: Set variable to the token we just extracted, set kube config location +token=`cat token` +sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' > $HOME/.kube/config +sudo chown $(id -u):$(id -g) $HOME/.kube/config +export KUBECONFIG=${HOME}/.kube/config +sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml +kubectl get nodes + +# Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider +kubectl apply -f https://kube-vip.io/manifests/rbac.yaml +kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml +#IP range for loadbalancer services to use +kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange + +# Step 6: Add other Masternodes, note we import the token we extracted from step 3 +for newnode in "${masters[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml + echo "server: https://$master1:9345" >> /etc/rancher/rke2/config.yaml + echo "tls-san:" >> /etc/rancher/rke2/config.yaml + echo " - $vip" >> /etc/rancher/rke2/config.yaml + echo " - $master1" >> /etc/rancher/rke2/config.yaml + echo " - $master2" >> /etc/rancher/rke2/config.yaml + echo " - $master3" >> /etc/rancher/rke2/config.yaml + curl -sfL https://get.rke2.io | sh - + systemctl enable rke2-server.service + systemctl start rke2-server.service + exit +EOF + echo -e " \033[32;5mMaster node joined successfully!\033[0m" +done + +kubectl get nodes + +# Step 7: Add Workers +for newnode in "${workers[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml + echo "server: https://$vip:9345" >> /etc/rancher/rke2/config.yaml + echo "node-label:" >> /etc/rancher/rke2/config.yaml + echo " - worker=true" >> /etc/rancher/rke2/config.yaml + echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml + curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - + systemctl enable rke2-agent.service + systemctl start rke2-agent.service + exit +EOF + echo -e " \033[32;5mMaster node joined successfully!\033[0m" +done + +kubectl get nodes + +# Step 8: Install Rancher (Optional - Delete if not required) +#Install Helm +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 +chmod 700 get_helm.sh +./get_helm.sh + +# Add Rancher Helm Repo & create namespace +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +kubectl create namespace cattle-system + +# Install Cert-Manager +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml +helm repo add jetstack https://charts.jetstack.io +helm repo update +helm install cert-manager jetstack/cert-manager \ +--namespace cert-manager \ +--create-namespace \ +--version 
v1.13.2 +kubectl get pods --namespace cert-manager + +# Install Rancher +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin +kubectl -n cattle-system rollout status deploy/rancher +kubectl -n cattle-system get deploy rancher + +# Add Rancher LoadBalancer +kubectl get svc -n cattle-system +kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system +while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.type=="Pending")].status}') = "True" ]]; do + sleep 5 + echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m" +done +kubectl get svc -n cattle-system + +echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" From 84c228b43b201c9aee71ab5c425ea47f62a3ab3c Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 11:34:59 +0000 Subject: [PATCH 11/67] update --- Kubernetes/RKE2/ipAddressPool | 8 +++ Kubernetes/RKE2/k3s | 66 ++++++++++++++++++------- Kubernetes/RKE2/l2Advertisement.yaml | 8 +++ Kubernetes/RKE2/rke2-test-do-not-use.sh | 19 +++++-- 4 files changed, 80 insertions(+), 21 deletions(-) create mode 100644 Kubernetes/RKE2/ipAddressPool create mode 100644 Kubernetes/RKE2/l2Advertisement.yaml diff --git a/Kubernetes/RKE2/ipAddressPool b/Kubernetes/RKE2/ipAddressPool new file mode 100644 index 0000000..ffd58cc --- /dev/null +++ b/Kubernetes/RKE2/ipAddressPool @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - $lbrange \ No newline at end of file diff --git a/Kubernetes/RKE2/k3s b/Kubernetes/RKE2/k3s index 41b889c..83dcbc5 100644 --- a/Kubernetes/RKE2/k3s +++ b/Kubernetes/RKE2/k3s @@ -1,42 +1,70 @@ -#!/bin/bash - -echo "apiVersion: apps/v1 +apiVersion: apps/v1 kind: DaemonSet metadata: creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.3 name: kube-vip-ds namespace: kube-system spec: selector: matchLabels: - name: kube-vip-ds + app.kubernetes.io/name: kube-vip-ds template: metadata: creationTimestamp: null labels: - name: kube-vip-ds + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.3 spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists containers: - args: - manager env: - name: vip_arp - value: \"true\" - - name: vip_interface - value: $vipInterface + value: "true" - name: port - value: \"6443\" + value: "6443" + - name: vip_interface + value: $interface - name: vip_cidr - value: \"32\" + value: "32" - name: cp_enable - value: \"true\" + value: "true" - name: cp_namespace value: kube-system + - name: vip_ddns + value: "false" - name: svc_enable - value: \"true\" - - name: vip_address - value: $vipAddress - image: ghcr.io/kube-vip/kube-vip:v0.5.11 + value: "false" + - name: svc_leasename + value: plndr-svcs-lock + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: address + value: $vip + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.3 imagePullPolicy: Always name: kube-vip resources: {} @@ -45,15 
+73,17 @@ spec: add: - NET_ADMIN - NET_RAW - - SYS_TIME hostNetwork: true serviceAccountName: kube-vip tolerations: - effect: NoSchedule - key: node-role.kubernetes.io/master + operator: Exists + - effect: NoExecute + operator: Exists updateStrategy: {} status: currentNumberScheduled: 0 desiredNumberScheduled: 0 numberMisscheduled: 0 - numberReady: 0" + numberReady: 0 + diff --git a/Kubernetes/RKE2/l2Advertisement.yaml b/Kubernetes/RKE2/l2Advertisement.yaml new file mode 100644 index 0000000..b6f8c4d --- /dev/null +++ b/Kubernetes/RKE2/l2Advertisement.yaml @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: example + namespace: metallb-system +spec: + ipAddressPools: + - first-pool \ No newline at end of file diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh index 957e136..fc267fc 100644 --- a/Kubernetes/RKE2/rke2-test-do-not-use.sh +++ b/Kubernetes/RKE2/rke2-test-do-not-use.sh @@ -159,8 +159,6 @@ kubectl get nodes # Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider kubectl apply -f https://kube-vip.io/manifests/rbac.yaml kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -#IP range for loadbalancer services to use -kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${masters[@]}"; do @@ -204,7 +202,22 @@ done kubectl get nodes -# Step 8: Install Rancher (Optional - Delete if not required) +# Step 8: Install Metallb +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml + +# Step 9: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml + +# Step 10: Install Rancher (Optional - Delete if not required) #Install Helm curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 chmod 700 get_helm.sh From 967d6bbca07734d6b41a778a94b80fadc0f8530a Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 11:39:18 +0000 Subject: [PATCH 12/67] update --- Kubernetes/RKE2/rke2-test-do-not-use.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh index fc267fc..f099bda 100644 --- a/Kubernetes/RKE2/rke2-test-do-not-use.sh +++ b/Kubernetes/RKE2/rke2-test-do-not-use.sh @@ -203,6 +203,7 @@ done kubectl get nodes # Step 8: Install Metallb +echo -e " \033[32;5mDeploying Metallb\033[0m" kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml # Download ipAddressPool and configure using lbrange above @@ -210,6 +211,7 @@ curl -sO 
https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernet cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml # Step 9: Deploy IP Pools and l2Advertisement +echo -e " \033[32;5mAdding IP Pools\033[0m" kubectl wait --namespace metallb-system \ --for=condition=ready pod \ --selector=component=controller \ @@ -219,6 +221,7 @@ kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/ # Step 10: Install Rancher (Optional - Delete if not required) #Install Helm +echo -e " \033[32;5mInstalling Helm\033[0m" curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 chmod 700 get_helm.sh ./get_helm.sh @@ -228,6 +231,7 @@ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system # Install Cert-Manager +echo -e " \033[32;5mDeploying Cert-Manager\033[0m" kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml helm repo add jetstack https://charts.jetstack.io helm repo update @@ -238,6 +242,7 @@ helm install cert-manager jetstack/cert-manager \ kubectl get pods --namespace cert-manager # Install Rancher +echo -e " \033[32;5mDeploying Rancher\033[0m" helm install rancher rancher-latest/rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ @@ -254,4 +259,4 @@ while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.t done kubectl get svc -n cattle-system -echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" +echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" \ No newline at end of file From 44af234c81cec1f17744a4ab0766a24d01e8c994 Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 11:47:38 +0000 Subject: [PATCH 13/67] update --- Kubernetes/RKE2/{k3s => kube-vip} | 0 Kubernetes/RKE2/rke2-test-do-not-use.sh | 5 ++++- 2 files changed, 4 insertions(+), 1 deletion(-) rename Kubernetes/RKE2/{k3s => kube-vip} (100%) diff --git a/Kubernetes/RKE2/k3s b/Kubernetes/RKE2/kube-vip similarity index 100% rename from Kubernetes/RKE2/k3s rename to Kubernetes/RKE2/kube-vip diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh index f099bda..890d1bf 100644 --- a/Kubernetes/RKE2/rke2-test-do-not-use.sh +++ b/Kubernetes/RKE2/rke2-test-do-not-use.sh @@ -95,7 +95,10 @@ done # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/k3s | vipAddress=$vip vipInterface=$interface sh | sudo tee /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml +sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml + # Find/Replace all k3s entries to represent rke2 sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # copy kube-vip.yaml to home directory From c33a8dc101eff045c98665e3a68627078668adab Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 4 Dec 2023 12:18:14 +0000 Subject: [PATCH 14/67] update --- Kubernetes/RKE2/rke2-test-do-not-use.sh | 6 ++--- Kubernetes/RKE2/rke2.sh | 33 ++++++++++++++++++++----- 2 files changed, 30 insertions(+), 9 
deletions(-) diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh index 890d1bf..db1ea80 100644 --- a/Kubernetes/RKE2/rke2-test-do-not-use.sh +++ b/Kubernetes/RKE2/rke2-test-do-not-use.sh @@ -200,7 +200,7 @@ for newnode in "${workers[@]}"; do systemctl start rke2-agent.service exit EOF - echo -e " \033[32;5mMaster node joined successfully!\033[0m" + echo -e " \033[32;5mWorker node joined successfully!\033[0m" done kubectl get nodes @@ -214,11 +214,11 @@ curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernet cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml # Step 9: Deploy IP Pools and l2Advertisement -echo -e " \033[32;5mAdding IP Pools\033[0m" +echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m" kubectl wait --namespace metallb-system \ --for=condition=ready pod \ --selector=component=controller \ - --timeout=120s + --timeout=1800s kubectl apply -f ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 957e136..db1ea80 100644 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -95,7 +95,10 @@ done # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/k3s | vipAddress=$vip vipInterface=$interface sh | sudo tee /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml +sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml + # Find/Replace all k3s entries to represent rke2 sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # copy kube-vip.yaml to home directory @@ -159,8 +162,6 @@ kubectl get nodes # Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider kubectl apply -f https://kube-vip.io/manifests/rbac.yaml kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -#IP range for loadbalancer services to use -kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${masters[@]}"; do @@ -199,13 +200,31 @@ for newnode in "${workers[@]}"; do systemctl start rke2-agent.service exit EOF - echo -e " \033[32;5mMaster node joined successfully!\033[0m" + echo -e " \033[32;5mWorker node joined successfully!\033[0m" done kubectl get nodes -# Step 8: Install Rancher (Optional - Delete if not required) +# Step 8: Install Metallb +echo -e " \033[32;5mDeploying Metallb\033[0m" +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool +cat ipAddressPool | sed 
's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml + +# Step 9: Deploy IP Pools and l2Advertisement +echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m" +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=1800s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml + +# Step 10: Install Rancher (Optional - Delete if not required) #Install Helm +echo -e " \033[32;5mInstalling Helm\033[0m" curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 chmod 700 get_helm.sh ./get_helm.sh @@ -215,6 +234,7 @@ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system # Install Cert-Manager +echo -e " \033[32;5mDeploying Cert-Manager\033[0m" kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml helm repo add jetstack https://charts.jetstack.io helm repo update @@ -225,6 +245,7 @@ helm install cert-manager jetstack/cert-manager \ kubectl get pods --namespace cert-manager # Install Rancher +echo -e " \033[32;5mDeploying Rancher\033[0m" helm install rancher rancher-latest/rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ @@ -241,4 +262,4 @@ while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.t done kubectl get svc -n cattle-system -echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" +echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" \ No newline at end of file From 15db5c8d8095bf3035d7ef36cf5a0c62521ae3fe Mon Sep 17 00:00:00 2001 From: tehNooB <125163838+JamesTurland@users.noreply.github.com> Date: Tue, 5 Dec 2023 07:37:14 +0000 Subject: [PATCH 15/67] Update k3s.sh --- Kubernetes/K3S-Deploy/k3s.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Kubernetes/K3S-Deploy/k3s.sh b/Kubernetes/K3S-Deploy/k3s.sh index d5fb684..b9ff68c 100644 --- a/Kubernetes/K3S-Deploy/k3s.sh +++ b/Kubernetes/K3S-Deploy/k3s.sh @@ -166,7 +166,7 @@ for newagent in "${workers[@]}"; do --k3s-version $k3sVersion \ --server-ip $master1 \ --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--node-label \"longhorn=true\",\"worker=true\"" + --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done @@ -202,4 +202,4 @@ kubectl get nodes kubectl get svc kubectl get pods --all-namespaces -o wide -echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m" \ No newline at end of file +echo -e " \033[32;5mHappy Kubing! 
Access Nginx at EXTERNAL-IP above\033[0m" From 8442ed88c4c234fb4a90b197d5ae001a4dc38513 Mon Sep 17 00:00:00 2001 From: tehNooB <125163838+JamesTurland@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:57:10 +0000 Subject: [PATCH 16/67] Delete Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh --- Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh | 205 ------------------- 1 file changed, 205 deletions(-) delete mode 100644 Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh deleted file mode 100644 index 24eaf9a..0000000 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ /dev/null @@ -1,205 +0,0 @@ -#!/bin/bash - -echo -e " \033[33;5m __ _ _ ___ \033[0m" -echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" -echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" -echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" -echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" -echo -e " \033[33;5m |___/ \033[0m" - -echo -e " \033[36;5m _ _________ ___ _ _ _ \033[0m" -echo -e " \033[36;5m | |/ |__ / __| |_ _|_ _ __| |_ __ _| | | \033[0m" -echo -e " \033[36;5m | ' < |_ \__ \ | || ' \(_-| _/ _\` | | | \033[0m" -echo -e " \033[36;5m |_|\_|___|___/ |___|_||_/__/\__\__,_|_|_| \033[0m" -echo -e " \033[36;5m \033[0m" -echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" -echo -e " \033[32;5m \033[0m" - - -############################################# -# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # -############################################# - -# Version of Kube-VIP to deploy -KVVERSION="v0.6.3" - -# K3S Version -k3sVersion="v1.26.10+k3s2" - -# Set the IP addresses of the master and work nodes -master1=192.168.3.21 -master2=192.168.3.22 -master3=192.168.3.23 -worker1=192.168.3.24 -worker2=192.168.3.25 - -# User of remote machines -user=ubuntu - -# Interface used on remotes -interface=eth0 - -# Set the virtual IP address (VIP) -vip=192.168.3.50 - -# Array of master nodes -masters=($master2 $master3) - -# Array of worker nodes -workers=($worker1 $worker2) - -# Array of all -all=($master1 $master2 $master3 $worker1 $worker2) - -# Array of all minus master -allnomaster1=($master2 $master3 $worker1 $worker2) - -#Loadbalancer IP range -lbrange=192.168.3.60-192.168.3.80 - -#ssh certificate name variable -certName=id_rsa - -############################################# -# DO NOT EDIT BELOW # -############################################# -# For testing purposes - in case time is wrong due to VM snapshots -sudo timedatectl set-ntp off -sudo timedatectl set-ntp on - -# Move SSH certs to ~/.ssh and change permissions -cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh -chmod 600 /home/$user/.ssh/$certName -chmod 644 /home/$user/.ssh/$certName.pub - -# Install k3sup to local machine if not already present -if ! command -v k3sup version &> /dev/null -then - echo -e " \033[31;5mk3sup not found, installing\033[0m" - curl -sLS https://get.k3sup.dev | sh - sudo install k3sup /usr/local/bin/ -else - echo -e " \033[32;5mk3sup already installed\033[0m" -fi - -# Install Kubectl if not already present -if ! 
command -v kubectl version &> /dev/null -then - echo -e " \033[31;5mKubectl not found, installing\033[0m" - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl -else - echo -e " \033[32;5mKubectl already installed\033[0m" -fi - -# Create SSH Config file to ignore checking (don't use in production!) -echo "StrictHostKeyChecking no" > ~/.ssh/config - -#add ssh keys for all nodes -for node in "${all[@]}"; do - ssh-copy-id $user@$node -done - -# Install policycoreutils for each node -for newnode in "${all[@]}"; do - ssh $user@$newnode -i ~/.ssh/$certName sudo su < $HOME/kube-vip.yaml - -# Step 4: Copy kube-vip.yaml to master1 -scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml - - -# Step 5: Connect to Master1 and move kube-vip.yaml -ssh $user@$master1 -i ~/.ssh/$certName <<- EOF - sudo mkdir -p /var/lib/rancher/k3s/server/manifests - sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml -EOF - -# Step 6: Add new master nodes (servers) & workers -for newnode in "${masters[@]}"; do - k3sup join \ - --ip $newnode \ - --user $user \ - --sudo \ - --k3s-version $k3sVersion \ - --server \ - --server-ip $master1 \ - --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \ - --server-user $user - echo -e " \033[32;5mMaster node joined successfully!\033[0m" -done - -# add workers -for newagent in "${workers[@]}"; do - k3sup join \ - --ip $newagent \ - --user $user \ - --sudo \ - --k3s-version $k3sVersion \ - --server-ip $master1 \ - --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--node-label \"longhorn=true\",\"worker=true\"" - echo -e " \033[32;5mAgent node joined successfully!\033[0m" -done - -# Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider -kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml - -# Step 8: Install Metallb -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml -# Download ipAddressPool and configure using lbrange above -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool -cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml - -# Step 9: Test with Nginx -kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default -kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default - -echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" - -while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do - sleep 1 -done - -# Step 10: Deploy IP Pools and l2Advertisement -kubectl wait --namespace metallb-system \ - --for=condition=ready pod \ - --selector=component=controller \ - --timeout=120s -kubectl apply -f ipAddressPool.yaml -kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml - -kubectl get nodes -kubectl get svc -kubectl get pods --all-namespaces -o wide - -echo -e " \033[32;5mHappy Kubing! 
Access Nginx at EXTERNAL-IP above\033[0m" From 2e9d3dd8c34d12b25d29b41f216d781d07ef745f Mon Sep 17 00:00:00 2001 From: tehNooB <125163838+JamesTurland@users.noreply.github.com> Date: Tue, 5 Dec 2023 18:02:39 +0000 Subject: [PATCH 17/67] Delete Kubernetes/RKE2/rke2-test-do-not-use.sh --- Kubernetes/RKE2/rke2-test-do-not-use.sh | 265 ------------------------ 1 file changed, 265 deletions(-) delete mode 100644 Kubernetes/RKE2/rke2-test-do-not-use.sh diff --git a/Kubernetes/RKE2/rke2-test-do-not-use.sh b/Kubernetes/RKE2/rke2-test-do-not-use.sh deleted file mode 100644 index db1ea80..0000000 --- a/Kubernetes/RKE2/rke2-test-do-not-use.sh +++ /dev/null @@ -1,265 +0,0 @@ -#!/bin/bash - -echo -e " \033[33;5m __ _ _ ___ \033[0m" -echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" -echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" -echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" -echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" -echo -e " \033[33;5m |___/ \033[0m" - -echo -e " \033[36;5m ___ _ _____ ___ \033[0m" -echo -e " \033[36;5m | _ \ |/ / __|_ ) \033[0m" -echo -e " \033[36;5m | / ' <| _| / / \033[0m" -echo -e " \033[36;5m |_|_\_|\_\___/___| \033[0m" -echo -e " \033[36;5m \033[0m" -echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" -echo -e " \033[32;5m \033[0m" - - -############################################# -# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # -############################################# - -# Version of Kube-VIP to deploy -KVVERSION="v0.6.3" - -# Set the IP addresses of the admin, masters, and workers nodes -admin=192.168.3.5 -master1=192.168.3.21 -master2=192.168.3.22 -master3=192.168.3.23 -worker1=192.168.3.24 -worker2=192.168.3.25 - -# User of remote machines -user=ubuntu - -# Interface used on remotes -interface=eth0 - -# Set the virtual IP address (VIP) -vip=192.168.3.50 - -# Array of all master nodes -allmasters=($master1 $master2 $master3) - -# Array of master nodes -masters=($master2 $master3) - -# Array of worker nodes -workers=($worker1 $worker2) - -# Array of all -all=($master1 $master2 $master3 $worker1 $worker2) - -# Array of all minus master1 -allnomaster1=($master2 $master3 $worker1 $worker2) - -#Loadbalancer IP range -lbrange=192.168.3.60-192.168.3.80 - -#ssh certificate name variable -certName=id_rsa - -############################################# -# DO NOT EDIT BELOW # -############################################# -# For testing purposes - in case time is wrong due to VM snapshots -sudo timedatectl set-ntp off -sudo timedatectl set-ntp on - -# Move SSH certs to ~/.ssh and change permissions -cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh -chmod 600 /home/$user/.ssh/$certName -chmod 644 /home/$user/.ssh/$certName.pub - -# Install Kubectl if not already present -if ! command -v kubectl version &> /dev/null -then - echo -e " \033[31;5mKubectl not found, installing\033[0m" - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl -else - echo -e " \033[32;5mKubectl already installed\033[0m" -fi - -# Create SSH Config file to ignore checking (don't use in production!) 
-echo "StrictHostKeyChecking no" > ~/.ssh/config - -#add ssh keys for all nodes -for node in "${all[@]}"; do - ssh-copy-id $user@$node -done - -# Step 1: Create Kube VIP -# create RKE2's self-installing manifest dir -sudo mkdir -p /var/lib/rancher/rke2/server/manifests -# Install the kube-vip deployment into rke2's self-installing manifest folder -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip -cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml -sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml - -# Find/Replace all k3s entries to represent rke2 -sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml -# copy kube-vip.yaml to home directory -sudo cp /var/lib/rancher/rke2/server/manifests/kube-vip.yaml ~/kube-vip.yaml -# change owner -sudo chown $user:$user kube-vip.yaml -# make kube folder to run kubectl later -mkdir ~/.kube - -# create the rke2 config file -sudo mkdir -p /etc/rancher/rke2 -touch config.yaml -echo "tls-san:" >> config.yaml -echo " - $vip" >> config.yaml -echo " - $master1" >> config.yaml -echo " - $master2" >> config.yaml -echo " - $master3" >> config.yaml -echo "write-kubeconfig-mode: 0644" >> config.yaml -echo "disable:" >> config.yaml -echo " - rke2-ingress-nginx" >> config.yaml -# copy config.yaml to rancher directory -sudo cp ~/config.yaml /etc/rancher/rke2/config.yaml - -# update path with rke2-binaries -echo 'export KUBECONFIG=/etc/rancher/rke2/rke2.yaml' >> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; - -# Step 2: Copy kube-vip.yaml and certs to all masters -for newnode in "${allmasters[@]}"; do - scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$newnode:~/kube-vip.yaml - scp -i ~/.ssh/$certName $HOME/config.yaml $user@$newnode:~/config.yaml - scp -i ~/.ssh/$certName ~/.ssh/{$certName,$certName.pub} $user@$newnode:~/.ssh - echo -e " \033[32;5mCopied successfully!\033[0m" -done - -# Step 3: Connect to Master1 and move kube-vip.yaml and config.yaml. Then install RKE2, copy token back to admin machine. 
We then use the token to bootstrap additional masternodes -ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su <> ~/.bashrc ; echo 'export PATH=${PATH}:/var/lib/rancher/rke2/bin' >> ~/.bashrc ; echo 'alias k=kubectl' >> ~/.bashrc ; source ~/.bashrc ; -curl -sfL https://get.rke2.io | sh - -systemctl enable rke2-server.service -systemctl start rke2-server.service -echo "StrictHostKeyChecking no" > ~/.ssh/config -ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin -scp -i /home/$user/.ssh/$certName /var/lib/rancher/rke2/server/token $user@$admin:~/token -scp -i /home/$user/.ssh/$certName /etc/rancher/rke2/rke2.yaml $user@$admin:~/.kube/rke2.yaml -exit -EOF -echo -e " \033[32;5mMaster1 Completed\033[0m" - -# Step 4: Set variable to the token we just extracted, set kube config location -token=`cat token` -sudo cat ~/.kube/rke2.yaml | sed 's/127.0.0.1/'$master1'/g' > $HOME/.kube/config -sudo chown $(id -u):$(id -g) $HOME/.kube/config -export KUBECONFIG=${HOME}/.kube/config -sudo cp ~/.kube/config /etc/rancher/rke2/rke2.yaml -kubectl get nodes - -# Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider -kubectl apply -f https://kube-vip.io/manifests/rbac.yaml -kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml - -# Step 6: Add other Masternodes, note we import the token we extracted from step 3 -for newnode in "${masters[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml - echo "server: https://$master1:9345" >> /etc/rancher/rke2/config.yaml - echo "tls-san:" >> /etc/rancher/rke2/config.yaml - echo " - $vip" >> /etc/rancher/rke2/config.yaml - echo " - $master1" >> /etc/rancher/rke2/config.yaml - echo " - $master2" >> /etc/rancher/rke2/config.yaml - echo " - $master3" >> /etc/rancher/rke2/config.yaml - curl -sfL https://get.rke2.io | sh - - systemctl enable rke2-server.service - systemctl start rke2-server.service - exit -EOF - echo -e " \033[32;5mMaster node joined successfully!\033[0m" -done - -kubectl get nodes - -# Step 7: Add Workers -for newnode in "${workers[@]}"; do - ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/rancher/rke2/config.yaml - echo "server: https://$vip:9345" >> /etc/rancher/rke2/config.yaml - echo "node-label:" >> /etc/rancher/rke2/config.yaml - echo " - worker=true" >> /etc/rancher/rke2/config.yaml - echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml - curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - - systemctl enable rke2-agent.service - systemctl start rke2-agent.service - exit -EOF - echo -e " \033[32;5mWorker node joined successfully!\033[0m" -done - -kubectl get nodes - -# Step 8: Install Metallb -echo -e " \033[32;5mDeploying Metallb\033[0m" -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml -# Download ipAddressPool and configure using lbrange above -curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool -cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml - -# Step 9: Deploy IP Pools and l2Advertisement -echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. 
This can take a long time as we're likely being rate limited for container pulls...\033[0m" -kubectl wait --namespace metallb-system \ - --for=condition=ready pod \ - --selector=component=controller \ - --timeout=1800s -kubectl apply -f ipAddressPool.yaml -kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml - -# Step 10: Install Rancher (Optional - Delete if not required) -#Install Helm -echo -e " \033[32;5mInstalling Helm\033[0m" -curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 -chmod 700 get_helm.sh -./get_helm.sh - -# Add Rancher Helm Repo & create namespace -helm repo add rancher-latest https://releases.rancher.com/server-charts/latest -kubectl create namespace cattle-system - -# Install Cert-Manager -echo -e " \033[32;5mDeploying Cert-Manager\033[0m" -kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml -helm repo add jetstack https://charts.jetstack.io -helm repo update -helm install cert-manager jetstack/cert-manager \ ---namespace cert-manager \ ---create-namespace \ ---version v1.13.2 -kubectl get pods --namespace cert-manager - -# Install Rancher -echo -e " \033[32;5mDeploying Rancher\033[0m" -helm install rancher rancher-latest/rancher \ - --namespace cattle-system \ - --set hostname=rancher.my.org \ - --set bootstrapPassword=admin -kubectl -n cattle-system rollout status deploy/rancher -kubectl -n cattle-system get deploy rancher - -# Add Rancher LoadBalancer -kubectl get svc -n cattle-system -kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system -while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.type=="Pending")].status}') = "True" ]]; do - sleep 5 - echo -e " \033[32;5mWaiting for LoadBalancer to come online\033[0m" -done -kubectl get svc -n cattle-system - -echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" \ No newline at end of file From e32775d66ef73af47c41419d5ca6f765d51b0f48 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 22:46:43 +0000 Subject: [PATCH 18/67] update --- Kubernetes/GPU-Passthrough/readme.md | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 Kubernetes/GPU-Passthrough/readme.md diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md new file mode 100644 index 0000000..7a02ca4 --- /dev/null +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -0,0 +1,22 @@ +# Create directory +``` + mkdir -p /etc/rancher/rke2 +``` +# Create File for RKE2 - Config +``` + touch /etc/rancher/rke2/config.yaml + echo "token: " >> /etc/rancher/rke2/config.yaml + echo "server: https://:9345" >> /etc/rancher/rke2/config.yaml + echo "node-label:" >> /etc/rancher/rke2/config.yaml + echo " - worker=true" >> /etc/rancher/rke2/config.yaml + echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml +``` +# Install RKE2 +``` + curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +``` +# Enable RKE2 +``` + systemctl enable rke2-agent.service + systemctl start rke2-agent.service +``` \ No newline at end of file From eb7f1da64a77d6d889ad82a592fcef1cbc7701b0 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 22:53:55 +0000 Subject: [PATCH 19/67] update --- Kubernetes/GPU-Passthrough/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/GPU-Passthrough/readme.md 
b/Kubernetes/GPU-Passthrough/readme.md index 7a02ca4..2efb252 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -4,7 +4,7 @@ ``` # Create File for RKE2 - Config ``` - touch /etc/rancher/rke2/config.yaml + sudo touch /etc/rancher/rke2/config.yaml echo "token: " >> /etc/rancher/rke2/config.yaml echo "server: https://:9345" >> /etc/rancher/rke2/config.yaml echo "node-label:" >> /etc/rancher/rke2/config.yaml From dcafeaa65114dae3d29eae5259c35a9d24403e82 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:01:24 +0000 Subject: [PATCH 20/67] update --- Kubernetes/GPU-Passthrough/readme.md | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index 2efb252..f7e94ce 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -1,22 +1,24 @@ # Create directory ``` - mkdir -p /etc/rancher/rke2 +mkdir -p /etc/rancher/rke2 ``` # Create File for RKE2 - Config ``` - sudo touch /etc/rancher/rke2/config.yaml - echo "token: " >> /etc/rancher/rke2/config.yaml - echo "server: https://:9345" >> /etc/rancher/rke2/config.yaml - echo "node-label:" >> /etc/rancher/rke2/config.yaml - echo " - worker=true" >> /etc/rancher/rke2/config.yaml - echo " - longhorn=true" >> /etc/rancher/rke2/config.yaml +sudo nano /etc/rancher/rke2/config.yaml +``` +# Add values +token: +server: https://:9345 +node-label: + - worker=true + - longhorn=true ``` # Install RKE2 ``` - curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 ``` - systemctl enable rke2-agent.service - systemctl start rke2-agent.service +systemctl enable rke2-agent.service +systemctl start rke2-agent.service ``` \ No newline at end of file From 0da953a812829dc93e0b3be1a8085f60c333c6e9 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:02:14 +0000 Subject: [PATCH 21/67] update --- Kubernetes/GPU-Passthrough/readme.md | 1 + 1 file changed, 1 insertion(+) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index f7e94ce..dd2b30c 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -7,6 +7,7 @@ mkdir -p /etc/rancher/rke2 sudo nano /etc/rancher/rke2/config.yaml ``` # Add values +``` token: server: https://:9345 node-label: From a57894d16235b4a36ecd324ba5fa838142460dc7 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:05:44 +0000 Subject: [PATCH 22/67] update --- Kubernetes/GPU-Passthrough/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index dd2b30c..4845a54 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -16,7 +16,7 @@ node-label: ``` # Install RKE2 ``` -curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 ``` From 28b83bf28300a8335d9c6dc7b66d70c2bccab7fd Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:07:10 +0000 Subject: [PATCH 23/67] update --- Kubernetes/GPU-Passthrough/readme.md | 1 + 1 file changed, 1 insertion(+) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index 4845a54..2d1b17d 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ 
b/Kubernetes/GPU-Passthrough/readme.md @@ -16,6 +16,7 @@ node-label: ``` # Install RKE2 ``` +sudo -su sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 From b2007be66b510a89adb193f444bd385a68e8600c Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:08:01 +0000 Subject: [PATCH 24/67] update --- Kubernetes/GPU-Passthrough/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index 2d1b17d..f0031d2 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -16,7 +16,7 @@ node-label: ``` # Install RKE2 ``` -sudo -su +sudo -u sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 From 1c71a54f5abe18df7bba8c9a06bebeddd5c08f3a Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 5 Dec 2023 23:09:08 +0000 Subject: [PATCH 25/67] update --- Kubernetes/GPU-Passthrough/readme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index f0031d2..2f3a037 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -16,7 +16,7 @@ node-label: ``` # Install RKE2 ``` -sudo -u +sudo su sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 From 1c69ba67a4da523b0940db662343024c380e6422 Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 6 Dec 2023 01:02:07 +0000 Subject: [PATCH 26/67] update --- Kubernetes/GPU-Passthrough/jellyfin.yaml | 68 ++++++++++++++++++++++++ Kubernetes/GPU-Passthrough/readme.md | 2 +- 2 files changed, 69 insertions(+), 1 deletion(-) create mode 100644 Kubernetes/GPU-Passthrough/jellyfin.yaml diff --git a/Kubernetes/GPU-Passthrough/jellyfin.yaml b/Kubernetes/GPU-Passthrough/jellyfin.yaml new file mode 100644 index 0000000..0266b3f --- /dev/null +++ b/Kubernetes/GPU-Passthrough/jellyfin.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: jellyfin + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/name: jellyfin + name: jellyfin + namespace: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + app: jellyfin + template: + metadata: + labels: + app: jellyfin + app.kubernetes.io/name: jellyfin + spec: + nodeSelector: + worker: "true" + containers: + - image: jellyfin/jellyfin + imagePullPolicy: Always + name: jellyfin + resources: + limits: + gpu.intel.com/i915: "1" # requesting 1 GPU + ports: + - containerPort: 8096 + name: web + protocol: TCP + env: + - name: TZ + value: Europe/London + volumeMounts: + - mountPath: /config + name: jellyfin + subPath: config + - mountPath: /cache + name: jellyfin + subPath: cache + volumes: + - name: jellyfin + persistentVolumeClaim: + claimName: jellyfin +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: jellyfin + name: jellyfin + namespace: jellyfin +spec: + ports: + - name: web-tcp + port: 8096 + protocol: TCP + targetPort: 8096 + - name: web-udp + port: 8096 + protocol: UDP + targetPort: 8096 + selector: + app: jellyfin \ No newline at end of file diff --git a/Kubernetes/GPU-Passthrough/readme.md b/Kubernetes/GPU-Passthrough/readme.md index 2f3a037..4d67802 100644 --- a/Kubernetes/GPU-Passthrough/readme.md +++ b/Kubernetes/GPU-Passthrough/readme.md @@ -17,7 +17,7 @@ node-label: # Install RKE2 ``` sudo su -sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +curl -sfL https://get.rke2.io | 
INSTALL_RKE2_TYPE="agent" sh - ``` # Enable RKE2 ``` From afbdb976b57944d591b5fec85c1630f5b1e867ea Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 6 Dec 2023 14:15:28 +0000 Subject: [PATCH 27/67] update --- Homelab-Buyer's-Guide/Q3-2023.md | 51 -------------------------------- Homelab-Buyer's-Guide/Q4-2023.md | 51 ++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 51 deletions(-) delete mode 100644 Homelab-Buyer's-Guide/Q3-2023.md create mode 100644 Homelab-Buyer's-Guide/Q4-2023.md diff --git a/Homelab-Buyer's-Guide/Q3-2023.md b/Homelab-Buyer's-Guide/Q3-2023.md deleted file mode 100644 index ce79a4e..0000000 --- a/Homelab-Buyer's-Guide/Q3-2023.md +++ /dev/null @@ -1,51 +0,0 @@ -# Homelab Buyer's Guide Q3-2023 - -* Consumer server build - * High End - * Intel - * CPU: Intel Core i7-13700K (with iGPU) : [https://amzn.to/3E6DbUT](https://amzn.to/44wT8yz) - * Mobo: Z690D4U (if you can find one) or MSI MAG Z790 TOMAHAWK WIFI : [https://amzn.to/3OICGoL](https://amzn.to/44tser9) - * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47S3Br2) - * PSU:  - * AMD - * CPU: AMD Ryzen 9 7900 : [https://amzn.to/45CDLoZ](https://amzn.to/47TqV7N) - * Mobo: ASRock B650D4U-2T/BCM (or B650D4U-2L2T/BCM for 10G)  or ASRock X670E Steel Legend ATX : [https://amzn.to/3KPrRA8](https://amzn.to/3YTrMkI) - * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD) - * Budget - * Intel - * CPU: Intel Core i5-12400 : [https://amzn.to/3KKPhqA](https://amzn.to/3EjiG7m) - * Mobo: MSI MAG B660M MORTAR : [https://amzn.to/3P4HpSb](https://amzn.to/3sy1QPG) - * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/47PgzWD) - * AMD - * CPU: amd ryzen 5 5600 : [https://amzn.to/3QLToq0](https://amzn.to/3Ej9EYi) - * Mobo: MSI MAG B550 TOMAHAWK : [https://amzn.to/3OKh0bV](https://amzn.to/3OW3l1J) - * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3E3Gc8o](https://amzn.to/3Z2vIzN) -* PSU: - * Corsair HX: [https://amzn.to/3P4YfRN](https://amzn.to/3LoJveD) -* GPU: - * Budget: Intel Arc a380: [https://amzn.to/47Fa60k](https://amzn.to/3OU9hrS) -* All-in-One: - * Budget: Lenovo ThinkCentre : [https://amzn.to/3KLPdH1](https://amzn.to/3swN0c8) - * Premium: Intel NUC - pick generation to suit budget : [https://amzn.to/3YR0jQL](https://amzn.to/3KXW6VG) -* Enterprise server - * Server Form Factor: Dell r730 - * Workstation ATX: Epyc 7302p with Gigabyte or SuperMicro Mobo (Check eBay) -* Switch - * Entry-level: Netgear GS108E (5/8/16 ports) : [https://amzn.to/3qCQBVz](https://amzn.to/3L25APA) - * Mid-level: Mikrotik CRS326-24G-2S+RM (or IN - non rack mount) (2x 10Gb SFP+) : [https://amzn.to/3P3BY76](https://amzn.to/3Piz0fd) - * Pro-sumer: Mikrotik CRS328-24P-4S+RM (POE, 1Gb, 10Gb SFP+) (£500) vs Unifi Professional 48 PoE (£1000) : [https://amzn.to/44lVhwC](https://amzn.to/3OYo3xI) -* NIC - * 1G: Intel i210 or i350t4v2 - * 10G: Mellanox Connect-X3 10Gb SFP+, Intel x520DA2 or t2 -* HBA: - * LSI -* SSD/HDD - * NAS: Toshiba MG Series (16TB), Seagate Ironwolf 16TB : [https://amzn.to/3ONcOs9](https://amzn.to/3qRXTVu) - * NVME: Firecuda 530 gen 4, or Samsung 970 EVO : [https://amzn.to/3E5rpKn](https://amzn.to/3KWnoMk) -* Access Point: Unifi U6 (choose model for situation) : [https://amzn.to/3E4x9UD](https://amzn.to/3qQjn5a) -* Rack: TrippLite -* Patch Panel: TRENDnet 24-Port Cat6A Shielded 1U Patch Panel : [https://amzn.to/3QO0fzp](https://amzn.to/3PcU4U9) -* UPS: APC SmartUPS : 
[https://amzn.to/3QRuaqf](https://amzn.to/3sysW9v) -* Cooling: - * Rack: AC Infinity CLOUDPLATE : [https://amzn.to/3QINupG](https://amzn.to/3QZq7bF) - * Fans: Nocuta : [https://amzn.to/3qxMcTT](https://amzn.to/3YU7t6M)https://amzn.to/3YU7t6M diff --git a/Homelab-Buyer's-Guide/Q4-2023.md b/Homelab-Buyer's-Guide/Q4-2023.md new file mode 100644 index 0000000..73ce630 --- /dev/null +++ b/Homelab-Buyer's-Guide/Q4-2023.md @@ -0,0 +1,51 @@ +# Homelab Buyer's Guide Q3-2023 + +* Consumer server build + * High End + * Intel + * CPU: Intel Core i7-13700K (with iGPU) : [https://amzn.to/46KzJeu](https://amzn.to/46KzJeu) + * Mobo: Z690D4U (if you can find one [https://amzn.to/3uG0Qdc](https://amzn.to/3uG0Qdc)) or MSI MAG Z790 TOMAHAWK WIFI : [https://amzn.to/48n68cr](https://amzn.to/48n68cr) + * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3R6VhMB](https://amzn.to/3R6VhMB) + * PSU:  + * AMD + * CPU: AMD Ryzen 9 7900 : [https://amzn.to/47GRdd1](https://amzn.to/47GRdd1) + * Mobo: ASRock B650D4U-2T/BCM (or B650D4U-2L2T/BCM for 10G)  or ASRock X670E Steel Legend ATX : [https://amzn.to/3GvShUZ](https://amzn.to/3GvShUZ) + * RAM: Corsair Vengeance DDR5 32 GB (or more) : [https://amzn.to/3R6VhMB](https://amzn.to/3R6VhMB) + * Budget + * Intel + * CPU: Intel Core i5-12400 : [https://amzn.to/4aaUG5o](https://amzn.to/4aaUG5o) + * Mobo: MSI MAG B660M MORTAR : [https://amzn.to/3R4swjA](https://amzn.to/3R4swjA) + * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3teJeES](https://amzn.to/3teJeES) + * AMD + * CPU: AMD Ryzen 5 5600 : [https://amzn.to/3R8HKUD](https://amzn.to/3R8HKUD) + * Mobo: MSI MAG B550 TOMAHAWK : [https://amzn.to/3Rc0liz](https://amzn.to/3Rc0liz) + * RAM: Corsair Vengeance LPX 16 GB : [https://amzn.to/3teJeES](https://amzn.to/3teJeES) +* PSU: + * Corsair HX: [https://amzn.to/4ab2wvx](https://amzn.to/4ab2wvx) +* GPU: + * Budget: Intel Arc a380: [https://amzn.to/3RsEcOC](https://amzn.to/3RsEcOC) +* All-in-One: + * Budget: Lenovo ThinkCentre : [https://amzn.to/3TjGSiC](https://amzn.to/3TjGSiC) + * Premium: Intel NUC - pick generation to suit budget : [https://amzn.to/4aauE1O](https://amzn.to/4aauE1O) +* Enterprise server + * Server Form Factor: Dell r730 + * Workstation ATX: Epyc 7302p with Gigabyte or SuperMicro Mobo (Check eBay) +* Switch + * Entry-level: Netgear GS108E (5/8/16 ports) : [https://amzn.to/3uJFUCe](https://amzn.to/3uJFUCe) + * Mid-level: Mikrotik CRS326-24G-2S+RM (or IN - non rack mount) (2x 10Gb SFP+) : [https://amzn.to/471cWLL](https://amzn.to/471cWLL) + * Pro-sumer: Mikrotik CRS328-24P-4S+RM (POE, 1Gb, 10Gb SFP+) (£500) vs Unifi Professional 48 PoE (£1000) : [https://amzn.to/3R8I8T5](https://amzn.to/3R8I8T5) +* NIC + * 1G: Intel i210 or i350t4v2 + * 10G: Mellanox Connect-X3 10Gb SFP+, Intel x520DA2 or t2 +* HBA: + * LSI +* SSD/HDD + * NAS: Toshiba MG Series (16TB), Seagate Ironwolf 16TB : [https://amzn.to/417HwSs](https://amzn.to/417HwSs) + * NVME: Firecuda 530 gen 4, or Samsung 970 EVO : [https://amzn.to/486pAtQ](https://amzn.to/486pAtQ) +* Access Point: Unifi U6 (choose model for situation) : [https://amzn.to/484NrKd](https://amzn.to/484NrKd) +* Rack: TrippLite +* Patch Panel: TRENDnet 24-Port Cat6A Shielded 1U Patch Panel : [https://amzn.to/4879mAD](https://amzn.to/4879mAD) +* UPS: APC SmartUPS : [https://amzn.to/46IfSfT](https://amzn.to/46IfSfT) +* Cooling: + * Rack: AC Infinity CLOUDPLATE : [https://amzn.to/3NeUFmX](https://amzn.to/3NeUFmX) + * Fans: Nocuta : [https://amzn.to/46NaAzZ](https://amzn.to/46NaAzZ) From 3c111a06681c53e751c9c2563c457e379799dea8 Mon 
Sep 17 00:00:00 2001 From: James Turland Date: Wed, 6 Dec 2023 14:18:38 +0000 Subject: [PATCH 28/67] update --- Homelab-Buyer's-Guide/{Q4-2023.md => Q3-2023.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename Homelab-Buyer's-Guide/{Q4-2023.md => Q3-2023.md} (100%) diff --git a/Homelab-Buyer's-Guide/Q4-2023.md b/Homelab-Buyer's-Guide/Q3-2023.md similarity index 100% rename from Homelab-Buyer's-Guide/Q4-2023.md rename to Homelab-Buyer's-Guide/Q3-2023.md From 2f9621802eb14448e6508edb61c3bfef9d998f87 Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 6 Dec 2023 22:25:29 +0000 Subject: [PATCH 29/67] add s mb --- Kubernetes/SMB/deployment.yaml | 75 ++++++++++++++++++++++++++++++++++ Kubernetes/SMB/pv-smb.yaml | 27 ++++++++++++ Kubernetes/SMB/pvc-smb.yaml | 14 +++++++ Kubernetes/SMB/readme.md | 33 +++++++++++++++ 4 files changed, 149 insertions(+) create mode 100644 Kubernetes/SMB/deployment.yaml create mode 100644 Kubernetes/SMB/pv-smb.yaml create mode 100644 Kubernetes/SMB/pvc-smb.yaml create mode 100644 Kubernetes/SMB/readme.md diff --git a/Kubernetes/SMB/deployment.yaml b/Kubernetes/SMB/deployment.yaml new file mode 100644 index 0000000..0ff46d5 --- /dev/null +++ b/Kubernetes/SMB/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: jellyfin + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/name: jellyfin + name: jellyfin + namespace: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + app: jellyfin + template: + metadata: + labels: + app: jellyfin + app.kubernetes.io/name: jellyfin + spec: + nodeSelector: + worker: "true" + containers: + - image: jellyfin/jellyfin + imagePullPolicy: Always + name: jellyfin + ports: + - containerPort: 8096 + name: web + protocol: TCP + env: + - name: TZ + value: Europe/London + volumeMounts: + - mountPath: "/Audiobooks" + readOnly: false + name: smb + subPath: Audiobooks + - mountPath: "/Films" + readOnly: false + name: smb + subPath: Films + - mountPath: "/TVShows" + readOnly: false + name: smb + subPath: TVShows + - mountPath: "/Music" + readOnly: false + name: smb + subPath: Music + volumes: + - name: smb + persistentVolumeClaim: + claimName: pvc-jellyfin-smb +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: jellyfin + name: jellyfin + namespace: jellyfin +spec: + ports: + - name: web-tcp + port: 8096 + protocol: TCP + targetPort: 8096 + - name: web-udp + port: 8096 + protocol: UDP + targetPort: 8096 + selector: + app: jellyfin \ No newline at end of file diff --git a/Kubernetes/SMB/pv-smb.yaml b/Kubernetes/SMB/pv-smb.yaml new file mode 100644 index 0000000..d7db3f1 --- /dev/null +++ b/Kubernetes/SMB/pv-smb.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + annotations: + pv.kubernetes.io/provisioned-by: smb.csi.k8s.io + name: pv-jellyfin-smb +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: smb + mountOptions: + - dir_mode=0777 + - file_mode=0777 + csi: + driver: smb.csi.k8s.io + readOnly: false + # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name} + # make sure this value is unique for every share in the cluster + volumeHandle: jellyfin + volumeAttributes: + source: "//192.168.6.2/FreeNAS" # Change this to your SMB IP and share name + nodeStageSecretRef: + name: smbcreds + namespace: default \ No newline at end of file diff --git a/Kubernetes/SMB/pvc-smb.yaml b/Kubernetes/SMB/pvc-smb.yaml new file mode 100644 index 0000000..87402b0 --- 
/dev/null +++ b/Kubernetes/SMB/pvc-smb.yaml @@ -0,0 +1,14 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvc-jellyfin-smb + namespace: jellyfin +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + volumeName: pv-jellyfin-smb + storageClassName: smb \ No newline at end of file diff --git a/Kubernetes/SMB/readme.md b/Kubernetes/SMB/readme.md new file mode 100644 index 0000000..ac40f60 --- /dev/null +++ b/Kubernetes/SMB/readme.md @@ -0,0 +1,33 @@ +# Install CSI driver +``` +curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/v1.13.0/deploy/install-driver.sh | bash -s v1.13.0 -- +``` + +# Create SMB creds +``` +kubectl create secret generic smbcreds --from-literal username=USERNAME --from-literal password="PASSWORD" +``` + +# Create storage class +``` +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: smb +provisioner: smb.csi.k8s.io +parameters: + source: //smb-server.default.svc.cluster.local/share + # if csi.storage.k8s.io/provisioner-secret is provided, will create a sub directory + # with PV name under source + csi.storage.k8s.io/provisioner-secret-name: smbcreds + csi.storage.k8s.io/provisioner-secret-namespace: default + csi.storage.k8s.io/node-stage-secret-name: smbcreds + csi.storage.k8s.io/node-stage-secret-namespace: default +reclaimPolicy: Delete # available values: Delete, Retain +volumeBindingMode: Immediate +mountOptions: + - dir_mode=0777 + - file_mode=0777 + - uid=1001 + - gid=1001 +``` \ No newline at end of file From 7468da18da72cf00b656cf94e32a5f800149535e Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 6 Dec 2023 22:37:08 +0000 Subject: [PATCH 30/67] update --- Kubernetes/SMB/readme.md | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/Kubernetes/SMB/readme.md b/Kubernetes/SMB/readme.md index ac40f60..6a63b59 100644 --- a/Kubernetes/SMB/readme.md +++ b/Kubernetes/SMB/readme.md @@ -10,24 +10,5 @@ kubectl create secret generic smbcreds --from-literal username=USERNAME --from-l # Create storage class ``` -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: smb -provisioner: smb.csi.k8s.io -parameters: - source: //smb-server.default.svc.cluster.local/share - # if csi.storage.k8s.io/provisioner-secret is provided, will create a sub directory - # with PV name under source - csi.storage.k8s.io/provisioner-secret-name: smbcreds - csi.storage.k8s.io/provisioner-secret-namespace: default - csi.storage.k8s.io/node-stage-secret-name: smbcreds - csi.storage.k8s.io/node-stage-secret-namespace: default -reclaimPolicy: Delete # available values: Delete, Retain -volumeBindingMode: Immediate -mountOptions: - - dir_mode=0777 - - file_mode=0777 - - uid=1001 - - gid=1001 +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/deploy/example/storageclass-smb.yaml ``` \ No newline at end of file From 1145a4af9675b5be1faef56e9f1614118b18e412 Mon Sep 17 00:00:00 2001 From: James Turland Date: Thu, 7 Dec 2023 10:24:39 +0000 Subject: [PATCH 31/67] update --- Kubernetes/SMB/readme.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Kubernetes/SMB/readme.md b/Kubernetes/SMB/readme.md index 6a63b59..8c2a887 100644 --- a/Kubernetes/SMB/readme.md +++ b/Kubernetes/SMB/readme.md @@ -11,4 +11,10 @@ kubectl create secret generic smbcreds --from-literal username=USERNAME --from-l # Create storage class ``` kubectl create -f 
https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/deploy/example/storageclass-smb.yaml +``` + +# Check status +``` +kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-controller +kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-node ``` \ No newline at end of file From 90b2be761053526658d53a5bbe7224d298c4dde1 Mon Sep 17 00:00:00 2001 From: "timothy.nilles" Date: Mon, 11 Dec 2023 13:16:08 -0500 Subject: [PATCH 32/67] Modified the externalTrafficPolicy to point to Local instead of Cluster - Shows external IP in Pihole now --- .../Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml b/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml index e5ced22..df1dac5 100644 --- a/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml +++ b/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml @@ -109,6 +109,6 @@ spec: targetPort: 53 selector: app: pihole - externalTrafficPolicy: Cluster + externalTrafficPolicy: Local loadBalancerIP: 192.168.3.67 # this is your DNS IP, NOT THE GUI! type: LoadBalancer From 3af4fb99668b0506c75480b725e37d7f66ceeaab Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 12 Dec 2023 08:56:29 +0000 Subject: [PATCH 33/67] update --- Docker-Swarm/swarm.sh | 110 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 110 insertions(+) create mode 100644 Docker-Swarm/swarm.sh diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh new file mode 100644 index 0000000..0205c0a --- /dev/null +++ b/Docker-Swarm/swarm.sh @@ -0,0 +1,110 @@ +#!/bin/bash + +echo -e " \033[33;5m __ _ _ ___ \033[0m" +echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" +echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" +echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" +echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" +echo -e " \033[33;5m |___/ \033[0m" + +echo -e " \033[36;5m ___ _ ___ \033[0m" +echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m" +echo -e " \033[36;5m | |) / _ \/ _| / / -_) \'_| \__ \ V V / _\` | '_| ' \ \033[0m" +echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m" +echo -e " \033[36;5m \033[0m" +echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" +echo -e " \033[32;5m \033[0m" + + +############################################# +# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # +############################################# + +# Set the IP addresses of the admin, masters, and workers nodes +admin=192.168.3.5 +master1=192.168.3.21 +master2=192.168.3.22 +master3=192.168.3.23 +worker1=192.168.3.24 +worker2=192.168.3.25 + +# User of remote machines +user=ubuntu + +# Interface used on remotes +interface=eth0 + +# Set the virtual IP address (VIP) +vip=192.168.3.50 + +# Array of all master nodes +allmasters=($master1 $master2 $master3) + +# Array of master nodes +masters=($master2 $master3) + +# Array of worker nodes +workers=($worker1 $worker2) + +# Array of all +all=($master1 $master2 $master3 $worker1 $worker2) + +# Array of all minus master1 +allnomaster1=($master2 $master3 $worker1 $worker2) + +#Loadbalancer IP range +lbrange=192.168.3.60-192.168.3.80 + +#ssh certificate name variable +certName=id_rsa + +############################################# +# DO NOT EDIT BELOW # 
+############################################# +# For testing purposes - in case time is wrong due to VM snapshots +sudo timedatectl set-ntp off +sudo timedatectl set-ntp on + +# Move SSH certs to ~/.ssh and change permissions +cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh +chmod 600 /home/$user/.ssh/$certName +chmod 644 /home/$user/.ssh/$certName.pub + +# Create SSH Config file to ignore checking (don't use in production!) +echo "StrictHostKeyChecking no" > ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Install Docker for each node +for newnode in "${all[@]}"; do + ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null + apt-get update + NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + exit +EOF + echo -e " \033[32;5mPolicyCoreUtils installed!\033[0m" +done + +# Step 1: Create Swarm on first node +ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su <> master.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*'` >> master.txt +exit +EOF +echo -e " \033[32;5mMaster1 Completed\033[0m" \ No newline at end of file From eb5a17605a88f7c118a5ebfc90311a803a31acd2 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 12 Dec 2023 09:10:27 +0000 Subject: [PATCH 34/67] update --- Docker-Swarm/swarm.sh | 61 +++++++++++++++++++++++++++++++------------ 1 file changed, 45 insertions(+), 16 deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 0205c0a..c9108bd 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -20,11 +20,11 @@ echo -e " \033[32;5m \ # YOU SHOULD ONLY NEED TO EDIT THIS SECTION # ############################################# -# Set the IP addresses of the admin, masters, and workers nodes +# Set the IP addresses of the admin, managers, and workers nodes admin=192.168.3.5 -master1=192.168.3.21 -master2=192.168.3.22 -master3=192.168.3.23 +manager1=192.168.3.21 +manager2=192.168.3.22 +manager3=192.168.3.23 worker1=192.168.3.24 worker2=192.168.3.25 @@ -37,20 +37,20 @@ interface=eth0 # Set the virtual IP address (VIP) vip=192.168.3.50 -# Array of all master nodes -allmasters=($master1 $master2 $master3) +# Array of all manager nodes +allmanagers=($manager1 $manager2 $manager3) -# Array of master nodes -masters=($master2 $master3) +# Array of manager nodes +managers=($manager2 $manager3) # Array of worker nodes workers=($worker1 $worker2) # Array of all -all=($master1 $master2 $master3 $worker1 $worker2) +all=($manager1 $manager2 $manager3 $worker1 $worker2) -# Array of all minus master1 -allnomaster1=($master2 $master3 $worker1 $worker2) +# Array of all minus manager1 +allnomanager1=($manager2 $manager3 $worker1 $worker2) #Loadbalancer IP range lbrange=192.168.3.60-192.168.3.80 @@ -101,10 +101,39 @@ EOF done # Step 1: Create Swarm on first node -ssh -tt $user@$master1 -i ~/.ssh/$certName sudo su <> master.txt -docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*'` >> master.txt +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <> manager.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' >> worker.txt +ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin +scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager +scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker exit EOF -echo -e " \033[32;5mMaster1 Completed\033[0m" \ No 
newline at end of file +echo -e " \033[32;5mManager1 Completed\033[0m" + +# Step 2: Set variables +managerToken='cat manager' +workerToken='cat worker' + +# Step 3: Connect additional managers +for newnode in "${managers[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su < Date: Wed, 13 Dec 2023 15:15:11 +0000 Subject: [PATCH 35/67] update --- Docker-Swarm/swarm.sh | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index c9108bd..245b60e 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -9,7 +9,7 @@ echo -e " \033[33;5m |___/ \ echo -e " \033[36;5m ___ _ ___ \033[0m" echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m" -echo -e " \033[36;5m | |) / _ \/ _| / / -_) \'_| \__ \ V V / _\` | '_| ' \ \033[0m" +echo -e " \033[36;5m | |) / _ \/ _| / / -_) \'_| \__ \ V V / _\` | '_| ' \ \033[0m" echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m" echo -e " \033[36;5m \033[0m" echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" @@ -78,6 +78,11 @@ for node in "${all[@]}"; do ssh-copy-id $user@$node done +# Copy SSH keys to MN1 to copy tokens back later +scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh +scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh + + # Install Docker for each node for newnode in "${all[@]}"; do ssh $user@$newnode -i ~/.ssh/$certName sudo su <> manager.txt -docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' >> worker.txt +docker swarm join-token manager | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > manager.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt +echo "StrictHostKeyChecking no" > ~/.ssh/config ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker @@ -113,8 +119,8 @@ EOF echo -e " \033[32;5mManager1 Completed\033[0m" # Step 2: Set variables -managerToken='cat manager' -workerToken='cat worker' +managerToken=`cat manager` +workerToken=`cat worker` # Step 3: Connect additional managers for newnode in "${managers[@]}"; do From 8fafd4c5614726aa145390bb6c4a2332a2f66c9d Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 13 Dec 2023 15:19:16 +0000 Subject: [PATCH 36/67] update --- Docker-Swarm/swarm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 245b60e..6e72fb5 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -9,7 +9,7 @@ echo -e " \033[33;5m |___/ \ echo -e " \033[36;5m ___ _ ___ \033[0m" echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m" -echo -e " \033[36;5m | |) / _ \/ _| / / -_) \'_| \__ \ V V / _\` | '_| ' \ \033[0m" +echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m" echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m" echo -e " \033[36;5m \033[0m" echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" From f5f5013f622b35a6cd8e58a5d504d1921c8d23c9 Mon Sep 17 00:00:00 2001 From: James Turland Date: Thu, 14 Dec 2023 17:13:05 +0000 Subject: [PATCH 37/67] update --- Docker-Swarm/swarm.sh | 47 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 
deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 6e72fb5..e13656e 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -28,6 +28,10 @@ manager3=192.168.3.23 worker1=192.168.3.24 worker2=192.168.3.25 +# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM) +workerHostname1=dockerSwarm-04 +workerHostname2=dockerSwarm-05 + # User of remote machines user=ubuntu @@ -83,7 +87,7 @@ scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh -# Install Docker for each node +# Install dependencies for each node (Docker, GlusterFS) for newnode in "${all[@]}"; do ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null apt-get update NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y + systemctl start glusterd + systemctl enable glusterd + mkdir -p /gluster/volume1 exit EOF echo -e " \033[32;5mDocker installed!\033[0m" @@ -142,4 +150,39 @@ for newnode in "${workers[@]}"; do exit EOF echo -e " \033[32;5mWorker node joined successfully!\033[0m" -done \ No newline at end of file +done + +# Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su <> /etc/fstab + mount.glusterfs localhost:/staging-gfs /mnt + chown -R root:docker /mnt + exit +EOF + echo -e " \033[32;5mGlusterFS mounted on reboot\033[0m" +done + +# OPTIONAL # +# Step 7: Add Portainer +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Thu, 14 Dec 2023 20:25:52 +0000 Subject: [PATCH 38/67] update --- Docker-Swarm/swarm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index e13656e..3355f24 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -159,7 +159,7 @@ gluster volume start staging-gfs chmod 666 /var/run/docker.sock docker node update --label-add worker=true $workerHostname1 docker node update --label-add worker=true $workerHostname2 -gluster gluster peer probe $master2; gluster peer probe $master3; gluster peer probe $worker1; gluster peer probe $worker2; +gluster peer probe $master2; gluster peer probe $master3; gluster peer probe $worker1; gluster peer probe $worker2; exit EOF echo -e " \033[32;5mGlusterFS created\033[0m" From d02dceffb28ae6fd6e9bf0ebc7c61bc89603f9b6 Mon Sep 17 00:00:00 2001 From: James Turland Date: Thu, 14 Dec 2023 20:35:42 +0000 Subject: [PATCH 39/67] update --- Docker-Swarm/swarm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 3355f24..7c366e6 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -154,12 +154,12 @@ done # Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Thu, 14 Dec 2023 20:40:28 +0000 Subject: [PATCH 40/67] update --- Docker-Swarm/swarm.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 7c366e6..4af1db4 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -110,7 +110,7 @@ for newnode in "${all[@]}"; do mkdir 
-p /gluster/volume1 exit EOF - echo -e " \033[32;5mDocker installed!\033[0m" + echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m" done # Step 1: Create Swarm on first node @@ -138,7 +138,7 @@ for newnode in "${managers[@]}"; do $manager1 exit EOF - echo -e " \033[32;5mManager node joined successfully!\033[0m" + echo -e " \033[32;5m$newnode - Manager node joined successfully!\033[0m" done # Step 4: Connect additional worker @@ -149,7 +149,7 @@ for newnode in "${workers[@]}"; do $manager1 exit EOF - echo -e " \033[32;5mWorker node joined successfully!\033[0m" + echo -e " \033[32;5m$newnode - Worker node joined successfully!\033[0m" done # Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only @@ -172,7 +172,7 @@ for newnode in "${all[@]}"; do chown -R root:docker /mnt exit EOF - echo -e " \033[32;5mGlusterFS mounted on reboot\033[0m" + echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m" done # OPTIONAL # @@ -185,4 +185,4 @@ docker service ls gluster pool list exit EOF -echo -e " \033[32;5mGlusterFS created\033[0m" \ No newline at end of file +echo -e " \033[32;5mPortainer deployed\033[0m" \ No newline at end of file From 9c9fd483a0baf252b202dd29108ffe42f0d3fce6 Mon Sep 17 00:00:00 2001 From: James Turland Date: Thu, 14 Dec 2023 22:51:23 +0000 Subject: [PATCH 41/67] update --- Docker-Swarm/swarm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 4af1db4..b18b0fe 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -154,7 +154,7 @@ done # Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Thu, 14 Dec 2023 23:03:56 +0000 Subject: [PATCH 42/67] udpate --- Docker-Swarm/swarm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index b18b0fe..4af1db4 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -154,7 +154,7 @@ done # Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Thu, 14 Dec 2023 23:12:29 +0000 Subject: [PATCH 43/67] update --- Docker-Swarm/swarm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 4af1db4..a8d1ac5 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -154,12 +154,12 @@ done # Step 5: Create GlusterFS Cluster across all nodes (connect to Manager1) - we will also label our nodes to restrict deployment of services to workers only ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Fri, 15 Dec 2023 14:30:20 +0000 Subject: [PATCH 44/67] update --- Docker-Swarm/swarm-3-nodes.sh | 181 ++++++++++++++++++++++++++++++++++ 1 file changed, 181 insertions(+) create mode 100644 Docker-Swarm/swarm-3-nodes.sh diff --git a/Docker-Swarm/swarm-3-nodes.sh b/Docker-Swarm/swarm-3-nodes.sh new file mode 100644 index 0000000..9935dd1 --- /dev/null +++ b/Docker-Swarm/swarm-3-nodes.sh @@ -0,0 +1,181 @@ +#!/bin/bash + +echo -e " \033[33;5m __ _ _ ___ \033[0m" +echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" +echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` 
|/ _\` |/ _ \ \033[0m" +echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" +echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" +echo -e " \033[33;5m |___/ \033[0m" + +echo -e " \033[36;5m ___ _ ___ \033[0m" +echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m" +echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m" +echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m" +echo -e " \033[36;5m \033[0m" +echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" +echo -e " \033[32;5m \033[0m" + + +############################################# +# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # +############################################# + +# Set the IP addresses of the admin, managers, and workers nodes +admin=192.168.3.5 +manager1=192.168.3.21 +manager2=192.168.3.22 +manager3=192.168.3.23 +worker1=192.168.3.24 +worker2=192.168.3.25 + +# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM) +workerHostname1=dockerSwarm-04 +workerHostname2=dockerSwarm-05 + +# User of remote machines +user=ubuntu + +# Interface used on remotes +interface=eth0 + +# Set the virtual IP address (VIP) +vip=192.168.3.50 + +# Array of all manager nodes +allmanagers=($manager1 $manager2 $manager3) + +# Array of manager nodes +managers=($manager2 $manager3) + +# Array of worker nodes +workers=($worker1 $worker2) + +# Array of all +all=($manager1 $worker1 $worker2) + +# Array of all minus manager1 +allnomanager1=($manager2 $manager3 $worker1 $worker2) + +#Loadbalancer IP range +lbrange=192.168.3.60-192.168.3.80 + +#ssh certificate name variable +certName=id_rsa + +############################################# +# DO NOT EDIT BELOW # +############################################# +# For testing purposes - in case time is wrong due to VM snapshots +sudo timedatectl set-ntp off +sudo timedatectl set-ntp on + +# Move SSH certs to ~/.ssh and change permissions +cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh +chmod 600 /home/$user/.ssh/$certName +chmod 644 /home/$user/.ssh/$certName.pub + +# Create SSH Config file to ignore checking (don't use in production!) 
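+# (Alternatively, to keep host key checking enabled, you could pre-seed known_hosts,
+# e.g. ssh-keyscan -H "${all[@]}" >> ~/.ssh/known_hosts, and skip the override below.)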
+echo "StrictHostKeyChecking no" > ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Copy SSH keys to MN1 to copy tokens back later +scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh +scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh + + +# Install dependencies for each node (Docker, GlusterFS) +for newnode in "${all[@]}"; do + ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null + apt-get update + NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y + systemctl start glusterd + systemctl enable glusterd + mkdir -p /gluster/volume1 + exit +EOF + echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m" +done + +# Step 1: Create Swarm on first node +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < manager.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt +echo "StrictHostKeyChecking no" > ~/.ssh/config +ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin +scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager +scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker +exit +EOF +echo -e " \033[32;5mManager1 Completed\033[0m" + +# Step 2: Set variables +managerToken=`cat manager` +workerToken=`cat worker` + + + +# Step 4: Connect additional worker +for newnode in "${workers[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/fstab + mount.glusterfs localhost:/staging-gfs /mnt + chown -R root:docker /mnt + exit +EOF + echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m" +done + +# OPTIONAL # +# Step 7: Add Portainer +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Sun, 17 Dec 2023 22:05:36 +0000 Subject: [PATCH 45/67] update --- Docker-Swarm/portainer-agent-stack.yml | 36 ++++++++++++++++++++++++++ Docker-Swarm/swarm-3-nodes.sh | 19 +++----------- Docker-Swarm/swarm.sh | 15 +++-------- 3 files changed, 44 insertions(+), 26 deletions(-) create mode 100644 Docker-Swarm/portainer-agent-stack.yml diff --git a/Docker-Swarm/portainer-agent-stack.yml b/Docker-Swarm/portainer-agent-stack.yml new file mode 100644 index 0000000..140e76a --- /dev/null +++ b/Docker-Swarm/portainer-agent-stack.yml @@ -0,0 +1,36 @@ +version: '3.2' + +services: + agent: + image: portainer/agent:2.19.4 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + networks: + - agent_network + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + + portainer: + image: portainer/portainer-ce:2.19.4 + command: -H tcp://tasks.agent:9001 --tlsskipverify + ports: + - "9443:9443" + - "9000:9000" + - "8000:8000" + volumes: + - /mnt/Portainer:/data + networks: + - agent_network + deploy: + mode: replicated + replicas: 1 + placement: + constraints: [node.role == manager] + +networks: + agent_network: + driver: overlay + attachable: true \ No newline at end of file diff --git a/Docker-Swarm/swarm-3-nodes.sh b/Docker-Swarm/swarm-3-nodes.sh index 9935dd1..68c0dad 100644 --- a/Docker-Swarm/swarm-3-nodes.sh +++ b/Docker-Swarm/swarm-3-nodes.sh @@ -38,9 +38,6 @@ user=ubuntu # Interface used on remotes interface=eth0 -# Set the virtual IP address (VIP) -vip=192.168.3.50 - # Array of all manager nodes allmanagers=($manager1 $manager2 
$manager3) @@ -53,12 +50,6 @@ workers=($worker1 $worker2) # Array of all all=($manager1 $worker1 $worker2) -# Array of all minus manager1 -allnomanager1=($manager2 $manager3 $worker1 $worker2) - -#Loadbalancer IP range -lbrange=192.168.3.60-192.168.3.80 - #ssh certificate name variable certName=id_rsa @@ -132,9 +123,7 @@ echo -e " \033[32;5mManager1 Completed\033[0m" managerToken=`cat manager` workerToken=`cat worker` - - -# Step 4: Connect additional worker +# Step 3: Connect additional worker for newnode in "${workers[@]}"; do ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/fstab @@ -169,7 +158,7 @@ EOF done # OPTIONAL # -# Step 7: Add Portainer +# Step 6: Add Portainer ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Sun, 17 Dec 2023 22:11:27 +0000 Subject: [PATCH 46/67] update --- Docker-Swarm/swarm.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 6a267dd..170d0f4 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -42,7 +42,7 @@ interface=eth0 allmanagers=($manager1 $manager2 $manager3) # Array of extra managers -$managers=($manager2 $manager3) +managers=($manager2 $manager3) # Array of worker nodes workers=($worker1 $worker2) @@ -171,7 +171,7 @@ done # OPTIONAL # # Step 7: Add Portainer ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Sun, 17 Dec 2023 22:31:42 +0000 Subject: [PATCH 47/67] update --- Docker-Swarm/portainer-agent-stack.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Docker-Swarm/portainer-agent-stack.yml b/Docker-Swarm/portainer-agent-stack.yml index 140e76a..d2dac40 100644 --- a/Docker-Swarm/portainer-agent-stack.yml +++ b/Docker-Swarm/portainer-agent-stack.yml @@ -21,7 +21,9 @@ services: - "9000:9000" - "8000:8000" volumes: - - /mnt/Portainer:/data + - type: bind + source: /mnt/Portainer + target: /data networks: - agent_network deploy: From dd1e7e903ffd88672cedbcb6caeb444c1a9e8da7 Mon Sep 17 00:00:00 2001 From: James Turland Date: Sun, 17 Dec 2023 22:52:56 +0000 Subject: [PATCH 48/67] update --- Docker-Swarm/swarm.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/Docker-Swarm/swarm.sh b/Docker-Swarm/swarm.sh index 170d0f4..b16a0b7 100644 --- a/Docker-Swarm/swarm.sh +++ b/Docker-Swarm/swarm.sh @@ -171,6 +171,7 @@ done # OPTIONAL # # Step 7: Add Portainer ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Sun, 17 Dec 2023 22:53:16 +0000 Subject: [PATCH 49/67] update --- Docker-Swarm/swarm-3-nodes.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm-3-nodes.sh b/Docker-Swarm/swarm-3-nodes.sh index 68c0dad..a17b929 100644 --- a/Docker-Swarm/swarm-3-nodes.sh +++ b/Docker-Swarm/swarm-3-nodes.sh @@ -160,7 +160,8 @@ done # OPTIONAL # # Step 6: Add Portainer ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < Date: Tue, 19 Dec 2023 17:10:40 +0000 Subject: [PATCH 50/67] ollama --- Ollama/docker-compose.yml | 44 +++++++++++++++++++++++++++++++++++++++ Ollama/readme.md | 5 +++++ 2 files changed, 49 insertions(+) create mode 100644 Ollama/docker-compose.yml create mode 100644 Ollama/readme.md diff --git a/Ollama/docker-compose.yml b/Ollama/docker-compose.yml new file mode 100644 index 0000000..b503635 --- /dev/null +++ b/Ollama/docker-compose.yml @@ -0,0 +1,44 @@ +version: '3.6' + +services: + ollama: + # Uncomment below for GPU support + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: + # - gpu + volumes: + - 
ollama:/root/.ollama + # Uncomment below to expose Ollama API outside the container stack + # ports: + # - 11434:11434 + container_name: ollama + pull_policy: always + tty: true + restart: unless-stopped + image: ollama/ollama:latest + + ollama-webui: + build: + context: . + args: + OLLAMA_API_BASE_URL: '/ollama/api' + dockerfile: Dockerfile + image: ollama-webui:latest + container_name: ollama-webui + depends_on: + - ollama + ports: + - 3000:8080 + environment: + - "OLLAMA_API_BASE_URL=http://ollama:11434/api" + extra_hosts: + - host.docker.internal:host-gateway + restart: unless-stopped + +volumes: + ollama: {} diff --git a/Ollama/readme.md b/Ollama/readme.md new file mode 100644 index 0000000..7621e5d --- /dev/null +++ b/Ollama/readme.md @@ -0,0 +1,5 @@ +1. Clone the repo from: https://github.com/ollama-webui/ollama-webui +2. Tweak the docker-compose to your liking +3. Run the container: sudo docker compose up -d + +Let it build :) \ No newline at end of file From e217e74db4a13a11ddb19c03c6896c9f1ac02814 Mon Sep 17 00:00:00 2001 From: James Turland Date: Fri, 22 Dec 2023 11:39:01 +0000 Subject: [PATCH 51/67] update --- Kubernetes/K3S-Deploy/k3s.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Kubernetes/K3S-Deploy/k3s.sh b/Kubernetes/K3S-Deploy/k3s.sh index b9ff68c..2319e47 100644 --- a/Kubernetes/K3S-Deploy/k3s.sh +++ b/Kubernetes/K3S-Deploy/k3s.sh @@ -117,7 +117,7 @@ k3sup install \ --tls-san $vip \ --cluster \ --k3s-version $k3sVersion \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --merge \ --sudo \ --local-path $HOME/.kube/config \ @@ -152,7 +152,7 @@ for newnode in "${masters[@]}"; do --server \ --server-ip $master1 \ --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --server-user $user echo -e " \033[32;5mMaster node joined successfully!\033[0m" done From ce1c32bd5ac44bfc0a6ed44472bcb1b5a557f89b Mon Sep 17 00:00:00 2001 From: pgumpoldsberger <60177408+pgumpoldsberger@users.noreply.github.com> Date: Thu, 28 Dec 2023 15:14:54 +0100 Subject: [PATCH 52/67] Remove deprecated environment variables --- Pihole/docker-compose.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Pihole/docker-compose.yml b/Pihole/docker-compose.yml index 34ef122..b42f423 100644 --- a/Pihole/docker-compose.yml +++ b/Pihole/docker-compose.yml @@ -41,8 +41,7 @@ services: environment: TZ: 'Europe/London' WEBPASSWORD: 'password' - DNS1: '172.70.9.2#5053' - DNS2: 'no' + PIHOLE_DNS_: '172.70.9.2#5053' DNSMASQ_LISTENING: 'all' VIRTUAL_HOST: pihole.yourdomain.com # Volumes store your data between container upgrades From 90355c354cbc8f6e8ad2cc35b9970d47c9f4a29b Mon Sep 17 00:00:00 2001 From: James Turland Date: Fri, 5 Jan 2024 23:36:35 +0000 Subject: [PATCH 53/67] update --- Unifi-Controller/docker-compose.yaml | 62 ++++++++++++++++++++++++++++ Unifi-Controller/init-mongo.js | 2 + 2 files changed, 64 insertions(+) create mode 100644 Unifi-Controller/docker-compose.yaml create mode 100644 Unifi-Controller/init-mongo.js diff --git a/Unifi-Controller/docker-compose.yaml 
b/Unifi-Controller/docker-compose.yaml new file mode 100644 index 0000000..6b09acf --- /dev/null +++ b/Unifi-Controller/docker-compose.yaml @@ -0,0 +1,62 @@ +--- +version: "2.1" +services: + unifi-network-application: + image: lscr.io/linuxserver/unifi-network-application:latest + container_name: unifi-network-application + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - MONGO_USER=unifi + - MONGO_PASS=5nHgg3G0cH9d + - MONGO_HOST=unifi-db + - MONGO_PORT=27017 + - MONGO_DBNAME=unifi + - MEM_LIMIT=1024 #optional + - MEM_STARTUP=1024 #optional + # - MONGO_TLS= #optional + # - MONGO_AUTHSOURCE= #optional + volumes: + - /home/ubuntu/docker/unifi-controller:/config + ports: + - 8443:8443 + - 3478:3478/udp + - 10001:10001/udp + - 8080:8080 + - 1900:1900/udp #optional + - 8843:8843 #optional + - 8880:8880 #optional + - 6789:6789 #optional + - 5514:5514/udp #optional + labels: + - "traefik.enable=true" + - "traefik.http.routers.unifi.entrypoints=http" + - "traefik.http.routers.unifi.rule=Host(`unifi.jimsgarage.co.uk`)" + - "traefik.http.middlewares.unifi-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.unifi.middlewares=unifi-https-redirect" + - "traefik.http.routers.unifi-secure.entrypoints=https" + - "traefik.http.routers.unifi-secure.rule=Host(`unifi.jimsgarage.co.uk`)" + - "traefik.http.routers.unifi-secure.tls=true" + - "traefik.http.routers.unifi-secure.service=unifi" + - "traefik.http.services.unifi.loadbalancer.server.port=8443" + - "traefik.http.services.unifi.loadbalancer.server.scheme=https" + - "traefik.docker.network=proxy" + networks: + proxy: + unifi: + restart: unless-stopped + unifi-db: + image: docker.io/mongo:4.4 + container_name: unifi-db + volumes: + - /home/ubuntu/docker/unifi-controller-db:/data/db + - /home/ubuntu/docker-compose/unifi-controller/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro + networks: + unifi: + restart: unless-stopped + +networks: + proxy: + external: true + unifi: \ No newline at end of file diff --git a/Unifi-Controller/init-mongo.js b/Unifi-Controller/init-mongo.js new file mode 100644 index 0000000..a200d9e --- /dev/null +++ b/Unifi-Controller/init-mongo.js @@ -0,0 +1,2 @@ +db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi"}]}); +db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi_stat"}]}); \ No newline at end of file From 3b0c2e21ca22dddfa97849f953541036b7694bb3 Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 10 Jan 2024 14:09:36 +0000 Subject: [PATCH 54/67] update --- Kubernetes/Upgrade/readme.md | 59 ++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 Kubernetes/Upgrade/readme.md diff --git a/Kubernetes/Upgrade/readme.md b/Kubernetes/Upgrade/readme.md new file mode 100644 index 0000000..1223de7 --- /dev/null +++ b/Kubernetes/Upgrade/readme.md @@ -0,0 +1,59 @@ +# Recommendations Before Upgrading +1. Snapshot / Backup your VMs! +2. Backup data and volumes if necessary +3. Drain nodes / scale down deployments + +# Upgrade Rancher +``` +helm upgrade rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ +``` +# Upgrade RKE2 (Each node, not Admin!) +``` +sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=latest sh - +``` +then servers: +``` +sudo systemctl restart rke2-server +``` +or agents +``` +sudo systemctl restart rke2-agent +``` +# Upgrade K3S (Each node, not Admin!) 
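+Per the recommendation above, drain each node before upgrading it and uncordon it afterwards. A minimal sketch run from the admin machine, assuming a node named `k3s-01` (substitute your own node names):
+```
+kubectl drain k3s-01 --ignore-daemonsets --delete-emptydir-data
+# run the upgrade command below on the node and restart the service, then:
+kubectl uncordon k3s-01
+```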
+``` +sudo curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -s - +``` +then servers: +``` +sudo systemctl restart k3s +``` +or agents +``` +sudo systemctl restart k3s-agent +``` + +# Upgrade Longhorn +``` +kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.5.3/deploy/longhorn.yaml +``` + +# Upgrade Metallb +1. Change version on the delete command to the version you are currently running (e.g., v0.13.11) +2. Change version on the apply to the new version (e.g., v0.13.12) +3. Ensure your Lbrange is still the one you want (check ipAddressPool.yaml) +``` +kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.13.11/config/manifests/metallb-native.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml +``` + +# Upgrade Kube-VIP +1. Delete the daemonset in Rancher or use kubectl delete +2. Redeploy the daemonset with updated values (check kube-vip file) +``` +kubectl delete -f kube-vip +kubectl apply -f kube-vip +``` \ No newline at end of file From 0f8f9abcbd67ab0461f5099276b39ad4031e028f Mon Sep 17 00:00:00 2001 From: James Turland Date: Thu, 11 Jan 2024 21:29:23 +0000 Subject: [PATCH 55/67] update --- Docker-Swarm/swarm-3-nodes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker-Swarm/swarm-3-nodes.sh b/Docker-Swarm/swarm-3-nodes.sh index 9935dd1..c60cd2b 100644 --- a/Docker-Swarm/swarm-3-nodes.sh +++ b/Docker-Swarm/swarm-3-nodes.sh @@ -117,7 +117,7 @@ done # Step 1: Create Swarm on first node ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < manager.txt docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt echo "StrictHostKeyChecking no" > ~/.ssh/config From d7a86a5ff534468a770eb9623d3be31f6a21f2f0 Mon Sep 17 00:00:00 2001 From: James Turland Date: Sat, 13 Jan 2024 21:25:01 +0000 Subject: [PATCH 56/67] unbound --- Unbound/docker-compose.yaml | 43 +++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 Unbound/docker-compose.yaml diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml new file mode 100644 index 0000000..dc1d472 --- /dev/null +++ b/Unbound/docker-compose.yaml @@ -0,0 +1,43 @@ +version: '3' + +networks: + dns_net: + driver: bridge + ipam: + config: + - subnet: 172.23.0.0/16 + +services: + pihole: + container_name: pihole + hostname: pihole + image: pihole/pihole:latest + networks: + dns_net: + ipv4_address: 172.23.0.7 + ports: + - "53:53/tcp" + - "53:53/udp" + - "85:80/tcp" + #- "443:443/tcp" + environment: + - 'TZ=Europe/London' + - 'WEBPASSWORD=password' + - 'DNS1=172.23.0.7#5053' + - 'DNS2=no' + volumes: + - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' + - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' + restart: unless-stopped + unbound: + container_name: unbound + image: mvance/unbound:latest + networks: + dns_net: + ipv4_address: 172.23.0.8 + volumes: + - /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound + ports: + - "5053:53/tcp" + - "5053:53/udp" + restart: unless-stopped \ No newline at end of file From b857eac73188d2e11abcc83ab128237894a55142 Mon Sep 17 00:00:00 2001 From: James Turland Date: Sat, 13 Jan 2024 21:37:41 +0000 Subject: [PATCH 57/67] update-network --- Unbound/a-records.conf | 0 Unbound/docker-compose.yaml | 17 +- 
Unbound/forward-records.conf | 54 +++++ Unbound/srv-records.conf | 0 Unbound/unbound.conf | 387 +++++++++++++++++++++++++++++++++++ 5 files changed, 457 insertions(+), 1 deletion(-) create mode 100644 Unbound/a-records.conf create mode 100644 Unbound/forward-records.conf create mode 100644 Unbound/srv-records.conf create mode 100644 Unbound/unbound.conf diff --git a/Unbound/a-records.conf b/Unbound/a-records.conf new file mode 100644 index 0000000..e69de29 diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml index dc1d472..205dcc2 100644 --- a/Unbound/docker-compose.yaml +++ b/Unbound/docker-compose.yaml @@ -6,6 +6,8 @@ networks: ipam: config: - subnet: 172.23.0.0/16 + proxy: + external: true services: pihole: @@ -15,6 +17,7 @@ services: networks: dns_net: ipv4_address: 172.23.0.7 + proxy: ports: - "53:53/tcp" - "53:53/udp" @@ -23,12 +26,24 @@ services: environment: - 'TZ=Europe/London' - 'WEBPASSWORD=password' - - 'DNS1=172.23.0.7#5053' + - 'DNS1=172.23.0.8#5053' - 'DNS2=no' volumes: - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.pihole.entrypoints=http" + - "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.pihole.middlewares=pihole-https-redirect" + - "traefik.http.routers.pihole-secure.entrypoints=https" + - "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.routers.pihole-secure.tls=true" + - "traefik.http.routers.pihole-secure.service=pihole" + - "traefik.http.services.pihole.loadbalancer.server.port=80" + - "traefik.docker.network=proxy" unbound: container_name: unbound image: mvance/unbound:latest diff --git a/Unbound/forward-records.conf b/Unbound/forward-records.conf new file mode 100644 index 0000000..557667b --- /dev/null +++ b/Unbound/forward-records.conf @@ -0,0 +1,54 @@ +forward-zone: + # Forward all queries (except those in cache and local zone) to + # upstream recursive servers + name: "." 
+ # Queries to this forward zone use TLS + forward-tls-upstream: yes + + # https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers + + ## Cloudflare + #forward-addr: 1.1.1.1@853#cloudflare-dns.com + #forward-addr: 1.0.0.1@853#cloudflare-dns.com + #forward-addr: 2606:4700:4700::1111@853#cloudflare-dns.com + #forward-addr: 2606:4700:4700::1001@853#cloudflare-dns.com + + ## Cloudflare Malware + # forward-addr: 1.1.1.2@853#security.cloudflare-dns.com + # forward-addr: 1.0.0.2@853#security.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1112@853#security.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1002@853#security.cloudflare-dns.com + + ## Cloudflare Malware and Adult Content + # forward-addr: 1.1.1.3@853#family.cloudflare-dns.com + # forward-addr: 1.0.0.3@853#family.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1113@853#family.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1003@853#family.cloudflare-dns.com + + ## CleanBrowsing Security Filter + # forward-addr: 185.228.168.9@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.9@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::2@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::2@853#security-filter-dns.cleanbrowsing.org + + ## CleanBrowsing Adult Filter + # forward-addr: 185.228.168.10@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.11@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::1@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::1@853#adult-filter-dns.cleanbrowsing.org + + ## CleanBrowsing Family Filter + # forward-addr: 185.228.168.168@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.168@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::@853#family-filter-dns.cleanbrowsing.org + + ## Quad9 + forward-addr: 9.9.9.9@853#dns.quad9.net + forward-addr: 149.112.112.112@853#dns.quad9.net + forward-addr: 2620:fe::fe@853#dns.quad9.net + forward-addr: 2620:fe::9@853#dns.quad9.net + + ## getdnsapi.net + # forward-addr: 185.49.141.37@853#getdnsapi.net + # forward-addr: 2a04:b900:0:100::37@853#getdnsapi.net \ No newline at end of file diff --git a/Unbound/srv-records.conf b/Unbound/srv-records.conf new file mode 100644 index 0000000..e69de29 diff --git a/Unbound/unbound.conf b/Unbound/unbound.conf new file mode 100644 index 0000000..e3496f8 --- /dev/null +++ b/Unbound/unbound.conf @@ -0,0 +1,387 @@ +server: + ########################################################################### + # BASIC SETTINGS + ########################################################################### + # Time to live maximum for RRsets and messages in the cache. If the maximum + # kicks in, responses to clients still get decrementing TTLs based on the + # original (larger) values. When the internal TTL expires, the cache item + # has expired. Can be set lower to force the resolver to query for data + # often, and not trust (very large) TTL values. + cache-max-ttl: 86400 + + # Time to live minimum for RRsets and messages in the cache. If the minimum + # kicks in, the data is cached for longer than the domain owner intended, + # and thus less queries are made to look up the data. 
Zero makes sure the + # data in the cache is as the domain owner intended, higher values, + # especially more than an hour or so, can lead to trouble as the data in + # the cache does not match up with the actual data any more. + cache-min-ttl: 300 + + # Set the working directory for the program. + directory: "/opt/unbound/etc/unbound" + + # Enable or disable whether IPv4 queries are answered or issued. + # Default: yes + do-ip4: yes + + # Enable or disable whether IPv6 queries are answered or issued. + # If disabled, queries are not answered on IPv6, and queries are not sent + # on IPv6 to the internet nameservers. With this option you can disable the + # IPv6 transport for sending DNS traffic, it does not impact the contents + # of the DNS traffic, which may have IPv4 (A) and IPv6 (AAAA) addresses in + # it. + # Default: yes + # May be set to yes if you have IPv6 connectivity + do-ip6: yes + + # Enable or disable whether TCP queries are answered or issued. + # Default: yes + do-tcp: yes + + # Enable or disable whether UDP queries are answered or issued. + # Default: yes + do-udp: yes + + # RFC 6891. Number of bytes size to advertise as the EDNS reassembly buffer + # size. This is the value put into datagrams over UDP towards peers. + # The actual buffer size is determined by msg-buffer-size (both for TCP and + # UDP). Do not set higher than that value. + # Default is 1232 which is the DNS Flag Day 2020 recommendation. + # Setting to 512 bypasses even the most stringent path MTU problems, but + # is seen as extreme, since the amount of TCP fallback generated is + # excessive (probably also for this resolver, consider tuning the outgoing + # tcp number). + edns-buffer-size: 1232 + + # Listen to for queries from clients and answer from this network interface + # and port. + interface: 0.0.0.0@5053 + # interface: ::0 + port: 53 + + # If enabled, prefer IPv6 transport for sending DNS queries to internet + # nameservers. + # Default: yes + # You want to leave this to no unless you have *native* IPv6. With 6to4 and + # Terredo tunnels your web browser should favor IPv4 for the same reasons + prefer-ip6: no + + # Rotates RRSet order in response (the pseudo-random number is taken from + # the query ID, for speed and thread safety). + rrset-roundrobin: yes + + # Drop user privileges after binding the port. + username: "_unbound" + + ########################################################################### + # LOGGING + ########################################################################### + + # Do not print log lines to inform about local zone actions + log-local-actions: no + + # Do not print one line per query to the log + log-queries: no + + # Do not print one line per reply to the log + log-replies: no + + # Do not print log lines that say why queries return SERVFAIL to clients + log-servfail: no + + # If you want to log to a file, use: + # logfile: /opt/unbound/etc/unbound/unbound.log + # Set log location (using /dev/null further limits logging) + logfile: /dev/null + + # Set logging level + # Level 0: No verbosity, only errors. + # Level 1: Gives operational information. + # Level 2: Gives detailed operational information including short information per query. + # Level 3: Gives query level information, output per query. + # Level 4: Gives algorithm level information. + # Level 5: Logs client identification for cache misses. 
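+  # Verbosity 0 keeps the log limited to errors; raise it temporarily (e.g. to 1 or 2) when troubleshooting.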
+ verbosity: 0 + + ########################################################################### + # PERFORMANCE SETTINGS + ########################################################################### + # https://nlnetlabs.nl/documentation/unbound/howto-optimise/ + # https://nlnetlabs.nl/news/2019/Feb/05/unbound-1.9.0-released/ + + # Number of slabs in the infrastructure cache. Slabs reduce lock contention + # by threads. Must be set to a power of 2. + infra-cache-slabs: 4 + + # Number of incoming TCP buffers to allocate per thread. Default + # is 10. If set to 0, or if do-tcp is "no", no TCP queries from + # clients are accepted. For larger installations increasing this + # value is a good idea. + incoming-num-tcp: 10 + + # Number of slabs in the key cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. Setting (close) to the number + # of cpus is a reasonable guess. + key-cache-slabs: 4 + + # Number of bytes size of the message cache. + # Unbound recommendation is to Use roughly twice as much rrset cache memory + # as you use msg cache memory. + msg-cache-size: 142768128 + + # Number of slabs in the message cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. Setting (close) to the number of + # cpus is a reasonable guess. + msg-cache-slabs: 4 + + # The number of queries that every thread will service simultaneously. If + # more queries arrive that need servicing, and no queries can be jostled + # out (see jostle-timeout), then the queries are dropped. + # This is best set at half the number of the outgoing-range. + # This Unbound instance was compiled with libevent so it can efficiently + # use more than 1024 file descriptors. + num-queries-per-thread: 4096 + + # The number of threads to create to serve clients. + # This is set dynamically at run time to effectively use available CPUs + # resources + num-threads: 3 + + # Number of ports to open. This number of file descriptors can be opened + # per thread. + # This Unbound instance was compiled with libevent so it can efficiently + # use more than 1024 file descriptors. + outgoing-range: 8192 + + # Number of bytes size of the RRset cache. + # Use roughly twice as much rrset cache memory as msg cache memory + rrset-cache-size: 285536256 + + # Number of slabs in the RRset cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. + rrset-cache-slabs: 4 + + # Do no insert authority/additional sections into response messages when + # those sections are not required. This reduces response size + # significantly, and may avoid TCP fallback for some responses. This may + # cause a slight speedup. + minimal-responses: yes + + # # Fetch the DNSKEYs earlier in the validation process, when a DS record + # is encountered. This lowers the latency of requests at the expense of + # little more CPU usage. + prefetch: yes + + # Fetch the DNSKEYs earlier in the validation process, when a DS record is + # encountered. This lowers the latency of requests at the expense of little + # more CPU usage. + prefetch-key: yes + + # Have unbound attempt to serve old responses from cache with a TTL of 0 in + # the response without waiting for the actual resolution to finish. The + # actual resolution answer ends up in the cache later on. + serve-expired: yes + + # If not 0, then set the SO_RCVBUF socket option to get more buffer space on + # UDP port 53 incoming queries. So that short spikes on busy servers do not + # drop packets (see counter in netstat -su). 
Otherwise, the number of bytes + # to ask for, try “4m” on a busy server. + # The OS caps it at a maximum, on linux Unbound needs root permission to + # bypass the limit, or the admin can use sysctl net.core.rmem_max. + # Default: 0 (use system value) + # For example: sysctl -w net.core.rmem_max=4194304 + # To persist reboots, edit /etc/sysctl.conf to include: + # net.core.rmem_max=4194304 + # Larger socket buffer. OS may need config. + # Ensure kernel buffer is large enough to not lose messages in traffic spikes + #so-rcvbuf: 4m + + # Open dedicated listening sockets for incoming queries for each thread and + # try to set the SO_REUSEPORT socket option on each socket. May distribute + # incoming queries to threads more evenly. + so-reuseport: yes + + # If not 0, then set the SO_SNDBUF socket option to get more buffer space + # on UDP port 53 outgoing queries. + # Specify the number of bytes to ask for, try “4m” on a very busy server. + # The OS caps it at a maximum, on linux Unbound needs root permission to + # bypass the limit, or the admin can use sysctl net.core.wmem_max. + # For example: sysctl -w net.core.wmem_max=4194304 + # To persist reboots, edit /etc/sysctl.conf to include: + # net.core.wmem_max=4194304 + # Default: 0 (use system value) + # Larger socket buffer. OS may need config. + # Ensure kernel buffer is large enough to not lose messages in traffic spikes + #so-sndbuf: 4m + + ########################################################################### + # PRIVACY SETTINGS + ########################################################################### + + # RFC 8198. Use the DNSSEC NSEC chain to synthesize NXDO-MAIN and other + # denials, using information from previous NXDO-MAINs answers. In other + # words, use cached NSEC records to generate negative answers within a + # range and positive answers from wildcards. This increases performance, + # decreases latency and resource utilization on both authoritative and + # recursive servers, and increases privacy. Also, it may help increase + # resilience to certain DoS attacks in some circumstances. + aggressive-nsec: yes + + # Extra delay for timeouted UDP ports before they are closed, in msec. + # This prevents very delayed answer packets from the upstream (recursive) + # servers from bouncing against closed ports and setting off all sort of + # close-port counters, with eg. 1500 msec. When timeouts happen you need + # extra sockets, it checks the ID and remote IP of packets, and unwanted + # packets are added to the unwanted packet counter. + delay-close: 10000 + + # Prevent the unbound server from forking into the background as a daemon + do-daemonize: no + + # Add localhost to the do-not-query-address list. + do-not-query-localhost: no + + # Number of bytes size of the aggressive negative cache. + neg-cache-size: 4M + + # Send minimum amount of information to upstream servers to enhance + # privacy (best privacy). + qname-minimisation: yes + + ########################################################################### + # SECURITY SETTINGS + ########################################################################### + # Only give access to recursion clients from LAN IPs + access-control: 127.0.0.1/32 allow + access-control: 192.168.0.0/16 allow + access-control: 172.16.0.0/12 allow + access-control: 10.0.0.0/8 allow + access-control: fc00::/7 allow + access-control: ::1/128 allow + + # File with trust anchor for one zone, which is tracked with RFC5011 + # probes. 
+ auto-trust-anchor-file: "var/root.key" + + # Enable chroot (i.e, change apparent root directory for the current + # running process and its children) + chroot: "/opt/unbound/etc/unbound" + + # Deny queries of type ANY with an empty response. + deny-any: yes + + # Harden against algorithm downgrade when multiple algorithms are + # advertised in the DS record. + harden-algo-downgrade: yes + + # RFC 8020. returns nxdomain to queries for a name below another name that + # is already known to be nxdomain. + harden-below-nxdomain: yes + + # Require DNSSEC data for trust-anchored zones, if such data is absent, the + # zone becomes bogus. If turned off you run the risk of a downgrade attack + # that disables security for a zone. + harden-dnssec-stripped: yes + + # Only trust glue if it is within the servers authority. + harden-glue: yes + + # Ignore very large queries. + harden-large-queries: yes + + # Perform additional queries for infrastructure data to harden the referral + # path. Validates the replies if trust anchors are configured and the zones + # are signed. This enforces DNSSEC validation on nameserver NS sets and the + # nameserver addresses that are encountered on the referral path to the + # answer. Experimental option. + harden-referral-path: no + + # Ignore very small EDNS buffer sizes from queries. + harden-short-bufsize: yes + + # If enabled the HTTP header User-Agent is not set. Use with caution + # as some webserver configurations may reject HTTP requests lacking + # this header. If needed, it is better to explicitly set the + # the http-user-agent. + hide-http-user-agent: no + + # Refuse id.server and hostname.bind queries + hide-identity: yes + + # Refuse version.server and version.bind queries + hide-version: yes + + # Set the HTTP User-Agent header for outgoing HTTP requests. If + # set to "", the default, then the package name and version are + # used. + http-user-agent: "DNS" + + # Report this identity rather than the hostname of the server. + identity: "DNS" + + # These private network addresses are not allowed to be returned for public + # internet names. Any occurrence of such addresses are removed from DNS + # answers. Additionally, the DNSSEC validator may mark the answers bogus. + # This protects against DNS Rebinding + private-address: 10.0.0.0/8 + private-address: 172.16.0.0/12 + private-address: 192.168.0.0/16 + private-address: 169.254.0.0/16 + private-address: fd00::/8 + private-address: fe80::/10 + private-address: ::ffff:0:0/96 + + # Enable ratelimiting of queries (per second) sent to nameserver for + # performing recursion. More queries are turned away with an error + # (servfail). This stops recursive floods (e.g., random query names), but + # not spoofed reflection floods. Cached responses are not rate limited by + # this setting. Experimental option. + ratelimit: 1000 + + # Use this certificate bundle for authenticating connections made to + # outside peers (e.g., auth-zone urls, DNS over TLS connections). + tls-cert-bundle: /etc/ssl/certs/ca-certificates.crt + + # Set the total number of unwanted replies to eep track of in every thread. + # When it reaches the threshold, a defensive action of clearing the rrset + # and message caches is taken, hopefully flushing away any poison. + # Unbound suggests a value of 10 million. + unwanted-reply-threshold: 10000 + + # Use 0x20-encoded random bits in the query to foil spoof attempts. 
This + # perturbs the lowercase and uppercase of query names sent to authority + # servers and checks if the reply still has the correct casing. + # This feature is an experimental implementation of draft dns-0x20. + # Experimental option. + # Don't use Capitalization randomization as it known to cause DNSSEC issues + # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 + use-caps-for-id: yes + + # Help protect users that rely on this validator for authentication from + # potentially bad data in the additional section. Instruct the validator to + # remove data from the additional section of secure messages that are not + # signed properly. Messages that are insecure, bogus, indeterminate or + # unchecked are not affected. + val-clean-additional: yes + + ########################################################################### + # FORWARD ZONE + ########################################################################### + + include: /opt/unbound/etc/unbound/forward-records.conf + + ########################################################################### + # LOCAL ZONE + ########################################################################### + + # Include file for local-data and local-data-ptr + include: /opt/unbound/etc/unbound/a-records.conf + include: /opt/unbound/etc/unbound/srv-records.conf + + ########################################################################### + # WILDCARD INCLUDE + ########################################################################### + #include: "/opt/unbound/etc/unbound/*.conf" + +remote-control: + control-enable: no \ No newline at end of file From 3af51802b5500b295bef53b7422c7164c21a7509 Mon Sep 17 00:00:00 2001 From: James Turland Date: Sat, 13 Jan 2024 22:46:56 +0000 Subject: [PATCH 58/67] update --- Unbound/docker-compose.yaml | 7 +++---- Unbound/unbound.conf | 10 +++++----- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml index 205dcc2..025c54d 100644 --- a/Unbound/docker-compose.yaml +++ b/Unbound/docker-compose.yaml @@ -24,10 +24,9 @@ services: - "85:80/tcp" #- "443:443/tcp" environment: - - 'TZ=Europe/London' - - 'WEBPASSWORD=password' - - 'DNS1=172.23.0.8#5053' - - 'DNS2=no' + - TZ: 'Europe/London' + - WEBPASSWORD: 'password' + - PIHOLE_DNS_: '172.23.0.8#5053' volumes: - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' diff --git a/Unbound/unbound.conf b/Unbound/unbound.conf index e3496f8..90fe9c5 100644 --- a/Unbound/unbound.conf +++ b/Unbound/unbound.conf @@ -185,7 +185,7 @@ server: # If not 0, then set the SO_RCVBUF socket option to get more buffer space on # UDP port 53 incoming queries. So that short spikes on busy servers do not # drop packets (see counter in netstat -su). Otherwise, the number of bytes - # to ask for, try “4m” on a busy server. + # to ask for, try �4m� on a busy server. # The OS caps it at a maximum, on linux Unbound needs root permission to # bypass the limit, or the admin can use sysctl net.core.rmem_max. # Default: 0 (use system value) @@ -203,7 +203,7 @@ server: # If not 0, then set the SO_SNDBUF socket option to get more buffer space # on UDP port 53 outgoing queries. - # Specify the number of bytes to ask for, try “4m” on a very busy server. + # Specify the number of bytes to ask for, try �4m� on a very busy server. 
# The OS caps it at a maximum, on linux Unbound needs root permission to # bypass the limit, or the admin can use sysctl net.core.wmem_max. # For example: sysctl -w net.core.wmem_max=4194304 @@ -368,15 +368,15 @@ server: # FORWARD ZONE ########################################################################### - include: /opt/unbound/etc/unbound/forward-records.conf + #include: /opt/unbound/etc/unbound/forward-records.conf ########################################################################### # LOCAL ZONE ########################################################################### # Include file for local-data and local-data-ptr - include: /opt/unbound/etc/unbound/a-records.conf - include: /opt/unbound/etc/unbound/srv-records.conf + #include: /opt/unbound/etc/unbound/a-records.conf + #include: /opt/unbound/etc/unbound/srv-records.conf ########################################################################### # WILDCARD INCLUDE From d02b15e7a2d281c2384eef0cb10526db3b554684 Mon Sep 17 00:00:00 2001 From: James Turland Date: Sun, 14 Jan 2024 00:43:21 +0000 Subject: [PATCH 59/67] vpn --- Unbound/docker-compose-vpn.yaml | 90 +++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 Unbound/docker-compose-vpn.yaml diff --git a/Unbound/docker-compose-vpn.yaml b/Unbound/docker-compose-vpn.yaml new file mode 100644 index 0000000..21ec2ab --- /dev/null +++ b/Unbound/docker-compose-vpn.yaml @@ -0,0 +1,90 @@ +version: '3' + +networks: + dns_net: + driver: bridge + ipam: + config: + - subnet: 172.23.0.0/16 + proxy: + external: true + +services: + gluetun: + image: qmcgaw/gluetun + networks: + dns_net: + ipv4_address: 172.23.0.9 + container_name: gluetun + # line above must be uncommented to allow external containers to connect. + # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/connect-a-container-to-gluetun.md#external-container-to-gluetun + cap_add: + - NET_ADMIN + devices: + - /dev/net/tun:/dev/net/tun + ports: + - 6881:6881 + - 6881:6881/udp + volumes: + - /home/ubuntu/docker/gluetun:/gluetun + environment: + # See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup + - VPN_SERVICE_PROVIDER=nordvpn + - VPN_TYPE=wireguard + # OpenVPN: + # - OPENVPN_USER= + # - OPENVPN_PASSWORD= + # Wireguard: + - WIREGUARD_PRIVATE_KEY= # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/nordvpn.md#obtain-your-wireguard-private-key + - WIREGUARD_ADDRESSES=10.5.0.2/32 + # Timezone for accurate log times + - TZ=Europe/London + # Server list updater + # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list + - UPDATER_PERIOD=24h + pihole: + container_name: pihole + hostname: pihole + image: pihole/pihole:latest + networks: + dns_net: + ipv4_address: 172.23.0.7 + proxy: + ports: + - "53:53/tcp" + - "53:53/udp" + - "85:80/tcp" + #- "443:443/tcp" + environment: + - TZ: 'Europe/London' + - WEBPASSWORD: 'password' + - PIHOLE_DNS_: '172.23.0.8#5053' + volumes: + - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' + - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.pihole.entrypoints=http" + - "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.pihole.middlewares=pihole-https-redirect" + - "traefik.http.routers.pihole-secure.entrypoints=https" + - 
"traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.routers.pihole-secure.tls=true" + - "traefik.http.routers.pihole-secure.service=pihole" + - "traefik.http.services.pihole.loadbalancer.server.port=80" + - "traefik.docker.network=proxy" + unbound: + container_name: unbound + image: mvance/unbound:latest + networks: + dns_net: + ipv4_address: 172.23.0.8 + network_mode: "service:gluetun" + volumes: + - /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound + ports: + - "5053:53/tcp" + - "5053:53/udp" + restart: unless-stopped \ No newline at end of file From b5c11f4a603cb0fcd05d0130bb0fe3014a78680c Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 15 Jan 2024 08:51:49 +0000 Subject: [PATCH 60/67] update --- Unbound/docker-compose.yaml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml index 025c54d..4659c9a 100644 --- a/Unbound/docker-compose.yaml +++ b/Unbound/docker-compose.yaml @@ -13,23 +13,23 @@ services: pihole: container_name: pihole hostname: pihole - image: pihole/pihole:latest + image: pihole/pihole:latest # remember to change this if you're using rpi networks: dns_net: ipv4_address: 172.23.0.7 proxy: ports: - - "53:53/tcp" - - "53:53/udp" - - "85:80/tcp" + - "53:53/tcp" + - "53:53/udp" + - "85:80/tcp" #- "443:443/tcp" environment: - - TZ: 'Europe/London' - - WEBPASSWORD: 'password' - - PIHOLE_DNS_: '172.23.0.8#5053' + TZ: 'Europe/London' + WEBPASSWORD: 'password' + PIHOLE_DNS_: '172.23.0.8#5053' volumes: - - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' - - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' + - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' + - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' restart: unless-stopped labels: - "traefik.enable=true" @@ -45,7 +45,7 @@ services: - "traefik.docker.network=proxy" unbound: container_name: unbound - image: mvance/unbound:latest + image: mvance/unbound:latest # remember to change this if you're using rpi networks: dns_net: ipv4_address: 172.23.0.8 From f7f035768478d4d90062e4368e7c0a996616109a Mon Sep 17 00:00:00 2001 From: James Turland Date: Mon, 15 Jan 2024 08:52:27 +0000 Subject: [PATCH 61/67] update --- Unbound/docker-compose.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml index 4659c9a..1ec7334 100644 --- a/Unbound/docker-compose.yaml +++ b/Unbound/docker-compose.yaml @@ -50,8 +50,8 @@ services: dns_net: ipv4_address: 172.23.0.8 volumes: - - /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound + - /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound ports: - - "5053:53/tcp" - - "5053:53/udp" + - "5053:53/tcp" + - "5053:53/udp" restart: unless-stopped \ No newline at end of file From 567dbfe3c0a2c85c7c061a29412c7e130be12fc7 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 16 Jan 2024 12:55:53 +0000 Subject: [PATCH 62/67] add networkpolicies --- .../NetworkPolicies/allow-all-ingress.yaml | 11 ++++++ .../default-deny-all-ingress.yaml | 9 +++++ Kubernetes/NetworkPolicies/example.yaml | 35 +++++++++++++++++++ .../NetworkPolicies/namespace-example.yaml | 17 +++++++++ .../NetworkPolicies/networkpolicy-egress.yaml | 24 +++++++++++++ .../networkpolicy-ingress.yaml | 17 +++++++++ Kubernetes/NetworkPolicies/port-example.yaml | 20 +++++++++++ 7 files changed, 133 insertions(+) create mode 100644 Kubernetes/NetworkPolicies/allow-all-ingress.yaml create mode 100644 
Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml create mode 100644 Kubernetes/NetworkPolicies/example.yaml create mode 100644 Kubernetes/NetworkPolicies/namespace-example.yaml create mode 100644 Kubernetes/NetworkPolicies/networkpolicy-egress.yaml create mode 100644 Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml create mode 100644 Kubernetes/NetworkPolicies/port-example.yaml diff --git a/Kubernetes/NetworkPolicies/allow-all-ingress.yaml b/Kubernetes/NetworkPolicies/allow-all-ingress.yaml new file mode 100644 index 0000000..462912d --- /dev/null +++ b/Kubernetes/NetworkPolicies/allow-all-ingress.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-ingress +spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress diff --git a/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml b/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml new file mode 100644 index 0000000..e823802 --- /dev/null +++ b/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-ingress +spec: + podSelector: {} + policyTypes: + - Ingress diff --git a/Kubernetes/NetworkPolicies/example.yaml b/Kubernetes/NetworkPolicies/example.yaml new file mode 100644 index 0000000..e91eed2 --- /dev/null +++ b/Kubernetes/NetworkPolicies/example.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: test-network-policy + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Ingress + - Egress + ingress: + - from: + - ipBlock: + cidr: 172.17.0.0/16 + except: + - 172.17.1.0/24 + - namespaceSelector: + matchLabels: + project: myproject + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 5978 + diff --git a/Kubernetes/NetworkPolicies/namespace-example.yaml b/Kubernetes/NetworkPolicies/namespace-example.yaml new file mode 100644 index 0000000..e8ed653 --- /dev/null +++ b/Kubernetes/NetworkPolicies/namespace-example.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: egress-namespaces +spec: + podSelector: + matchLabels: + app: myapp + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchExpressions: + - key: namespace + operator: In + values: ["frontend", "backend"] \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml b/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml new file mode 100644 index 0000000..5671ac8 --- /dev/null +++ b/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml @@ -0,0 +1,24 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-internet-only + namespace: pihole +spec: + podSelector: {} + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 192.168.0.0/16 + - 172.16.0.0/20 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: "kube-system" + - podSelector: + matchLabels: + k8s-app: "kube-dns" \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml b/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml new file mode 100644 index 0000000..bdc8c95 --- /dev/null +++ b/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml @@ -0,0 +1,17 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 
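+# Matches all pods in the namespace and only allows ingress from addresses outside the private ranges listed below.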
+metadata: + name: restrict-internal + namespace: pihole +spec: + podSelector: {} + policyTypes: + - Ingress + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 192.168.0.0/16 + - 172.16.0.0/20 \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/port-example.yaml b/Kubernetes/NetworkPolicies/port-example.yaml new file mode 100644 index 0000000..f4c914b --- /dev/null +++ b/Kubernetes/NetworkPolicies/port-example.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 + From 2b00729b69fc5c60211631c4db07fff550ffbee7 Mon Sep 17 00:00:00 2001 From: James Turland Date: Tue, 16 Jan 2024 14:26:10 +0000 Subject: [PATCH 63/67] remove healthcheck --- Unbound/docker-compose.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml index 1ec7334..c288ba7 100644 --- a/Unbound/docker-compose.yaml +++ b/Unbound/docker-compose.yaml @@ -54,4 +54,6 @@ services: ports: - "5053:53/tcp" - "5053:53/udp" + healthcheck: + test: ["NONE"] restart: unless-stopped \ No newline at end of file From 14a7f44a168497fc13eab4becc7b0d7020378d3a Mon Sep 17 00:00:00 2001 From: James Turland Date: Fri, 19 Jan 2024 23:29:40 +0000 Subject: [PATCH 64/67] add proxmox-nas --- Proxmox-NAS/config.yml | 49 +++++++++++++++++++++++++++++++++ Proxmox-NAS/docker-compose.yaml | 28 +++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 Proxmox-NAS/config.yml create mode 100644 Proxmox-NAS/docker-compose.yaml diff --git a/Proxmox-NAS/config.yml b/Proxmox-NAS/config.yml new file mode 100644 index 0000000..fc91435 --- /dev/null +++ b/Proxmox-NAS/config.yml @@ -0,0 +1,49 @@ +auth: + - user: foo + group: foo + uid: 1000 + gid: 1000 + password: bar +# - user: baz +# group: xxx +# uid: 1100 +# gid: 1200 +# password_file: /run/secrets/baz_password + +global: + - "force user = foo" + - "force group = foo" + +share: + - name: public + comment: Public + path: /samba/public + browsable: yes + readonly: no + guestok: yes + veto: no + recycle: yes +# - name: share +# path: /samba/share +# browsable: yes +# readonly: no +# guestok: yes +# writelist: foo +# veto: no +# - name: foo +# path: /samba/foo +# browsable: yes +# readonly: no +# guestok: no +# validusers: foo +# writelist: foo +# veto: no +# hidefiles: /_*/ +# - name: foo-baz +# path: /samba/foo-baz +# browsable: yes +# readonly: no +# guestok: no +# validusers: foo,baz +# writelist: foo,baz +# veto: no \ No newline at end of file diff --git a/Proxmox-NAS/docker-compose.yaml b/Proxmox-NAS/docker-compose.yaml new file mode 100644 index 0000000..ff2ff6d --- /dev/null +++ b/Proxmox-NAS/docker-compose.yaml @@ -0,0 +1,28 @@ +name: samba + +services: + samba: + image: crazymax/samba + container_name: samba + network_mode: host + volumes: + - "./data:/data" # Contains cache, configuration and runtime data + - "/smb:/samba/public" + # - "./share:/samba/share" - optional additional share - see config.yml for permissions + # - "./foo:/samba/foo" - optional additional share - see config.yml for permissions + # - "./foo-baz:/samba/foo-baz" - optional additional share - see config.yml for permissions + environment: + - "TZ=Europe/London" + # - "CONFIG_FILE=/your-location" this can be anywhere you want. 
Default is /data + # - "SAMBA_WORKGROUP=WORKGROUP" change to your workgroup, default it WORKGROUP + # - "SAMBA_SERVER_STRING=some string" is the equivalent of the NT Description field + - "SAMBA_LOG_LEVEL=0" + # - "SAMBA_FOLLOW_SYMLINKS=NO" default is yes + # - "SAMBA_WIDE_LINKS=NO" default is yes + # - "SAMBA_HOSTS_ALLOW=0.0.0.0/0" default 127.0.0.0/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 + # - "SAMBA_INTERFACES=some-interface" default all + # - "WSDD2_ENABLE=1" default is 0 + # - "WSDD2_HOSTNAME=string" Override hostname (default to host or container name) + # - "WSDD2_NETBIOS_NAME=some-name" Set NetBIOS name (default to hostname) + # - "WSDD2_INTERFANCE=interface-name" Reply only on this interface + restart: always \ No newline at end of file From 3f567c6b5862884796b120d596ef8c6db44679e5 Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 24 Jan 2024 14:04:10 +0000 Subject: [PATCH 65/67] add kubernetes-lite --- Kubernetes/Kubernetes-Lite/k3s.sh | 223 ++++++++++++++++++++++++++++++ 1 file changed, 223 insertions(+) create mode 100644 Kubernetes/Kubernetes-Lite/k3s.sh diff --git a/Kubernetes/Kubernetes-Lite/k3s.sh b/Kubernetes/Kubernetes-Lite/k3s.sh new file mode 100644 index 0000000..4353758 --- /dev/null +++ b/Kubernetes/Kubernetes-Lite/k3s.sh @@ -0,0 +1,223 @@ +#!/bin/bash + +echo -e " \033[33;5m __ _ _ ___ \033[0m" +echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" +echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" +echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" +echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" +echo -e " \033[33;5m |___/ \033[0m" + +echo -e " \033[36;5m _ _________ ___ _ _ _ \033[0m" +echo -e " \033[36;5m | |/ |__ / __| |_ _|_ _ __| |_ __ _| | | \033[0m" +echo -e " \033[36;5m | ' < |_ \__ \ | || ' \(_-| _/ _\` | | | \033[0m" +echo -e " \033[36;5m |_|\_|___|___/ |___|_||_/__/\__\__,_|_|_| \033[0m" +echo -e " \033[36;5m \033[0m" +echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" +echo -e " \033[32;5m \033[0m" + + +############################################# +# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # +############################################# + +# This is an update version of the K3S script that install longhorn on the worker nodes. 
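+# (Workers are labelled longhorn=true / worker=true when they join, and the Longhorn manifest deployed at the end is pinned to that label.)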
+# The worker nodes are scaled to 3 for redundancy and HA +# This has the added benefit of using local storage on worker nodes (faster) + +# Version of Kube-VIP to deploy +KVVERSION="v0.6.3" + +# K3S Version +k3sVersion="v1.26.10+k3s2" + +# Set the IP addresses of the master and work nodes +master1=192.168.3.21 +master2=192.168.3.22 +master3=192.168.3.23 +worker1=192.168.3.24 +worker2=192.168.3.25 +worker3=192.168.3.26 + +# User of remote machines +user=ubuntu + +# Interface used on remotes +interface=eth0 + +# Set the virtual IP address (VIP) +vip=192.168.3.50 + +# Array of master nodes +masters=($master2 $master3) + +# Array of worker nodes +workers=($worker1 $worker2 $worker3) + +# Array of all +all=($master1 $master2 $master3 $worker1 $worker2 $worker3) + +# Array of all minus master +allnomaster1=($master2 $master3 $worker1 $worker2 $worker3) + +#Loadbalancer IP range +lbrange=192.168.3.60-192.168.3.80 + +#ssh certificate name variable +certName=id_rsa + +############################################# +# DO NOT EDIT BELOW # +############################################# +# For testing purposes - in case time is wrong due to VM snapshots +sudo timedatectl set-ntp off +sudo timedatectl set-ntp on + +# Move SSH certs to ~/.ssh and change permissions +cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh +chmod 600 /home/$user/.ssh/$certName +chmod 644 /home/$user/.ssh/$certName.pub + +# Install k3sup to local machine if not already present +if ! command -v k3sup version &> /dev/null +then + echo -e " \033[31;5mk3sup not found, installing\033[0m" + curl -sLS https://get.k3sup.dev | sh + sudo install k3sup /usr/local/bin/ +else + echo -e " \033[32;5mk3sup already installed\033[0m" +fi + +# Install Kubectl if not already present +if ! command -v kubectl version &> /dev/null +then + echo -e " \033[31;5mKubectl not found, installing\033[0m" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl +else + echo -e " \033[32;5mKubectl already installed\033[0m" +fi + +# Create SSH Config file to ignore checking (don't use in production!) 
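+# (For production, a safer option is to pre-populate known_hosts instead, e.g.:
+#   ssh-keyscan -H <node-ip> >> ~/.ssh/known_hosts
+# for each node, rather than disabling host key checking.)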
+echo "StrictHostKeyChecking no" > ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Install policycoreutils for each node +for newnode in "${all[@]}"; do + ssh $user@$newnode -i ~/.ssh/$certName sudo su < $HOME/kube-vip.yaml + +# Step 4: Copy kube-vip.yaml to master1 +scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml + + +# Step 5: Connect to Master1 and move kube-vip.yaml +ssh $user@$master1 -i ~/.ssh/$certName <<- EOF + sudo mkdir -p /var/lib/rancher/k3s/server/manifests + sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml +EOF + +# Step 6: Add new master nodes (servers) & workers +for newnode in "${masters[@]}"; do + k3sup join \ + --ip $newnode \ + --user $user \ + --sudo \ + --k3s-version $k3sVersion \ + --server \ + --server-ip $master1 \ + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ + --server-user $user + echo -e " \033[32;5mMaster node joined successfully!\033[0m" +done + +# add workers +for newagent in "${workers[@]}"; do + k3sup join \ + --ip $newagent \ + --user $user \ + --sudo \ + --k3s-version $k3sVersion \ + --server-ip $master1 \ + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" + echo -e " \033[32;5mAgent node joined successfully!\033[0m" +done + +# Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider +kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml + +# Step 8: Install Metallb +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml + +# Step 9: Test with Nginx +kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default +kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default + +echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" + +while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do + sleep 1 +done + +# Step 10: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml + +kubectl get nodes +kubectl get svc +kubectl get pods --all-namespaces -o wide + +echo -e " \033[32;5mHappy Kubing! 
Access Nginx at EXTERNAL-IP above\033[0m" + +# Step 11: Install Longhorn (using modified Official to pin to Longhorn Nodes) +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml +kubectl get pods \ +--namespace longhorn-system \ +--watch + +# Step 12: Print out confirmation + +kubectl get nodes +kubectl get svc -n longhorn-system + +echo -e " \033[32;5mHappy Kubing! Access Longhorn through Rancher UI\033[0m" From 98ab64248dd52c104ad05587f737844176fecb5f Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 24 Jan 2024 20:55:28 +0000 Subject: [PATCH 66/67] update --- Kubernetes/Kubernetes-Lite/k3s.sh | 43 ++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/Kubernetes/Kubernetes-Lite/k3s.sh b/Kubernetes/Kubernetes-Lite/k3s.sh index 4353758..fc99626 100644 --- a/Kubernetes/Kubernetes-Lite/k3s.sh +++ b/Kubernetes/Kubernetes-Lite/k3s.sh @@ -209,15 +209,52 @@ kubectl get pods --all-namespaces -o wide echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m" -# Step 11: Install Longhorn (using modified Official to pin to Longhorn Nodes) +# Step 11: Install helm +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 +chmod 700 get_helm.sh +./get_helm.sh + +# Step 12: Add Rancher Helm Repository +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +kubectl create namespace cattle-system + +# Step 13: Install Cert-Manager +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml +helm repo add jetstack https://charts.jetstack.io +helm repo update +helm install cert-manager jetstack/cert-manager \ +--namespace cert-manager \ +--create-namespace \ +--version v1.13.2 +kubectl get pods --namespace cert-manager + +# Step 14: Install Rancher +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin +kubectl -n cattle-system rollout status deploy/rancher +kubectl -n cattle-system get deploy rancher + +# Step 15: Expose Rancher via Loadbalancer +kubectl get svc -n cattle-system +kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system +kubectl get svc -n cattle-system + +# Profit: Go to Rancher GUI +echo -e " \033[32;5mHit the url… and create your account\033[0m" +echo -e " \033[32;5mBe patient as it downloads and configures a number of pods in the background to support the UI (can be 5-10mins)\033[0m" + +# Step 16: Install Longhorn (using modified Official to pin to Longhorn Nodes) +echo -e " \033[32;5mInstalling Longhorn - It can take a while for all pods to deploy...\033[0m" kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml kubectl get pods \ --namespace longhorn-system \ --watch -# Step 12: Print out confirmation +# Step 17: Print out confirmation kubectl get nodes kubectl get svc -n longhorn-system -echo -e " \033[32;5mHappy Kubing! Access Longhorn through Rancher UI\033[0m" +echo -e " \033[32;5mHappy Kubing! 
Access Longhorn through Rancher UI\033[0m" \ No newline at end of file From d4adc0ff9b1e61e0490bb4769d194e44b2afdc00 Mon Sep 17 00:00:00 2001 From: James Turland Date: Wed, 24 Jan 2024 20:59:44 +0000 Subject: [PATCH 67/67] update v1.5.3 --- Kubernetes/Longhorn/longhorn.yaml | 100 +++++++++++++++--------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/Kubernetes/Longhorn/longhorn.yaml b/Kubernetes/Longhorn/longhorn.yaml index 30ae09f..5442cef 100644 --- a/Kubernetes/Longhorn/longhorn.yaml +++ b/Kubernetes/Longhorn/longhorn.yaml @@ -14,7 +14,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 --- # Source: longhorn/templates/serviceaccount.yaml apiVersion: v1 @@ -25,7 +25,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 --- # Source: longhorn/templates/default-setting.yaml apiVersion: v1 @@ -36,7 +36,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 data: default-setting.yaml: |- system-managed-components-node-selector: longhorn=true @@ -50,7 +50,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 data: storageclass.yaml: | kind: StorageClass @@ -80,7 +80,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimagedatasources.longhorn.io spec: @@ -251,7 +251,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimagemanagers.longhorn.io spec: @@ -427,7 +427,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimages.longhorn.io spec: @@ -586,7 +586,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backups.longhorn.io spec: @@ -782,7 +782,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backuptargets.longhorn.io spec: @@ -965,7 +965,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backupvolumes.longhorn.io spec: @@ -1132,7 +1132,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: engineimages.longhorn.io spec: @@ -1324,7 +1324,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: engines.longhorn.io spec: @@ -1679,7 +1679,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: 
longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: instancemanagers.longhorn.io spec: @@ -1920,7 +1920,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: nodes.longhorn.io spec: @@ -2164,7 +2164,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: orphans.longhorn.io spec: @@ -2435,7 +2435,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: replicas.longhorn.io spec: @@ -2652,7 +2652,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: settings.longhorn.io spec: @@ -2743,7 +2743,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: sharemanagers.longhorn.io spec: @@ -2858,7 +2858,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: snapshots.longhorn.io spec: @@ -2985,7 +2985,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: supportbundles.longhorn.io spec: @@ -3111,7 +3111,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: systembackups.longhorn.io spec: @@ -3239,7 +3239,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: systemrestores.longhorn.io spec: @@ -3341,7 +3341,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: volumes.longhorn.io spec: @@ -3703,7 +3703,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: volumeattachments.longhorn.io spec: @@ -3832,7 +3832,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 rules: - apiGroups: - apiextensions.k8s.io @@ -3898,7 +3898,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -3916,7 +3916,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -3933,7 +3933,7 @@ metadata: labels: app.kubernetes.io/name: longhorn 
app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager name: longhorn-backend namespace: longhorn-system @@ -3954,7 +3954,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui name: longhorn-frontend namespace: longhorn-system @@ -3975,7 +3975,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-conversion-webhook name: longhorn-conversion-webhook namespace: longhorn-system @@ -3996,7 +3996,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-admission-webhook name: longhorn-admission-webhook namespace: longhorn-system @@ -4017,7 +4017,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-recovery-backend name: longhorn-recovery-backend namespace: longhorn-system @@ -4038,7 +4038,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 name: longhorn-engine-manager namespace: longhorn-system spec: @@ -4054,7 +4054,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 name: longhorn-replica-manager namespace: longhorn-system spec: @@ -4070,7 +4070,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager name: longhorn-manager namespace: longhorn-system @@ -4083,12 +4083,12 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -4097,17 +4097,17 @@ spec: - -d - daemon - --engine-image - - "longhornio/longhorn-engine:v1.5.1" + - "longhornio/longhorn-engine:v1.5.3" - --instance-manager-image - - "longhornio/longhorn-instance-manager:v1.5.1" + - "longhornio/longhorn-instance-manager:v1.5.3" - --share-manager-image - - "longhornio/longhorn-share-manager:v1.5.1" + - "longhornio/longhorn-share-manager:v1.5.3" - --backing-image-manager-image - - "longhornio/backing-image-manager:v1.5.1" + - "longhornio/backing-image-manager:v1.5.3" - --support-bundle-manager-image - "longhornio/support-bundle-kit:v0.0.25" - --manager-image - - "longhornio/longhorn-manager:v1.5.1" + - "longhornio/longhorn-manager:v1.5.3" - --service-account - longhorn-service-account ports: @@ -4177,7 +4177,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 spec: replicas: 1 selector: @@ -4188,23 +4188,23 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-driver-deployer 
spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - "longhornio/longhorn-manager:v1.5.1" + - "longhornio/longhorn-manager:v1.5.3" - --manager-url - http://longhorn-backend:9500/v1 env: @@ -4245,7 +4245,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui name: longhorn-ui namespace: longhorn-system @@ -4259,7 +4259,7 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui spec: affinity: @@ -4276,7 +4276,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.5.1 + image: longhornio/longhorn-ui:v1.5.3 imagePullPolicy: IfNotPresent volumeMounts: - name : nginx-cache