diff --git a/Docker-Swarm/portainer-agent-stack.yml b/Docker-Swarm/portainer-agent-stack.yml new file mode 100644 index 0000000..d2dac40 --- /dev/null +++ b/Docker-Swarm/portainer-agent-stack.yml @@ -0,0 +1,38 @@ +version: '3.2' + +services: + agent: + image: portainer/agent:2.19.4 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - /var/lib/docker/volumes:/var/lib/docker/volumes + networks: + - agent_network + deploy: + mode: global + placement: + constraints: [node.platform.os == linux] + + portainer: + image: portainer/portainer-ce:2.19.4 + command: -H tcp://tasks.agent:9001 --tlsskipverify + ports: + - "9443:9443" + - "9000:9000" + - "8000:8000" + volumes: + - type: bind + source: /mnt/Portainer + target: /data + networks: + - agent_network + deploy: + mode: replicated + replicas: 1 + placement: + constraints: [node.role == manager] + +networks: + agent_network: + driver: overlay + attachable: true \ No newline at end of file diff --git a/Docker-Swarm/swarm-3-nodes.sh b/Docker-Swarm/swarm-3-nodes.sh new file mode 100644 index 0000000..57a562f --- /dev/null +++ b/Docker-Swarm/swarm-3-nodes.sh @@ -0,0 +1,171 @@ +#!/bin/bash + +echo -e " \033[33;5m __ _ _ ___ \033[0m" +echo -e " \033[33;5m \ \(_)_ __ ___( )__ / _ \__ _ _ __ __ _ __ _ ___ \033[0m" +echo -e " \033[33;5m \ \ | '_ \` _ \/ __| / /_\/ _\` | '__/ _\` |/ _\` |/ _ \ \033[0m" +echo -e " \033[33;5m /\_/ / | | | | | \__ \ / /_\\ (_| | | | (_| | (_| | __/ \033[0m" +echo -e " \033[33;5m \___/|_|_| |_| |_|___/ \____/\__,_|_| \__,_|\__, |\___| \033[0m" +echo -e " \033[33;5m |___/ \033[0m" + +echo -e " \033[36;5m ___ _ ___ \033[0m" +echo -e " \033[36;5m | \ ___ __| |_____ _ _ / __|_ __ ____ _ _ _ _ __ \033[0m" +echo -e " \033[36;5m | |) / _ \/ _| / / -_) '_| \__ \ V V / _\` | '_| ' \ \033[0m" +echo -e " \033[36;5m |___/\___/\__|_\_\___|_| |___/\_/\_/\__,_|_| |_|_|_| \033[0m" +echo -e " \033[36;5m \033[0m" +echo -e " \033[32;5m https://youtube.com/@jims-garage \033[0m" +echo -e " \033[32;5m \033[0m" + + +############################################# +# YOU SHOULD ONLY NEED TO EDIT THIS SECTION # +############################################# + +# Set the IP addresses of the admin, managers, and workers nodes +admin=192.168.3.5 +manager1=192.168.3.21 +manager2=192.168.3.22 +manager3=192.168.3.23 +worker1=192.168.3.24 +worker2=192.168.3.25 + +# Set the workers' hostnames (if using cloud-init in Proxmox it's the name of the VM) +workerHostname1=dockerSwarm-04 +workerHostname2=dockerSwarm-05 + +# User of remote machines +user=ubuntu + +# Interface used on remotes +interface=eth0 + +# Array of all manager nodes +allmanagers=($manager1 $manager2 $manager3) + +# Array of manager nodes +managers=($manager2 $manager3) + +# Array of worker nodes +workers=($worker1 $worker2) + +# Array of all +all=($manager1 $worker1 $worker2) + +#ssh certificate name variable +certName=id_rsa + +############################################# +# DO NOT EDIT BELOW # +############################################# +# For testing purposes - in case time is wrong due to VM snapshots +sudo timedatectl set-ntp off +sudo timedatectl set-ntp on + +# Move SSH certs to ~/.ssh and change permissions +cp /home/$user/{$certName,$certName.pub} /home/$user/.ssh +chmod 600 /home/$user/.ssh/$certName +chmod 644 /home/$user/.ssh/$certName.pub + +# Create SSH Config file to ignore checking (don't use in production!) 
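+# Alternative (safer) approach, shown here only as a sketch and not part of the original
+# workflow: pre-populate known_hosts instead of disabling host key checking, e.g.
+#   ssh-keyscan -H "${all[@]}" >> ~/.ssh/known_hosts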
+echo "StrictHostKeyChecking no" > ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Copy SSH keys to MN1 to copy tokens back later +scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh +scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh + + +# Install dependencies for each node (Docker, GlusterFS) +for newnode in "${all[@]}"; do + ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null + apt-get update + NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y + systemctl start glusterd + systemctl enable glusterd + mkdir -p /gluster/volume1 + exit +EOF + echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m" +done + +# Step 1: Create Swarm on first node +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < manager.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt +echo "StrictHostKeyChecking no" > ~/.ssh/config +ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin +scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager +scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker +exit +EOF +echo -e " \033[32;5mManager1 Completed\033[0m" + +# Step 2: Set variables +managerToken=`cat manager` +workerToken=`cat worker` + +# Step 3: Connect additional worker +for newnode in "${workers[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/fstab + mount.glusterfs localhost:/staging-gfs /mnt + chown -R root:docker /mnt + exit +EOF + echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m" +done + +# OPTIONAL # +# Step 6: Add Portainer +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < ~/.ssh/config + +#add ssh keys for all nodes +for node in "${all[@]}"; do + ssh-copy-id $user@$node +done + +# Copy SSH keys to MN1 to copy tokens back later +scp -i /home/$user/.ssh/$certName /home/$user/$certName $user@$manager1:~/.ssh +scp -i /home/$user/.ssh/$certName /home/$user/$certName.pub $user@$manager1:~/.ssh + + +# Install dependencies for each node (Docker, GlusterFS) +for newnode in "${all[@]}"; do + ssh $user@$newnode -i ~/.ssh/$certName sudo su < /dev/null + apt-get update + NEEDRESTART_MODE=a apt install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -y + NEEDRESTART_MODE=a apt install software-properties-common glusterfs-server -y + systemctl start glusterd + systemctl enable glusterd + mkdir -p /gluster/volume1 + exit +EOF + echo -e " \033[32;5m$newnode - Docker & GlusterFS installed!\033[0m" +done + +# Step 1: Create Swarm on first node +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < manager.txt +docker swarm join-token worker | sed -n 3p | grep -Po 'docker swarm join --token \\K[^\\s]*' > worker.txt +echo "StrictHostKeyChecking no" > ~/.ssh/config +ssh-copy-id -i /home/$user/.ssh/$certName $user@$admin +scp -i /home/$user/.ssh/$certName /home/$user/manager.txt $user@$admin:~/manager +scp -i /home/$user/.ssh/$certName /home/$user/worker.txt $user@$admin:~/worker +exit +EOF +echo -e " \033[32;5mManager1 Completed\033[0m" + +# Step 2: Set variables +managerToken=`cat manager` +workerToken=`cat worker` + +# Step 3: Connect additional managers +for newnode in "${managers[@]}"; do + ssh -tt $user@$newnode -i ~/.ssh/$certName sudo su <> /etc/fstab + 
mount.glusterfs localhost:/staging-gfs /mnt + chown -R root:docker /mnt + exit +EOF + echo -e " \033[32;5m$newnode - GlusterFS mounted on reboot\033[0m" +done + +# OPTIONAL # +# Step 7: Add Portainer +ssh -tt $user@$manager1 -i ~/.ssh/$certName sudo su < +server: https://:9345 +node-label: + - worker=true + - longhorn=true +``` +# Install RKE2 +``` +sudo su +curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - +``` +# Enable RKE2 +``` +systemctl enable rke2-agent.service +systemctl start rke2-agent.service +``` \ No newline at end of file diff --git a/Kubernetes/K3S-Deploy/k3s.sh b/Kubernetes/K3S-Deploy/k3s.sh index e6876d4..2319e47 100644 --- a/Kubernetes/K3S-Deploy/k3s.sh +++ b/Kubernetes/K3S-Deploy/k3s.sh @@ -40,7 +40,7 @@ user=ubuntu interface=eth0 # Set the virtual IP address (VIP) -vip=192.168.1.50 +vip=192.168.3.50 # Array of master nodes masters=($master2 $master3) @@ -55,7 +55,7 @@ all=($master1 $master2 $master3 $worker1 $worker2) allnomaster1=($master2 $master3 $worker1 $worker2) #Loadbalancer IP range -lbrange=192.168.1.61-192.168.1.79 +lbrange=192.168.3.60-192.168.3.80 #ssh certificate name variable certName=id_rsa @@ -92,17 +92,6 @@ else echo -e " \033[32;5mKubectl already installed\033[0m" fi -# Install Docker to generate manifest and daemonset if not already present -if ! command -v docker version &> /dev/null -then - echo -e " \033[31;5mDocker not found, installing\033[0m" - curl -fsSL https://get.docker.com -o get-docker.sh - sudo sh get-docker.sh - wait $! -else - echo -e " \033[32;5mDocker already installed\033[0m" -fi - # Create SSH Config file to ignore checking (don't use in production!) echo "StrictHostKeyChecking no" > ~/.ssh/config @@ -128,7 +117,7 @@ k3sup install \ --tls-san $vip \ --cluster \ --k3s-version $k3sVersion \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --merge \ --sudo \ --local-path $HOME/.kube/config \ @@ -137,32 +126,23 @@ k3sup install \ echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m" # Step 2: Install Kube-VIP for HA -kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml -# Step 3: Generate Daemonset with Docker -sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION manifest daemonset \ - --interface $interface \ - --address $vip \ - --inCluster \ - --taint \ - --controlplane \ - --services \ - --arp \ - --leaderElection | tee $HOME/kube-vip.yaml +# Step 3: Download kube-vip +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml # Step 4: Copy kube-vip.yaml to master1 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml # Step 5: Connect to Master1 and move kube-vip.yaml - ssh $user@$master1 -i ~/.ssh/$certName <<- EOF sudo mkdir -p /var/lib/rancher/k3s/server/manifests sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml EOF -# Step 6: Add new master nodes (servers) +# Step 6: Add new master nodes (servers) & workers for newnode in "${masters[@]}"; do k3sup join \ --ip $newnode \ @@ -172,11 +152,12 @@ for newnode in "${masters[@]}"; do --server \ --server-ip $master1 \ --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--disable traefik --disable servicelb 
--flannel-iface=$interface --node-ip=$newnode" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --server-user $user echo -e " \033[32;5mMaster node joined successfully!\033[0m" done +# add workers for newagent in "${workers[@]}"; do k3sup join \ --ip $newagent \ @@ -184,26 +165,39 @@ for newagent in "${workers[@]}"; do --sudo \ --k3s-version $k3sVersion \ --server-ip $master1 \ - --ssh-key $HOME/.ssh/$certName + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done # Step 7: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -#IP range for loadbalancer services to use -kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange +# Step 8: Install Metallb +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/ipAddressPool +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml -# Step 8: Test with Nginx +# Step 9: Test with Nginx kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default -echo -e " \033[32;5mWaiting 20s for K3S to sync and LoadBalancer to come online\033[0m" +echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do sleep 1 done +# Step 10: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml + kubectl get nodes kubectl get svc kubectl get pods --all-namespaces -o wide diff --git a/Kubernetes/K3S-Deploy/kube-vip.yaml b/Kubernetes/K3S-Deploy/kube-vip similarity index 98% rename from Kubernetes/K3S-Deploy/kube-vip.yaml rename to Kubernetes/K3S-Deploy/kube-vip index e703687..83dcbc5 100644 --- a/Kubernetes/K3S-Deploy/kube-vip.yaml +++ b/Kubernetes/K3S-Deploy/kube-vip @@ -47,7 +47,7 @@ spec: - name: vip_ddns value: "false" - name: svc_enable - value: "true" + value: "false" - name: svc_leasename value: plndr-svcs-lock - name: vip_leaderelection diff --git a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh b/Kubernetes/Kubernetes-Lite/k3s.sh similarity index 67% rename from Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh rename to Kubernetes/Kubernetes-Lite/k3s.sh index 73f7151..fc99626 100644 --- a/Kubernetes/K3S-Deploy/k3s-test-do-not-use.sh +++ b/Kubernetes/Kubernetes-Lite/k3s.sh @@ -20,6 +20,10 @@ echo -e " \033[32;5m \ # YOU SHOULD ONLY NEED TO EDIT THIS SECTION # ############################################# +# This is an update version of the K3S script that install longhorn 
on the worker nodes. +# The worker nodes are scaled to 3 for redundancy and HA +# This has the added benefit of using local storage on worker nodes (faster) + # Version of Kube-VIP to deploy KVVERSION="v0.6.3" @@ -32,6 +36,7 @@ master2=192.168.3.22 master3=192.168.3.23 worker1=192.168.3.24 worker2=192.168.3.25 +worker3=192.168.3.26 # User of remote machines user=ubuntu @@ -46,13 +51,13 @@ vip=192.168.3.50 masters=($master2 $master3) # Array of worker nodes -workers=($worker1 $worker2) +workers=($worker1 $worker2 $worker3) # Array of all -all=($master1 $master2 $master3 $worker1 $worker2) +all=($master1 $master2 $master3 $worker1 $worker2 $worker3) # Array of all minus master -allnomaster1=($master2 $master3 $worker1 $worker2) +allnomaster1=($master2 $master3 $worker1 $worker2 $worker3) #Loadbalancer IP range lbrange=192.168.3.60-192.168.3.80 @@ -117,7 +122,7 @@ k3sup install \ --tls-san $vip \ --cluster \ --k3s-version $k3sVersion \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$master1 --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --merge \ --sudo \ --local-path $HOME/.kube/config \ @@ -126,24 +131,23 @@ k3sup install \ echo -e " \033[32;5mFirst Node bootstrapped successfully!\033[0m" # Step 2: Install Kube-VIP for HA -kubectl k3s-ha kubectl apply -f https://kube-vip.io/manifests/rbac.yaml # Step 3: Download kube-vip -curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml # Step 4: Copy kube-vip.yaml to master1 scp -i ~/.ssh/$certName $HOME/kube-vip.yaml $user@$master1:~/kube-vip.yaml # Step 5: Connect to Master1 and move kube-vip.yaml - ssh $user@$master1 -i ~/.ssh/$certName <<- EOF sudo mkdir -p /var/lib/rancher/k3s/server/manifests sudo mv kube-vip.yaml /var/lib/rancher/k3s/server/manifests/kube-vip.yaml EOF -# Step 6: Add new master nodes (servers) +# Step 6: Add new master nodes (servers) & workers for newnode in "${masters[@]}"; do k3sup join \ --ip $newnode \ @@ -153,11 +157,12 @@ for newnode in "${masters[@]}"; do --server \ --server-ip $master1 \ --ssh-key $HOME/.ssh/$certName \ - --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode" \ + --k3s-extra-args "--disable traefik --disable servicelb --flannel-iface=$interface --node-ip=$newnode --node-taint node-role.kubernetes.io/master=true:NoSchedule" \ --server-user $user echo -e " \033[32;5mMaster node joined successfully!\033[0m" done +# add workers for newagent in "${workers[@]}"; do k3sup join \ --ip $newagent \ @@ -165,7 +170,8 @@ for newagent in "${workers[@]}"; do --sudo \ --k3s-version $k3sVersion \ --server-ip $master1 \ - --ssh-key $HOME/.ssh/$certName + --ssh-key $HOME/.ssh/$certName \ + --k3s-extra-args "--node-label \"longhorn=true\" --node-label \"worker=true\"" echo -e " \033[32;5mAgent node joined successfully!\033[0m" done @@ -183,13 +189,17 @@ cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/inlets/inlets-operator/master/contrib/nginx-sample-deployment.yaml -n default kubectl expose deployment nginx-1 --port=80 --type=LoadBalancer -n default -echo -e " \033[32;5mWaiting 20s for 
K3S to sync and LoadBalancer to come online\033[0m" +echo -e " \033[32;5mWaiting for K3S to sync and LoadBalancer to come online\033[0m" while [[ $(kubectl get pods -l app=nginx -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do sleep 1 done # Step 10: Deploy IP Pools and l2Advertisement +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=120s kubectl apply -f ipAddressPool.yaml kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/K3S-Deploy/l2Advertisement.yaml @@ -198,3 +208,53 @@ kubectl get svc kubectl get pods --all-namespaces -o wide echo -e " \033[32;5mHappy Kubing! Access Nginx at EXTERNAL-IP above\033[0m" + +# Step 11: Install helm +curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 +chmod 700 get_helm.sh +./get_helm.sh + +# Step 12: Add Rancher Helm Repository +helm repo add rancher-latest https://releases.rancher.com/server-charts/latest +kubectl create namespace cattle-system + +# Step 13: Install Cert-Manager +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml +helm repo add jetstack https://charts.jetstack.io +helm repo update +helm install cert-manager jetstack/cert-manager \ +--namespace cert-manager \ +--create-namespace \ +--version v1.13.2 +kubectl get pods --namespace cert-manager + +# Step 14: Install Rancher +helm install rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ + --set bootstrapPassword=admin +kubectl -n cattle-system rollout status deploy/rancher +kubectl -n cattle-system get deploy rancher + +# Step 15: Expose Rancher via Loadbalancer +kubectl get svc -n cattle-system +kubectl expose deployment rancher --name=rancher-lb --port=443 --type=LoadBalancer -n cattle-system +kubectl get svc -n cattle-system + +# Profit: Go to Rancher GUI +echo -e " \033[32;5mHit the url… and create your account\033[0m" +echo -e " \033[32;5mBe patient as it downloads and configures a number of pods in the background to support the UI (can be 5-10mins)\033[0m" + +# Step 16: Install Longhorn (using modified Official to pin to Longhorn Nodes) +echo -e " \033[32;5mInstalling Longhorn - It can take a while for all pods to deploy...\033[0m" +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/Longhorn/longhorn.yaml +kubectl get pods \ +--namespace longhorn-system \ +--watch + +# Step 17: Print out confirmation + +kubectl get nodes +kubectl get svc -n longhorn-system + +echo -e " \033[32;5mHappy Kubing! 
Access Longhorn through Rancher UI\033[0m" \ No newline at end of file diff --git a/Kubernetes/Longhorn/longhorn.yaml b/Kubernetes/Longhorn/longhorn.yaml index 30ae09f..5442cef 100644 --- a/Kubernetes/Longhorn/longhorn.yaml +++ b/Kubernetes/Longhorn/longhorn.yaml @@ -14,7 +14,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 --- # Source: longhorn/templates/serviceaccount.yaml apiVersion: v1 @@ -25,7 +25,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 --- # Source: longhorn/templates/default-setting.yaml apiVersion: v1 @@ -36,7 +36,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 data: default-setting.yaml: |- system-managed-components-node-selector: longhorn=true @@ -50,7 +50,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 data: storageclass.yaml: | kind: StorageClass @@ -80,7 +80,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimagedatasources.longhorn.io spec: @@ -251,7 +251,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimagemanagers.longhorn.io spec: @@ -427,7 +427,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backingimages.longhorn.io spec: @@ -586,7 +586,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backups.longhorn.io spec: @@ -782,7 +782,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backuptargets.longhorn.io spec: @@ -965,7 +965,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: backupvolumes.longhorn.io spec: @@ -1132,7 +1132,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: engineimages.longhorn.io spec: @@ -1324,7 +1324,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: engines.longhorn.io spec: @@ -1679,7 +1679,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: instancemanagers.longhorn.io spec: @@ -1920,7 +1920,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + 
app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: nodes.longhorn.io spec: @@ -2164,7 +2164,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: orphans.longhorn.io spec: @@ -2435,7 +2435,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: replicas.longhorn.io spec: @@ -2652,7 +2652,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: settings.longhorn.io spec: @@ -2743,7 +2743,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: sharemanagers.longhorn.io spec: @@ -2858,7 +2858,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: snapshots.longhorn.io spec: @@ -2985,7 +2985,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: supportbundles.longhorn.io spec: @@ -3111,7 +3111,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: systembackups.longhorn.io spec: @@ -3239,7 +3239,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: systemrestores.longhorn.io spec: @@ -3341,7 +3341,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: volumes.longhorn.io spec: @@ -3703,7 +3703,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 longhorn-manager: "" name: volumeattachments.longhorn.io spec: @@ -3832,7 +3832,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 rules: - apiGroups: - apiextensions.k8s.io @@ -3898,7 +3898,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -3916,7 +3916,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -3933,7 +3933,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager name: longhorn-backend namespace: longhorn-system @@ -3954,7 +3954,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn 
- app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui name: longhorn-frontend namespace: longhorn-system @@ -3975,7 +3975,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-conversion-webhook name: longhorn-conversion-webhook namespace: longhorn-system @@ -3996,7 +3996,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-admission-webhook name: longhorn-admission-webhook namespace: longhorn-system @@ -4017,7 +4017,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-recovery-backend name: longhorn-recovery-backend namespace: longhorn-system @@ -4038,7 +4038,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 name: longhorn-engine-manager namespace: longhorn-system spec: @@ -4054,7 +4054,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 name: longhorn-replica-manager namespace: longhorn-system spec: @@ -4070,7 +4070,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager name: longhorn-manager namespace: longhorn-system @@ -4083,12 +4083,12 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-manager spec: containers: - name: longhorn-manager - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 imagePullPolicy: IfNotPresent securityContext: privileged: true @@ -4097,17 +4097,17 @@ spec: - -d - daemon - --engine-image - - "longhornio/longhorn-engine:v1.5.1" + - "longhornio/longhorn-engine:v1.5.3" - --instance-manager-image - - "longhornio/longhorn-instance-manager:v1.5.1" + - "longhornio/longhorn-instance-manager:v1.5.3" - --share-manager-image - - "longhornio/longhorn-share-manager:v1.5.1" + - "longhornio/longhorn-share-manager:v1.5.3" - --backing-image-manager-image - - "longhornio/backing-image-manager:v1.5.1" + - "longhornio/backing-image-manager:v1.5.3" - --support-bundle-manager-image - "longhornio/support-bundle-kit:v0.0.25" - --manager-image - - "longhornio/longhorn-manager:v1.5.1" + - "longhornio/longhorn-manager:v1.5.3" - --service-account - longhorn-service-account ports: @@ -4177,7 +4177,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 spec: replicas: 1 selector: @@ -4188,23 +4188,23 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-driver-deployer spec: initContainers: - name: wait-longhorn-manager - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; 
done'] containers: - name: longhorn-driver-deployer - image: longhornio/longhorn-manager:v1.5.1 + image: longhornio/longhorn-manager:v1.5.3 imagePullPolicy: IfNotPresent command: - longhorn-manager - -d - deploy-driver - --manager-image - - "longhornio/longhorn-manager:v1.5.1" + - "longhornio/longhorn-manager:v1.5.3" - --manager-url - http://longhorn-backend:9500/v1 env: @@ -4245,7 +4245,7 @@ metadata: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui name: longhorn-ui namespace: longhorn-system @@ -4259,7 +4259,7 @@ spec: labels: app.kubernetes.io/name: longhorn app.kubernetes.io/instance: longhorn - app.kubernetes.io/version: v1.5.1 + app.kubernetes.io/version: v1.5.3 app: longhorn-ui spec: affinity: @@ -4276,7 +4276,7 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: longhorn-ui - image: longhornio/longhorn-ui:v1.5.1 + image: longhornio/longhorn-ui:v1.5.3 imagePullPolicy: IfNotPresent volumeMounts: - name : nginx-cache diff --git a/Kubernetes/NetworkPolicies/allow-all-ingress.yaml b/Kubernetes/NetworkPolicies/allow-all-ingress.yaml new file mode 100644 index 0000000..462912d --- /dev/null +++ b/Kubernetes/NetworkPolicies/allow-all-ingress.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-ingress +spec: + podSelector: {} + ingress: + - {} + policyTypes: + - Ingress diff --git a/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml b/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml new file mode 100644 index 0000000..e823802 --- /dev/null +++ b/Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny-ingress +spec: + podSelector: {} + policyTypes: + - Ingress diff --git a/Kubernetes/NetworkPolicies/example.yaml b/Kubernetes/NetworkPolicies/example.yaml new file mode 100644 index 0000000..e91eed2 --- /dev/null +++ b/Kubernetes/NetworkPolicies/example.yaml @@ -0,0 +1,35 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: test-network-policy + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Ingress + - Egress + ingress: + - from: + - ipBlock: + cidr: 172.17.0.0/16 + except: + - 172.17.1.0/24 + - namespaceSelector: + matchLabels: + project: myproject + - podSelector: + matchLabels: + role: frontend + ports: + - protocol: TCP + port: 6379 + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 5978 + diff --git a/Kubernetes/NetworkPolicies/namespace-example.yaml b/Kubernetes/NetworkPolicies/namespace-example.yaml new file mode 100644 index 0000000..e8ed653 --- /dev/null +++ b/Kubernetes/NetworkPolicies/namespace-example.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: egress-namespaces +spec: + podSelector: + matchLabels: + app: myapp + policyTypes: + - Egress + egress: + - to: + - namespaceSelector: + matchExpressions: + - key: namespace + operator: In + values: ["frontend", "backend"] \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml b/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml new file mode 100644 index 0000000..5671ac8 --- /dev/null +++ b/Kubernetes/NetworkPolicies/networkpolicy-egress.yaml @@ -0,0 +1,24 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: 
allow-internet-only + namespace: pihole +spec: + podSelector: {} + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 192.168.0.0/16 + - 172.16.0.0/20 + - to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: "kube-system" + - podSelector: + matchLabels: + k8s-app: "kube-dns" \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml b/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml new file mode 100644 index 0000000..bdc8c95 --- /dev/null +++ b/Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml @@ -0,0 +1,17 @@ +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: restrict-internal + namespace: pihole +spec: + podSelector: {} + policyTypes: + - Ingress + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + except: + - 10.0.0.0/8 + - 192.168.0.0/16 + - 172.16.0.0/20 \ No newline at end of file diff --git a/Kubernetes/NetworkPolicies/port-example.yaml b/Kubernetes/NetworkPolicies/port-example.yaml new file mode 100644 index 0000000..f4c914b --- /dev/null +++ b/Kubernetes/NetworkPolicies/port-example.yaml @@ -0,0 +1,20 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: multi-port-egress + namespace: default +spec: + podSelector: + matchLabels: + role: db + policyTypes: + - Egress + egress: + - to: + - ipBlock: + cidr: 10.0.0.0/24 + ports: + - protocol: TCP + port: 32000 + endPort: 32768 + diff --git a/Kubernetes/RKE2/ipAddressPool b/Kubernetes/RKE2/ipAddressPool new file mode 100644 index 0000000..ffd58cc --- /dev/null +++ b/Kubernetes/RKE2/ipAddressPool @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: IPAddressPool +metadata: + name: first-pool + namespace: metallb-system +spec: + addresses: + - $lbrange \ No newline at end of file diff --git a/Kubernetes/RKE2/k3s b/Kubernetes/RKE2/k3s deleted file mode 100644 index 41b889c..0000000 --- a/Kubernetes/RKE2/k3s +++ /dev/null @@ -1,59 +0,0 @@ -#!/bin/bash - -echo "apiVersion: apps/v1 -kind: DaemonSet -metadata: - creationTimestamp: null - name: kube-vip-ds - namespace: kube-system -spec: - selector: - matchLabels: - name: kube-vip-ds - template: - metadata: - creationTimestamp: null - labels: - name: kube-vip-ds - spec: - containers: - - args: - - manager - env: - - name: vip_arp - value: \"true\" - - name: vip_interface - value: $vipInterface - - name: port - value: \"6443\" - - name: vip_cidr - value: \"32\" - - name: cp_enable - value: \"true\" - - name: cp_namespace - value: kube-system - - name: svc_enable - value: \"true\" - - name: vip_address - value: $vipAddress - image: ghcr.io/kube-vip/kube-vip:v0.5.11 - imagePullPolicy: Always - name: kube-vip - resources: {} - securityContext: - capabilities: - add: - - NET_ADMIN - - NET_RAW - - SYS_TIME - hostNetwork: true - serviceAccountName: kube-vip - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - updateStrategy: {} -status: - currentNumberScheduled: 0 - desiredNumberScheduled: 0 - numberMisscheduled: 0 - numberReady: 0" diff --git a/Kubernetes/RKE2/kube-vip b/Kubernetes/RKE2/kube-vip new file mode 100644 index 0000000..83dcbc5 --- /dev/null +++ b/Kubernetes/RKE2/kube-vip @@ -0,0 +1,89 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.3 + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + app.kubernetes.io/name: kube-vip-ds + template: + metadata: + 
creationTimestamp: null + labels: + app.kubernetes.io/name: kube-vip-ds + app.kubernetes.io/version: v0.6.3 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: $interface + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "false" + - name: svc_leasename + value: plndr-svcs-lock + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: address + value: $vip + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.3 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + diff --git a/Kubernetes/RKE2/l2Advertisement.yaml b/Kubernetes/RKE2/l2Advertisement.yaml new file mode 100644 index 0000000..b6f8c4d --- /dev/null +++ b/Kubernetes/RKE2/l2Advertisement.yaml @@ -0,0 +1,8 @@ +apiVersion: metallb.io/v1beta1 +kind: L2Advertisement +metadata: + name: example + namespace: metallb-system +spec: + ipAddressPools: + - first-pool \ No newline at end of file diff --git a/Kubernetes/RKE2/rke2.sh b/Kubernetes/RKE2/rke2.sh index 957e136..db1ea80 100644 --- a/Kubernetes/RKE2/rke2.sh +++ b/Kubernetes/RKE2/rke2.sh @@ -95,7 +95,10 @@ done # create RKE2's self-installing manifest dir sudo mkdir -p /var/lib/rancher/rke2/server/manifests # Install the kube-vip deployment into rke2's self-installing manifest folder -curl -sL https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/k3s | vipAddress=$vip vipInterface=$interface sh | sudo tee /var/lib/rancher/rke2/server/manifests/kube-vip.yaml +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/kube-vip +cat kube-vip | sed 's/$interface/'$interface'/g; s/$vip/'$vip'/g' > $HOME/kube-vip.yaml +sudo mv kube-vip.yaml /var/lib/rancher/rke2/server/manifests/kube-vip.yaml + # Find/Replace all k3s entries to represent rke2 sudo sed -i 's/k3s/rke2/g' /var/lib/rancher/rke2/server/manifests/kube-vip.yaml # copy kube-vip.yaml to home directory @@ -159,8 +162,6 @@ kubectl get nodes # Step 5: Install kube-vip as network LoadBalancer - Install the kube-vip Cloud Provider kubectl apply -f https://kube-vip.io/manifests/rbac.yaml kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -#IP range for loadbalancer services to use -kubectl create configmap -n kube-system kubevip --from-literal range-global=$lbrange # Step 6: Add other Masternodes, note we import the token we extracted from step 3 for newnode in "${masters[@]}"; do @@ -199,13 +200,31 @@ for newnode in "${workers[@]}"; do systemctl start rke2-agent.service exit EOF - echo -e " 
\033[32;5mMaster node joined successfully!\033[0m" + echo -e " \033[32;5mWorker node joined successfully!\033[0m" done kubectl get nodes -# Step 8: Install Rancher (Optional - Delete if not required) +# Step 8: Install Metallb +echo -e " \033[32;5mDeploying Metallb\033[0m" +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +# Download ipAddressPool and configure using lbrange above +curl -sO https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/ipAddressPool +cat ipAddressPool | sed 's/$lbrange/'$lbrange'/g' > $HOME/ipAddressPool.yaml + +# Step 9: Deploy IP Pools and l2Advertisement +echo -e " \033[32;5mAdding IP Pools, waiting for Metallb to be available first. This can take a long time as we're likely being rate limited for container pulls...\033[0m" +kubectl wait --namespace metallb-system \ + --for=condition=ready pod \ + --selector=component=controller \ + --timeout=1800s +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml + +# Step 10: Install Rancher (Optional - Delete if not required) #Install Helm +echo -e " \033[32;5mInstalling Helm\033[0m" curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 chmod 700 get_helm.sh ./get_helm.sh @@ -215,6 +234,7 @@ helm repo add rancher-latest https://releases.rancher.com/server-charts/latest kubectl create namespace cattle-system # Install Cert-Manager +echo -e " \033[32;5mDeploying Cert-Manager\033[0m" kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.2/cert-manager.crds.yaml helm repo add jetstack https://charts.jetstack.io helm repo update @@ -225,6 +245,7 @@ helm install cert-manager jetstack/cert-manager \ kubectl get pods --namespace cert-manager # Install Rancher +echo -e " \033[32;5mDeploying Rancher\033[0m" helm install rancher rancher-latest/rancher \ --namespace cattle-system \ --set hostname=rancher.my.org \ @@ -241,4 +262,4 @@ while [[ $(kubectl get svc -n cattle-system 'jsonpath={..status.conditions[?(@.t done kubectl get svc -n cattle-system -echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" +echo -e " \033[32;5mAccess Rancher from the IP above - Password is admin!\033[0m" \ No newline at end of file diff --git a/Kubernetes/SMB/deployment.yaml b/Kubernetes/SMB/deployment.yaml new file mode 100644 index 0000000..0ff46d5 --- /dev/null +++ b/Kubernetes/SMB/deployment.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: jellyfin + app.kubernetes.io/instance: jellyfin + app.kubernetes.io/name: jellyfin + name: jellyfin + namespace: jellyfin +spec: + replicas: 1 + selector: + matchLabels: + app: jellyfin + template: + metadata: + labels: + app: jellyfin + app.kubernetes.io/name: jellyfin + spec: + nodeSelector: + worker: "true" + containers: + - image: jellyfin/jellyfin + imagePullPolicy: Always + name: jellyfin + ports: + - containerPort: 8096 + name: web + protocol: TCP + env: + - name: TZ + value: Europe/London + volumeMounts: + - mountPath: "/Audiobooks" + readOnly: false + name: smb + subPath: Audiobooks + - mountPath: "/Films" + readOnly: false + name: smb + subPath: Films + - mountPath: "/TVShows" + readOnly: false + name: smb + subPath: TVShows + - mountPath: "/Music" + readOnly: false + name: 
smb + subPath: Music + volumes: + - name: smb + persistentVolumeClaim: + claimName: pvc-jellyfin-smb +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: jellyfin + name: jellyfin + namespace: jellyfin +spec: + ports: + - name: web-tcp + port: 8096 + protocol: TCP + targetPort: 8096 + - name: web-udp + port: 8096 + protocol: UDP + targetPort: 8096 + selector: + app: jellyfin \ No newline at end of file diff --git a/Kubernetes/SMB/pv-smb.yaml b/Kubernetes/SMB/pv-smb.yaml new file mode 100644 index 0000000..d7db3f1 --- /dev/null +++ b/Kubernetes/SMB/pv-smb.yaml @@ -0,0 +1,27 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + annotations: + pv.kubernetes.io/provisioned-by: smb.csi.k8s.io + name: pv-jellyfin-smb +spec: + capacity: + storage: 100Gi + accessModes: + - ReadWriteMany + persistentVolumeReclaimPolicy: Retain + storageClassName: smb + mountOptions: + - dir_mode=0777 + - file_mode=0777 + csi: + driver: smb.csi.k8s.io + readOnly: false + # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name} + # make sure this value is unique for every share in the cluster + volumeHandle: jellyfin + volumeAttributes: + source: "//192.168.6.2/FreeNAS" # Change this to your SMB IP and share name + nodeStageSecretRef: + name: smbcreds + namespace: default \ No newline at end of file diff --git a/Kubernetes/SMB/pvc-smb.yaml b/Kubernetes/SMB/pvc-smb.yaml new file mode 100644 index 0000000..87402b0 --- /dev/null +++ b/Kubernetes/SMB/pvc-smb.yaml @@ -0,0 +1,14 @@ +--- +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: pvc-jellyfin-smb + namespace: jellyfin +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + volumeName: pv-jellyfin-smb + storageClassName: smb \ No newline at end of file diff --git a/Kubernetes/SMB/readme.md b/Kubernetes/SMB/readme.md new file mode 100644 index 0000000..8c2a887 --- /dev/null +++ b/Kubernetes/SMB/readme.md @@ -0,0 +1,20 @@ +# Install CSI driver +``` +curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/v1.13.0/deploy/install-driver.sh | bash -s v1.13.0 -- +``` + +# Create SMB creds +``` +kubectl create secret generic smbcreds --from-literal username=USERNAME --from-literal password="PASSWORD" +``` + +# Create storage class +``` +kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/deploy/example/storageclass-smb.yaml +``` + +# Check status +``` +kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-controller +kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-node +``` \ No newline at end of file diff --git a/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml b/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml index e5ced22..df1dac5 100644 --- a/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml +++ b/Kubernetes/Traefik-PiHole/Manifest/PiHole/PiHole-Deployment.yaml @@ -109,6 +109,6 @@ spec: targetPort: 53 selector: app: pihole - externalTrafficPolicy: Cluster + externalTrafficPolicy: Local loadBalancerIP: 192.168.3.67 # this is your DNS IP, NOT THE GUI! type: LoadBalancer diff --git a/Kubernetes/Upgrade/readme.md b/Kubernetes/Upgrade/readme.md new file mode 100644 index 0000000..1223de7 --- /dev/null +++ b/Kubernetes/Upgrade/readme.md @@ -0,0 +1,59 @@ +# Recommendations Before Upgrading +1. Snapshot / Backup your VMs! +2. Backup data and volumes if necessary +3. 
Drain nodes / scale down deployments + +# Upgrade Rancher +``` +helm upgrade rancher rancher-latest/rancher \ + --namespace cattle-system \ + --set hostname=rancher.my.org \ +``` +# Upgrade RKE2 (Each node, not Admin!) +``` +sudo curl -sfL https://get.rke2.io | INSTALL_RKE2_CHANNEL=latest sh - +``` +then servers: +``` +sudo systemctl restart rke2-server +``` +or agents +``` +sudo systemctl restart rke2-agent +``` +# Upgrade K3S (Each node, not Admin!) +``` +sudo curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -s - +``` +then servers: +``` +sudo systemctl restart k3s +``` +or agents +``` +sudo systemctl restart k3s-agent +``` + +# Upgrade Longhorn +``` +kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.5.3/deploy/longhorn.yaml +``` + +# Upgrade Metallb +1. Change version on the delete command to the version you are currently running (e.g., v0.13.11) +2. Change version on the apply to the new version (e.g., v0.13.12) +3. Ensure your Lbrange is still the one you want (check ipAddressPool.yaml) +``` +kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.13.11/config/manifests/metallb-native.yaml +kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml +kubectl apply -f ipAddressPool.yaml +kubectl apply -f https://raw.githubusercontent.com/JamesTurland/JimsGarage/main/Kubernetes/RKE2/l2Advertisement.yaml +``` + +# Upgrade Kube-VIP +1. Delete the daemonset in Rancher or use kubectl delete +2. Redeploy the daemonset with updated values (check kube-vip file) +``` +kubectl delete -f kube-vip +kubectl apply -f kube-vip +``` \ No newline at end of file diff --git a/Ollama/docker-compose.yml b/Ollama/docker-compose.yml new file mode 100644 index 0000000..b503635 --- /dev/null +++ b/Ollama/docker-compose.yml @@ -0,0 +1,44 @@ +version: '3.6' + +services: + ollama: + # Uncomment below for GPU support + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: + # - gpu + volumes: + - ollama:/root/.ollama + # Uncomment below to expose Ollama API outside the container stack + # ports: + # - 11434:11434 + container_name: ollama + pull_policy: always + tty: true + restart: unless-stopped + image: ollama/ollama:latest + + ollama-webui: + build: + context: . + args: + OLLAMA_API_BASE_URL: '/ollama/api' + dockerfile: Dockerfile + image: ollama-webui:latest + container_name: ollama-webui + depends_on: + - ollama + ports: + - 3000:8080 + environment: + - "OLLAMA_API_BASE_URL=http://ollama:11434/api" + extra_hosts: + - host.docker.internal:host-gateway + restart: unless-stopped + +volumes: + ollama: {} diff --git a/Ollama/readme.md b/Ollama/readme.md new file mode 100644 index 0000000..7621e5d --- /dev/null +++ b/Ollama/readme.md @@ -0,0 +1,5 @@ +1. Clone the repo from: https://github.com/ollama-webui/ollama-webui +2. Tweak the docker-compose to your liking +3. 
Run the container: sudo docker compose up -d + +Let it build :) \ No newline at end of file diff --git a/Pihole/docker-compose.yml b/Pihole/docker-compose.yml index 34ef122..b42f423 100644 --- a/Pihole/docker-compose.yml +++ b/Pihole/docker-compose.yml @@ -41,8 +41,7 @@ services: environment: TZ: 'Europe/London' WEBPASSWORD: 'password' - DNS1: '172.70.9.2#5053' - DNS2: 'no' + PIHOLE_DNS_: '172.70.9.2#5053' DNSMASQ_LISTENING: 'all' VIRTUAL_HOST: pihole.yourdomain.com # Volumes store your data between container upgrades diff --git a/Proxmox-NAS/config.yml b/Proxmox-NAS/config.yml new file mode 100644 index 0000000..fc91435 --- /dev/null +++ b/Proxmox-NAS/config.yml @@ -0,0 +1,49 @@ +auth: + - user: foo + group: foo + uid: 1000 + gid: 1000 + password: bar +# - user: baz +# group: xxx +# uid: 1100 +# gid: 1200 +# password_file: /run/secrets/baz_password + +global: + - "force user = foo" + - "force group = foo" + +share: + - name: public + comment: Public + path: /samba/public + browsable: yes + readonly: no + guestok: yes + veto: no + recycle: yes +# - name: share +# path: /samba/share +# browsable: yes +# readonly: no +# guestok: yes +# writelist: foo +# veto: no +# - name: foo +# path: /samba/foo +# browsable: yes +# readonly: no +# guestok: no +# validusers: foo +# writelist: foo +# veto: no +# hidefiles: /_*/ +# - name: foo-baz +# path: /samba/foo-baz +# browsable: yes +# readonly: no +# guestok: no +# validusers: foo,baz +# writelist: foo,baz +# veto: no \ No newline at end of file diff --git a/Proxmox-NAS/docker-compose.yaml b/Proxmox-NAS/docker-compose.yaml new file mode 100644 index 0000000..ff2ff6d --- /dev/null +++ b/Proxmox-NAS/docker-compose.yaml @@ -0,0 +1,28 @@ +name: samba + +services: + samba: + image: crazymax/samba + container_name: samba + network_mode: host + volumes: + - "./data:/data" # Contains cache, configuration and runtime data + - "/smb:/samba/public" + # - "./share:/samba/share" - optional additional share - see config.yml for permissions + # - "./foo:/samba/foo" - optional additional share - see config.yml for permissions + # - "./foo-baz:/samba/foo-baz" - optional additional share - see config.yml for permissions + environment: + - "TZ=Europe/London" + # - "CONFIG_FILE=/your-location" this can be anywhere you want. 
Default is /data + # - "SAMBA_WORKGROUP=WORKGROUP" change to your workgroup, default it WORKGROUP + # - "SAMBA_SERVER_STRING=some string" is the equivalent of the NT Description field + - "SAMBA_LOG_LEVEL=0" + # - "SAMBA_FOLLOW_SYMLINKS=NO" default is yes + # - "SAMBA_WIDE_LINKS=NO" default is yes + # - "SAMBA_HOSTS_ALLOW=0.0.0.0/0" default 127.0.0.0/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 + # - "SAMBA_INTERFACES=some-interface" default all + # - "WSDD2_ENABLE=1" default is 0 + # - "WSDD2_HOSTNAME=string" Override hostname (default to host or container name) + # - "WSDD2_NETBIOS_NAME=some-name" Set NetBIOS name (default to hostname) + # - "WSDD2_INTERFANCE=interface-name" Reply only on this interface + restart: always \ No newline at end of file diff --git a/Unbound/a-records.conf b/Unbound/a-records.conf new file mode 100644 index 0000000..e69de29 diff --git a/Unbound/docker-compose-vpn.yaml b/Unbound/docker-compose-vpn.yaml new file mode 100644 index 0000000..21ec2ab --- /dev/null +++ b/Unbound/docker-compose-vpn.yaml @@ -0,0 +1,90 @@ +version: '3' + +networks: + dns_net: + driver: bridge + ipam: + config: + - subnet: 172.23.0.0/16 + proxy: + external: true + +services: + gluetun: + image: qmcgaw/gluetun + networks: + dns_net: + ipv4_address: 172.23.0.9 + container_name: gluetun + # line above must be uncommented to allow external containers to connect. + # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/connect-a-container-to-gluetun.md#external-container-to-gluetun + cap_add: + - NET_ADMIN + devices: + - /dev/net/tun:/dev/net/tun + ports: + - 6881:6881 + - 6881:6881/udp + volumes: + - /home/ubuntu/docker/gluetun:/gluetun + environment: + # See https://github.com/qdm12/gluetun-wiki/tree/main/setup#setup + - VPN_SERVICE_PROVIDER=nordvpn + - VPN_TYPE=wireguard + # OpenVPN: + # - OPENVPN_USER= + # - OPENVPN_PASSWORD= + # Wireguard: + - WIREGUARD_PRIVATE_KEY= # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/nordvpn.md#obtain-your-wireguard-private-key + - WIREGUARD_ADDRESSES=10.5.0.2/32 + # Timezone for accurate log times + - TZ=Europe/London + # Server list updater + # See https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list + - UPDATER_PERIOD=24h + pihole: + container_name: pihole + hostname: pihole + image: pihole/pihole:latest + networks: + dns_net: + ipv4_address: 172.23.0.7 + proxy: + ports: + - "53:53/tcp" + - "53:53/udp" + - "85:80/tcp" + #- "443:443/tcp" + environment: + - TZ: 'Europe/London' + - WEBPASSWORD: 'password' + - PIHOLE_DNS_: '172.23.0.8#5053' + volumes: + - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' + - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.pihole.entrypoints=http" + - "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.pihole.middlewares=pihole-https-redirect" + - "traefik.http.routers.pihole-secure.entrypoints=https" + - "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.routers.pihole-secure.tls=true" + - "traefik.http.routers.pihole-secure.service=pihole" + - "traefik.http.services.pihole.loadbalancer.server.port=80" + - "traefik.docker.network=proxy" + unbound: + container_name: unbound + image: mvance/unbound:latest + networks: + dns_net: + ipv4_address: 172.23.0.8 + network_mode: "service:gluetun" + volumes: + - 
/home/ubuntu/docker/unbound:/opt/unbound/etc/unbound + ports: + - "5053:53/tcp" + - "5053:53/udp" + restart: unless-stopped \ No newline at end of file diff --git a/Unbound/docker-compose.yaml b/Unbound/docker-compose.yaml new file mode 100644 index 0000000..c288ba7 --- /dev/null +++ b/Unbound/docker-compose.yaml @@ -0,0 +1,59 @@ +version: '3' + +networks: + dns_net: + driver: bridge + ipam: + config: + - subnet: 172.23.0.0/16 + proxy: + external: true + +services: + pihole: + container_name: pihole + hostname: pihole + image: pihole/pihole:latest # remember to change this if you're using rpi + networks: + dns_net: + ipv4_address: 172.23.0.7 + proxy: + ports: + - "53:53/tcp" + - "53:53/udp" + - "85:80/tcp" + #- "443:443/tcp" + environment: + TZ: 'Europe/London' + WEBPASSWORD: 'password' + PIHOLE_DNS_: '172.23.0.8#5053' + volumes: + - '/home/ubuntu/docker/pihole/etc-pihole/:/etc/pihole/' + - '/home/ubuntu/docker/pihole/etc-dnsmasq.d/:/etc/dnsmasq.d/' + restart: unless-stopped + labels: + - "traefik.enable=true" + - "traefik.http.routers.pihole.entrypoints=http" + - "traefik.http.routers.pihole.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.middlewares.pihole-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.pihole.middlewares=pihole-https-redirect" + - "traefik.http.routers.pihole-secure.entrypoints=https" + - "traefik.http.routers.pihole-secure.rule=Host(`pihole.yourdomain.com`)" + - "traefik.http.routers.pihole-secure.tls=true" + - "traefik.http.routers.pihole-secure.service=pihole" + - "traefik.http.services.pihole.loadbalancer.server.port=80" + - "traefik.docker.network=proxy" + unbound: + container_name: unbound + image: mvance/unbound:latest # remember to change this if you're using rpi + networks: + dns_net: + ipv4_address: 172.23.0.8 + volumes: + - /home/ubuntu/docker/unbound:/opt/unbound/etc/unbound + ports: + - "5053:53/tcp" + - "5053:53/udp" + healthcheck: + test: ["NONE"] + restart: unless-stopped \ No newline at end of file diff --git a/Unbound/forward-records.conf b/Unbound/forward-records.conf new file mode 100644 index 0000000..557667b --- /dev/null +++ b/Unbound/forward-records.conf @@ -0,0 +1,54 @@ +forward-zone: + # Forward all queries (except those in cache and local zone) to + # upstream recursive servers + name: "." 
+ # Queries to this forward zone use TLS + forward-tls-upstream: yes + + # https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers + + ## Cloudflare + #forward-addr: 1.1.1.1@853#cloudflare-dns.com + #forward-addr: 1.0.0.1@853#cloudflare-dns.com + #forward-addr: 2606:4700:4700::1111@853#cloudflare-dns.com + #forward-addr: 2606:4700:4700::1001@853#cloudflare-dns.com + + ## Cloudflare Malware + # forward-addr: 1.1.1.2@853#security.cloudflare-dns.com + # forward-addr: 1.0.0.2@853#security.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1112@853#security.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1002@853#security.cloudflare-dns.com + + ## Cloudflare Malware and Adult Content + # forward-addr: 1.1.1.3@853#family.cloudflare-dns.com + # forward-addr: 1.0.0.3@853#family.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1113@853#family.cloudflare-dns.com + # forward-addr: 2606:4700:4700::1003@853#family.cloudflare-dns.com + + ## CleanBrowsing Security Filter + # forward-addr: 185.228.168.9@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.9@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::2@853#security-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::2@853#security-filter-dns.cleanbrowsing.org + + ## CleanBrowsing Adult Filter + # forward-addr: 185.228.168.10@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.11@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::1@853#adult-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::1@853#adult-filter-dns.cleanbrowsing.org + + ## CleanBrowsing Family Filter + # forward-addr: 185.228.168.168@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 185.228.169.168@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:1::@853#family-filter-dns.cleanbrowsing.org + # forward-addr: 2a0d:2a00:2::@853#family-filter-dns.cleanbrowsing.org + + ## Quad9 + forward-addr: 9.9.9.9@853#dns.quad9.net + forward-addr: 149.112.112.112@853#dns.quad9.net + forward-addr: 2620:fe::fe@853#dns.quad9.net + forward-addr: 2620:fe::9@853#dns.quad9.net + + ## getdnsapi.net + # forward-addr: 185.49.141.37@853#getdnsapi.net + # forward-addr: 2a04:b900:0:100::37@853#getdnsapi.net \ No newline at end of file diff --git a/Unbound/srv-records.conf b/Unbound/srv-records.conf new file mode 100644 index 0000000..e69de29 diff --git a/Unbound/unbound.conf b/Unbound/unbound.conf new file mode 100644 index 0000000..90fe9c5 --- /dev/null +++ b/Unbound/unbound.conf @@ -0,0 +1,387 @@ +server: + ########################################################################### + # BASIC SETTINGS + ########################################################################### + # Time to live maximum for RRsets and messages in the cache. If the maximum + # kicks in, responses to clients still get decrementing TTLs based on the + # original (larger) values. When the internal TTL expires, the cache item + # has expired. Can be set lower to force the resolver to query for data + # often, and not trust (very large) TTL values. + cache-max-ttl: 86400 + + # Time to live minimum for RRsets and messages in the cache. If the minimum + # kicks in, the data is cached for longer than the domain owner intended, + # and thus less queries are made to look up the data. 
Zero makes sure the + # data in the cache is as the domain owner intended, higher values, + # especially more than an hour or so, can lead to trouble as the data in + # the cache does not match up with the actual data any more. + cache-min-ttl: 300 + + # Set the working directory for the program. + directory: "/opt/unbound/etc/unbound" + + # Enable or disable whether IPv4 queries are answered or issued. + # Default: yes + do-ip4: yes + + # Enable or disable whether IPv6 queries are answered or issued. + # If disabled, queries are not answered on IPv6, and queries are not sent + # on IPv6 to the internet nameservers. With this option you can disable the + # IPv6 transport for sending DNS traffic, it does not impact the contents + # of the DNS traffic, which may have IPv4 (A) and IPv6 (AAAA) addresses in + # it. + # Default: yes + # May be set to yes if you have IPv6 connectivity + do-ip6: yes + + # Enable or disable whether TCP queries are answered or issued. + # Default: yes + do-tcp: yes + + # Enable or disable whether UDP queries are answered or issued. + # Default: yes + do-udp: yes + + # RFC 6891. Number of bytes size to advertise as the EDNS reassembly buffer + # size. This is the value put into datagrams over UDP towards peers. + # The actual buffer size is determined by msg-buffer-size (both for TCP and + # UDP). Do not set higher than that value. + # Default is 1232 which is the DNS Flag Day 2020 recommendation. + # Setting to 512 bypasses even the most stringent path MTU problems, but + # is seen as extreme, since the amount of TCP fallback generated is + # excessive (probably also for this resolver, consider tuning the outgoing + # tcp number). + edns-buffer-size: 1232 + + # Listen to for queries from clients and answer from this network interface + # and port. + interface: 0.0.0.0@5053 + # interface: ::0 + port: 53 + + # If enabled, prefer IPv6 transport for sending DNS queries to internet + # nameservers. + # Default: yes + # You want to leave this to no unless you have *native* IPv6. With 6to4 and + # Terredo tunnels your web browser should favor IPv4 for the same reasons + prefer-ip6: no + + # Rotates RRSet order in response (the pseudo-random number is taken from + # the query ID, for speed and thread safety). + rrset-roundrobin: yes + + # Drop user privileges after binding the port. + username: "_unbound" + + ########################################################################### + # LOGGING + ########################################################################### + + # Do not print log lines to inform about local zone actions + log-local-actions: no + + # Do not print one line per query to the log + log-queries: no + + # Do not print one line per reply to the log + log-replies: no + + # Do not print log lines that say why queries return SERVFAIL to clients + log-servfail: no + + # If you want to log to a file, use: + # logfile: /opt/unbound/etc/unbound/unbound.log + # Set log location (using /dev/null further limits logging) + logfile: /dev/null + + # Set logging level + # Level 0: No verbosity, only errors. + # Level 1: Gives operational information. + # Level 2: Gives detailed operational information including short information per query. + # Level 3: Gives query level information, output per query. + # Level 4: Gives algorithm level information. + # Level 5: Logs client identification for cache misses. 
+ verbosity: 0 + + ########################################################################### + # PERFORMANCE SETTINGS + ########################################################################### + # https://nlnetlabs.nl/documentation/unbound/howto-optimise/ + # https://nlnetlabs.nl/news/2019/Feb/05/unbound-1.9.0-released/ + + # Number of slabs in the infrastructure cache. Slabs reduce lock contention + # by threads. Must be set to a power of 2. + infra-cache-slabs: 4 + + # Number of incoming TCP buffers to allocate per thread. Default + # is 10. If set to 0, or if do-tcp is "no", no TCP queries from + # clients are accepted. For larger installations increasing this + # value is a good idea. + incoming-num-tcp: 10 + + # Number of slabs in the key cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. Setting (close) to the number + # of cpus is a reasonable guess. + key-cache-slabs: 4 + + # Number of bytes size of the message cache. + # Unbound recommendation is to use roughly twice as much rrset cache memory + # as you use msg cache memory. + msg-cache-size: 142768128 + + # Number of slabs in the message cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. Setting (close) to the number of + # cpus is a reasonable guess. + msg-cache-slabs: 4 + + # The number of queries that every thread will service simultaneously. If + # more queries arrive that need servicing, and no queries can be jostled + # out (see jostle-timeout), then the queries are dropped. + # This is best set at half the number of the outgoing-range. + # This Unbound instance was compiled with libevent so it can efficiently + # use more than 1024 file descriptors. + num-queries-per-thread: 4096 + + # The number of threads to create to serve clients. + # This is set dynamically at run time to effectively use available CPU + # resources. + num-threads: 3 + + # Number of ports to open. This number of file descriptors can be opened + # per thread. + # This Unbound instance was compiled with libevent so it can efficiently + # use more than 1024 file descriptors. + outgoing-range: 8192 + + # Number of bytes size of the RRset cache. + # Use roughly twice as much rrset cache memory as msg cache memory + rrset-cache-size: 285536256 + + # Number of slabs in the RRset cache. Slabs reduce lock contention by + # threads. Must be set to a power of 2. + rrset-cache-slabs: 4 + + # Do not insert authority/additional sections into response messages when + # those sections are not required. This reduces response size + # significantly, and may avoid TCP fallback for some responses. This may + # cause a slight speedup. + minimal-responses: yes + + # Prefetch message cache elements before they expire, so popular entries are + # refreshed in the background. This lowers latency for popular names at the + # expense of slightly more traffic and CPU usage. + prefetch: yes + + # Fetch the DNSKEYs earlier in the validation process, when a DS record is + # encountered. This lowers the latency of requests at the expense of little + # more CPU usage. + prefetch-key: yes + + # Have unbound attempt to serve old responses from cache with a TTL of 0 in + # the response without waiting for the actual resolution to finish. The + # actual resolution answer ends up in the cache later on. + serve-expired: yes + + # If not 0, then set the SO_RCVBUF socket option to get more buffer space on + # UDP port 53 incoming queries. So that short spikes on busy servers do not + # drop packets (see counter in netstat -su).
Otherwise, the number of bytes + # to ask for, try "4m" on a busy server. + # The OS caps it at a maximum, on Linux Unbound needs root permission to + # bypass the limit, or the admin can use sysctl net.core.rmem_max. + # Default: 0 (use system value) + # For example: sysctl -w net.core.rmem_max=4194304 + # To persist across reboots, edit /etc/sysctl.conf to include: + # net.core.rmem_max=4194304 + # Larger socket buffer. OS may need config. + # Ensure kernel buffer is large enough to not lose messages in traffic spikes + #so-rcvbuf: 4m + + # Open dedicated listening sockets for incoming queries for each thread and + # try to set the SO_REUSEPORT socket option on each socket. May distribute + # incoming queries to threads more evenly. + so-reuseport: yes + + # If not 0, then set the SO_SNDBUF socket option to get more buffer space + # on UDP port 53 outgoing queries. + # Specify the number of bytes to ask for, try "4m" on a very busy server. + # The OS caps it at a maximum, on Linux Unbound needs root permission to + # bypass the limit, or the admin can use sysctl net.core.wmem_max. + # For example: sysctl -w net.core.wmem_max=4194304 + # To persist across reboots, edit /etc/sysctl.conf to include: + # net.core.wmem_max=4194304 + # Default: 0 (use system value) + # Larger socket buffer. OS may need config. + # Ensure kernel buffer is large enough to not lose messages in traffic spikes + #so-sndbuf: 4m + + ########################################################################### + # PRIVACY SETTINGS + ########################################################################### + + # RFC 8198. Use the DNSSEC NSEC chain to synthesize NXDOMAIN and other + # denials, using information from previous NXDOMAIN answers. In other + # words, use cached NSEC records to generate negative answers within a + # range and positive answers from wildcards. This increases performance, + # decreases latency and resource utilization on both authoritative and + # recursive servers, and increases privacy. Also, it may help increase + # resilience to certain DoS attacks in some circumstances. + aggressive-nsec: yes + + # Extra delay for timed-out UDP ports before they are closed, in msec. + # This prevents very delayed answer packets from the upstream (recursive) + # servers from bouncing against closed ports and setting off all sorts of + # close-port counters, with e.g. 1500 msec. When timeouts happen you need + # extra sockets, it checks the ID and remote IP of packets, and unwanted + # packets are added to the unwanted packet counter. + delay-close: 10000 + + # Prevent the unbound server from forking into the background as a daemon + do-daemonize: no + + # Add localhost to the do-not-query-address list. + do-not-query-localhost: no + + # Number of bytes size of the aggressive negative cache. + neg-cache-size: 4M + + # Send minimum amount of information to upstream servers to enhance + # privacy (best privacy). + qname-minimisation: yes + + ########################################################################### + # SECURITY SETTINGS + ########################################################################### + # Only give access to recursion clients from LAN IPs + access-control: 127.0.0.1/32 allow + access-control: 192.168.0.0/16 allow + access-control: 172.16.0.0/12 allow + access-control: 10.0.0.0/8 allow + access-control: fc00::/7 allow + access-control: ::1/128 allow + + # File with trust anchor for one zone, which is tracked with RFC5011 + # probes.
+ auto-trust-anchor-file: "var/root.key" + + # Enable chroot (i.e., change apparent root directory for the current + # running process and its children) + chroot: "/opt/unbound/etc/unbound" + + # Deny queries of type ANY with an empty response. + deny-any: yes + + # Harden against algorithm downgrade when multiple algorithms are + # advertised in the DS record. + harden-algo-downgrade: yes + + # RFC 8020. Returns NXDOMAIN to queries for a name below another name that + # is already known to be NXDOMAIN. + harden-below-nxdomain: yes + + # Require DNSSEC data for trust-anchored zones, if such data is absent, the + # zone becomes bogus. If turned off you run the risk of a downgrade attack + # that disables security for a zone. + harden-dnssec-stripped: yes + + # Only trust glue if it is within the server's authority. + harden-glue: yes + + # Ignore very large queries. + harden-large-queries: yes + + # Perform additional queries for infrastructure data to harden the referral + # path. Validates the replies if trust anchors are configured and the zones + # are signed. This enforces DNSSEC validation on nameserver NS sets and the + # nameserver addresses that are encountered on the referral path to the + # answer. Experimental option. + harden-referral-path: no + + # Ignore very small EDNS buffer sizes from queries. + harden-short-bufsize: yes + + # If enabled the HTTP header User-Agent is not set. Use with caution + # as some webserver configurations may reject HTTP requests lacking + # this header. If needed, it is better to explicitly set + # the http-user-agent. + hide-http-user-agent: no + + # Refuse id.server and hostname.bind queries + hide-identity: yes + + # Refuse version.server and version.bind queries + hide-version: yes + + # Set the HTTP User-Agent header for outgoing HTTP requests. If + # set to "", the default, then the package name and version are + # used. + http-user-agent: "DNS" + + # Report this identity rather than the hostname of the server. + identity: "DNS" + + # These private network addresses are not allowed to be returned for public + # internet names. Any occurrence of such addresses is removed from DNS + # answers. Additionally, the DNSSEC validator may mark the answers bogus. + # This protects against DNS Rebinding + private-address: 10.0.0.0/8 + private-address: 172.16.0.0/12 + private-address: 192.168.0.0/16 + private-address: 169.254.0.0/16 + private-address: fd00::/8 + private-address: fe80::/10 + private-address: ::ffff:0:0/96 + + # Enable ratelimiting of queries (per second) sent to nameserver for + # performing recursion. More queries are turned away with an error + # (servfail). This stops recursive floods (e.g., random query names), but + # not spoofed reflection floods. Cached responses are not rate limited by + # this setting. Experimental option. + ratelimit: 1000 + + # Use this certificate bundle for authenticating connections made to + # outside peers (e.g., auth-zone urls, DNS over TLS connections). + tls-cert-bundle: /etc/ssl/certs/ca-certificates.crt + + # Set the total number of unwanted replies to keep track of in every thread. + # When it reaches the threshold, a defensive action of clearing the rrset + # and message caches is taken, hopefully flushing away any poison. + # Unbound suggests a value of 10 million. + unwanted-reply-threshold: 10000 + + # Use 0x20-encoded random bits in the query to foil spoof attempts.
This + # perturbs the lowercase and uppercase of query names sent to authority + # servers and checks if the reply still has the correct casing. + # This feature is an experimental implementation of draft dns-0x20. + # Experimental option. + # Don't use Capitalization randomization as it known to cause DNSSEC issues + # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 + use-caps-for-id: yes + + # Help protect users that rely on this validator for authentication from + # potentially bad data in the additional section. Instruct the validator to + # remove data from the additional section of secure messages that are not + # signed properly. Messages that are insecure, bogus, indeterminate or + # unchecked are not affected. + val-clean-additional: yes + + ########################################################################### + # FORWARD ZONE + ########################################################################### + + #include: /opt/unbound/etc/unbound/forward-records.conf + + ########################################################################### + # LOCAL ZONE + ########################################################################### + + # Include file for local-data and local-data-ptr + #include: /opt/unbound/etc/unbound/a-records.conf + #include: /opt/unbound/etc/unbound/srv-records.conf + + ########################################################################### + # WILDCARD INCLUDE + ########################################################################### + #include: "/opt/unbound/etc/unbound/*.conf" + +remote-control: + control-enable: no \ No newline at end of file diff --git a/Unifi-Controller/docker-compose.yaml b/Unifi-Controller/docker-compose.yaml new file mode 100644 index 0000000..6b09acf --- /dev/null +++ b/Unifi-Controller/docker-compose.yaml @@ -0,0 +1,62 @@ +--- +version: "2.1" +services: + unifi-network-application: + image: lscr.io/linuxserver/unifi-network-application:latest + container_name: unifi-network-application + environment: + - PUID=1000 + - PGID=1000 + - TZ=Etc/UTC + - MONGO_USER=unifi + - MONGO_PASS=5nHgg3G0cH9d + - MONGO_HOST=unifi-db + - MONGO_PORT=27017 + - MONGO_DBNAME=unifi + - MEM_LIMIT=1024 #optional + - MEM_STARTUP=1024 #optional + # - MONGO_TLS= #optional + # - MONGO_AUTHSOURCE= #optional + volumes: + - /home/ubuntu/docker/unifi-controller:/config + ports: + - 8443:8443 + - 3478:3478/udp + - 10001:10001/udp + - 8080:8080 + - 1900:1900/udp #optional + - 8843:8843 #optional + - 8880:8880 #optional + - 6789:6789 #optional + - 5514:5514/udp #optional + labels: + - "traefik.enable=true" + - "traefik.http.routers.unifi.entrypoints=http" + - "traefik.http.routers.unifi.rule=Host(`unifi.jimsgarage.co.uk`)" + - "traefik.http.middlewares.unifi-https-redirect.redirectscheme.scheme=https" + - "traefik.http.routers.unifi.middlewares=unifi-https-redirect" + - "traefik.http.routers.unifi-secure.entrypoints=https" + - "traefik.http.routers.unifi-secure.rule=Host(`unifi.jimsgarage.co.uk`)" + - "traefik.http.routers.unifi-secure.tls=true" + - "traefik.http.routers.unifi-secure.service=unifi" + - "traefik.http.services.unifi.loadbalancer.server.port=8443" + - "traefik.http.services.unifi.loadbalancer.server.scheme=https" + - "traefik.docker.network=proxy" + networks: + proxy: + unifi: + restart: unless-stopped + unifi-db: + image: docker.io/mongo:4.4 + container_name: unifi-db + volumes: + - /home/ubuntu/docker/unifi-controller-db:/data/db + - 
/home/ubuntu/docker-compose/unifi-controller/init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro + networks: + unifi: + restart: unless-stopped + +networks: + proxy: + external: true + unifi: \ No newline at end of file diff --git a/Unifi-Controller/init-mongo.js b/Unifi-Controller/init-mongo.js new file mode 100644 index 0000000..a200d9e --- /dev/null +++ b/Unifi-Controller/init-mongo.js @@ -0,0 +1,2 @@ +db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi"}]}); +db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi_stat"}]}); \ No newline at end of file
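
A quick sanity check after deploying the Unifi stack above (a minimal sketch; it assumes the compose file and init-mongo.js are used unchanged, and that no MongoDB root auth is configured, so the shell can query users directly). Note that init-mongo.js only runs on the very first start, while the unifi-controller-db bind mount is still empty:

sudo docker compose up -d
sudo docker logs unifi-db     # the first-start output should show /docker-entrypoint-initdb.d/init-mongo.js being executed
sudo docker exec -it unifi-db mongo --quiet --eval 'db.getSiblingDB("unifi").getUsers()'     # should list the "unifi" user

If the users are missing, stop the stack, clear the unifi-controller-db bind mount, and bring it up again so the init script re-runs.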