Reference: k0s Multi-node installation
# Ensure this node has a unique machine ID (important for cloned VMs/images,
# where duplicates confuse cluster components).
sudo systemd-machine-id-setup
# Download and install the latest k0s binary.
curl --proto '=https' --tlsv1.2 -sSf https://get.k0s.sh | sudo sh
# Alternative: pin a specific k0s version instead of "latest".
curl --proto '=https' --tlsv1.2 -sSf https://get.k0s.sh | sudo K0S_VERSION=v1.32.4+k0s.0 sh
# Become root for the remaining setup steps.
sudo su -
# Generate a default cluster configuration; edit it before installing.
mkdir -p /etc/k0s
k0s config create > /etc/k0s/k0s.yaml
# /etc/k0s/k0s.yaml — ClusterConfig as produced by `k0s config create`.
# NOTE(review): indentation was lost in the original paste (every line was
# flush-left, which is invalid YAML); nesting restored per the k0s
# ClusterConfig schema — confirm against your generated file.
apiVersion: k0s.k0sproject.io/v1beta1
kind: ClusterConfig
metadata:
  name: k0s
  namespace: kube-system
spec:
  api:
    address: 192.168.1.21
    k0sApiPort: 9443
    port: 6443
    sans:
    - 192.168.1.21
  controllerManager: {}
  extensions:
    helm:
      concurrencyLevel: 5
  installConfig:
    users:
      etcdUser: etcd
      kineUser: kube-apiserver
      konnectivityUser: konnectivity-server
      kubeAPIserverUser: kube-apiserver
      kubeSchedulerUser: kube-scheduler
  konnectivity:
    adminPort: 8133
    agentPort: 8132
  network:
    clusterDomain: cluster.local
    dualStack:
      enabled: false
    kubeProxy:
      iptables:
        minSyncPeriod: 0s
        syncPeriod: 0s
      ipvs:
        minSyncPeriod: 0s
        syncPeriod: 0s
        tcpFinTimeout: 0s
        tcpTimeout: 0s
        udpTimeout: 0s
      metricsBindAddress: 0.0.0.0:10249
      mode: iptables
      nftables:
        minSyncPeriod: 0s
        syncPeriod: 0s
    kuberouter:
      autoMTU: true
      hairpin: Enabled
      metricsPort: 8080
    nodeLocalLoadBalancing:
      enabled: false
      envoyProxy:
        apiServerBindPort: 7443
        konnectivityServerBindPort: 7132
      type: EnvoyProxy
    podCIDR: 10.244.0.0/16
    provider: kuberouter
    serviceCIDR: 10.96.0.0/12
  scheduler: {}
  storage:
    etcd:
      peerAddress: 192.168.1.21
    type: etcd
  telemetry:
    enabled: true
# Confirm which ports the generated config uses before opening the firewall.
grep -i port /etc/k0s/k0s.yaml
# Open every port the controller needs, one firewall-cmd call per port.
# FIX: the original concatenated all invocations on one line with no
# separators, so only the first command would run and the remaining
# "firewall-cmd ..." tokens would be passed to it as bogus arguments.
for port in 9443 6443 7443 8133 8132 7132 8080; do
  firewall-cmd --add-port="${port}/tcp" --permanent
done
firewall-cmd --reload
# Install and start k0s as a controller using the edited config.
k0s install controller -c /etc/k0s/k0s.yaml
k0s start
# Create a worker join token and copy it to the worker host.
k0s token create --role=worker --expiry=100h > token-file
scp token-file user@<worker>:~/
# On the worker node: become root and stage the join token.
sudo su -
mv /home/<user>/token-file ~/
# Open the required ports, one firewall-cmd call per port.
# FIX: the original ran all firewall-cmd commands on a single line with no
# separators, so only the first invocation would actually execute.
for port in 9443 6443 7443 8133 8132 7132 8080; do
  firewall-cmd --add-port="${port}/tcp" --permanent
done
firewall-cmd --reload
# Install and start k0s as a worker using the token from the controller.
k0s install worker --token-file ./token-file
k0s start
# Watch the cluster converge until all nodes report Ready.
watch -n1 'k0s kubectl get all -A; echo; k0s kubectl get node'
Note that you should always have an odd number of control nodes. Therefore, you should have 1, 3, or 5 control nodes, depending on the cluster size. For a non-production environment, start with 1. For production environments, start with 3. Monitor the kube-apiserver performance: if it starts to show high CPU utilization, increase the number of CPUs on the control node(s).
# Create a short-lived controller join token, then copy the token and the
# cluster config to the new controller node.
k0s token create --role=controller --expiry=1h > token-file
scp token-file user@<new-controller>:~/
scp k0s.yaml user@<new-controller>:~/
Expected status on a controller node:

    k0s status
    Version: v1.32.4+k0s.0
    Process ID: 109946
    Role: controller
    Workloads: false
    SingleNode: false
Expected status on a worker node:

    k0s status
    Version: v1.32.4+k0s.0
    Process ID: 91481
    Role: worker
    Workloads: true
    SingleNode: false
    Kube-api probing successful: true
    Kube-api probing last error:
# Verify that all nodes have joined the cluster and are Ready.
kubectl get nodes
It should look something like this:

    [garfield@k8s01 ~]$ kubectl get nodes
    NAME                       STATUS   ROLES           AGE   VERSION
    k8s01.home.mygarfield.us   Ready    control-plane   17h   v1.31.1
    k8s02.home.mygarfield.us   Ready    <none>          10m   v1.31.1
    k8s03.home.mygarfield.us   Ready    <none>          6s    v1.31.1