refactor(vars,hosts): networking & k3s clusters

This commit is contained in:
Ryan Yin
2024-03-23 01:42:17 +08:00
parent a3c6532376
commit 84c21ccd34
36 changed files with 336 additions and 571 deletions

View File

@@ -1,19 +1,28 @@
# NOTE(review): rendered diff hunk — removed lines (k8sLib / ../lib.nix) are
# interleaved with their replacements (mylib.genKubeVirtCoreModule); this span
# is not valid Nix as shown.
# Entry module for host k3s-prod-1-master-1: builds the KubeVirt core module
# and a k3s server module (first node, so clusterInit = true) and imports them.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-1"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# the first node in the cluster should be the one to initialize the cluster
clusterInit = true;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,43 +0,0 @@
# Deleted file: k3s server configuration for the first master node of the
# k3s-prod-1 cluster (replaced by mylib.genK3sServerModule in this commit).
{
config,
pkgs,
myvars,
...
}: let
# Pinned k3s release used by this node.
package = pkgs.k3s_1_29;
in {
# Cluster administration / debugging tooling available on the host.
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
# Cluster join token decrypted by agenix at activation time.
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /home/${myvars.username}/.kube/config"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): boolean CLI flags often need '=' form ('--etcd-expose-metrics=true') — verify against k3s docs
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,19 +1,28 @@
# NOTE(review): rendered diff hunk — old lines (k8sLib / ../lib.nix) are
# interleaved with their replacements; not valid Nix as shown.
# Entry module for host k3s-prod-1-master-2: HA server that joins the cluster
# via the first master's IP (serverIp) instead of clusterInit.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,31 +0,0 @@
{
  config,
  pkgs,
  myvars,
  ...
}: let
  # k3s HA server node: joins the existing embedded-etcd cluster by pointing
  # serverAddr at the cluster's first master instead of initializing it.
  k3sPackage = pkgs.k3s_1_29;
  firstMaster = "k3s-prod-1-master-1";
  firstMasterIp = myvars.networking.hostAddress.${firstMaster}.address;
in {
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    role = "server";
    # API endpoint of the first master node.
    serverAddr = "https://${firstMasterIp}:6443";
    # Cluster join token decrypted by agenix.
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/server
    extraFlags = builtins.concatStrings [
      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
      " --write-kubeconfig-mode 644"
      " --service-node-port-range 80-32767"
      " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
      " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
      " --data-dir /var/lib/rancher/k3s"
      " --disable-helm-controller" # we use fluxcd instead
      " --disable=traefik" # deploy our own ingress controller instead
      " --etcd-expose-metrics true"
      " --etcd-snapshot-schedule-cron '0 */12 * * *'"
    ];
  };
}

View File

@@ -1,19 +1,28 @@
# NOTE(review): rendered diff hunk — old lines (k8sLib / ../lib.nix) are
# interleaved with their replacements; not valid Nix as shown.
# Entry module for host k3s-prod-1-master-3: HA server that joins the cluster
# via the first master's IP (serverIp) instead of clusterInit.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,31 +0,0 @@
{
  config,
  pkgs,
  myvars,
  ...
}: let
  # k3s HA server node: joins the existing embedded-etcd cluster by pointing
  # serverAddr at the cluster's first master instead of initializing it.
  k3sPackage = pkgs.k3s_1_29;
  firstMaster = "k3s-prod-1-master-1";
  firstMasterIp = myvars.networking.hostAddress.${firstMaster}.address;
in {
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    role = "server";
    # API endpoint of the first master node.
    serverAddr = "https://${firstMasterIp}:6443";
    # Cluster join token decrypted by agenix.
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/server
    extraFlags = builtins.concatStrings [
      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
      " --write-kubeconfig-mode 644"
      " --service-node-port-range 80-32767"
      " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
      " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
      " --data-dir /var/lib/rancher/k3s"
      " --disable-helm-controller" # we use fluxcd instead
      " --disable=traefik" # deploy our own ingress controller instead
      " --etcd-expose-metrics true"
      " --etcd-snapshot-schedule-cron '0 */12 * * *'"
    ];
  };
}

View File

@@ -1,19 +1,27 @@
# NOTE(review): rendered diff hunk — old lines (k8sLib / ../lib.nix) are
# interleaved with their replacements; not valid Nix as shown.
# Entry module for host k3s-prod-1-worker-1: KubeVirt core module plus a k3s
# agent module that joins via the first master's IP.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-1"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
  config,
  pkgs,
  myvars,
  ...
}: let
  # k3s worker (agent) node: joins the cluster through the first master's
  # API endpoint using the shared cluster token.
  k3sPackage = pkgs.k3s_1_29;
  masterName = "k3s-prod-1-master-1";
  masterIp = myvars.networking.hostAddress.${masterName}.address;
in {
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    role = "agent";
    # API endpoint of the first master node.
    serverAddr = "https://${masterIp}:6443";
    # Cluster join token decrypted by agenix.
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = builtins.concatStrings [
      " --node-label=node-type=worker"
      " --data-dir /var/lib/rancher/k3s"
    ];
  };
}

View File

@@ -1,19 +1,27 @@
# NOTE(review): rendered diff hunk — old lines (k8sLib / ../lib.nix) are
# interleaved with their replacements; not valid Nix as shown.
# Entry module for host k3s-prod-1-worker-2: KubeVirt core module plus a k3s
# agent module that joins via the first master's IP.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
  config,
  pkgs,
  myvars,
  ...
}: let
  # k3s worker (agent) node: joins the cluster through the first master's
  # API endpoint using the shared cluster token.
  k3sPackage = pkgs.k3s_1_29;
  masterName = "k3s-prod-1-master-1";
  masterIp = myvars.networking.hostAddress.${masterName}.address;
in {
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    role = "agent";
    # API endpoint of the first master node.
    serverAddr = "https://${masterIp}:6443";
    # Cluster join token decrypted by agenix.
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = builtins.concatStrings [
      " --node-label=node-type=worker"
      " --data-dir /var/lib/rancher/k3s"
    ];
  };
}

View File

@@ -1,19 +1,27 @@
# NOTE(review): rendered diff hunk — old lines (k8sLib / ../lib.nix) are
# interleaved with their replacements; not valid Nix as shown.
# Entry module for host k3s-prod-1-worker-3: KubeVirt core module plus a k3s
# agent module that joins via the first master's IP.
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
  config,
  pkgs,
  myvars,
  ...
}: let
  # k3s worker (agent) node: joins the cluster through the first master's
  # API endpoint using the shared cluster token.
  k3sPackage = pkgs.k3s_1_29;
  masterName = "k3s-prod-1-master-1";
  masterIp = myvars.networking.hostAddress.${masterName}.address;
in {
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    role = "agent";
    # API endpoint of the first master node.
    serverAddr = "https://${masterIp}:6443";
    # Cluster join token decrypted by agenix.
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = builtins.concatStrings [
      " --node-label=node-type=worker"
      " --data-dir /var/lib/rancher/k3s"
    ];
  };
}

View File

@@ -1,4 +1,5 @@
# NOTE(review): rendered diff view containing TWO hunks (the interior '@@'
# marker below separates them); removed lines are interleaved with their
# replacements, so this span is not valid Nix as shown.
# Entry module for host kubevirt-shoryu (first node of the kubevirt cluster,
# hence clusterInit = true); also wires in disko partitioning modules.
{
config,
pkgs,
mylib,
myvars,
@@ -7,17 +8,25 @@
}: let
# MoreFine - S500Plus
hostName = "kubevirt-shoryu"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# the first node in the cluster should be the one to initialize the cluster
clusterInit = true;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
# Deleted file: k3s server configuration for kubevirt-shoryu
# (replaced by mylib.genK3sServerModule in this commit).
{
config,
pkgs,
myvars,
...
}: let
# Pinned k3s release used by this node.
package = pkgs.k3s_1_29;
in {
# Cluster administration / debugging tooling available on the host.
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
# NOTE(review): all three kubevirt-* nodes in this commit set
# clusterInit = true and none sets serverAddr — per the note above only the
# first server should initialize; verify against the intended topology.
clusterInit = true;
role = "server";
# Token is read from a mounted volume rather than an agenix secret here.
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): boolean CLI flags often need '=' form — verify
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,4 +1,5 @@
# NOTE(review): rendered diff view containing TWO hunks (interior '@@' marker
# below); removed lines are interleaved with their replacements, so this span
# is not valid Nix as shown.
# Entry module for host kubevirt-shushou: joins the kubevirt cluster via the
# first node's IP (serverIp); also wires in disko partitioning modules.
{
config,
pkgs,
mylib,
myvars,
@@ -6,17 +7,25 @@
...
}: let
hostName = "kubevirt-shushou"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "kubevirt-shoryu";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
# Deleted file: k3s server configuration for kubevirt-shushou
# (replaced by mylib.genK3sServerModule in this commit).
{
config,
pkgs,
myvars,
...
}: let
# Pinned k3s release used by this node.
package = pkgs.k3s_1_29;
in {
# Cluster administration / debugging tooling available on the host.
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
# NOTE(review): all three kubevirt-* nodes in this commit set
# clusterInit = true and none sets serverAddr — per the note above only the
# first server should initialize; verify against the intended topology.
clusterInit = true;
role = "server";
# Token is read from a mounted volume rather than an agenix secret here.
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): boolean CLI flags often need '=' form — verify
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,4 +1,5 @@
# NOTE(review): rendered diff view containing TWO hunks (interior '@@' marker
# below); removed lines are interleaved with their replacements, so this span
# is not valid Nix as shown.
# Entry module for host kubevirt-youko: joins the kubevirt cluster via the
# first node's IP (serverIp); also wires in disko partitioning modules.
{
config,
pkgs,
mylib,
myvars,
@@ -6,17 +7,25 @@
...
}: let
hostName = "kubevirt-youko"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "kubevirt-shoryu";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
# Deleted file: k3s server configuration for kubevirt-youko
# (replaced by mylib.genK3sServerModule in this commit).
{
config,
pkgs,
myvars,
...
}: let
# Pinned k3s release used by this node.
package = pkgs.k3s_1_29;
in {
# Cluster administration / debugging tooling available on the host.
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
# NOTE(review): all three kubevirt-* nodes in this commit set
# clusterInit = true and none sets serverAddr — per the note above only the
# first server should initialize; verify against the intended topology.
clusterInit = true;
role = "server";
# Token is read from a mounted volume rather than an agenix secret here.
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): boolean CLI flags often need '=' form — verify
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,86 +0,0 @@
# Deleted file: shared library producing the "core" NixOS module for every
# KubeVirt/k3s node (virtualization, Open vSwitch networking, Longhorn
# prerequisites). Replaced by mylib.genKubeVirtCoreModule in this commit.
{
# Generate the core module for one host.
# Arguments: pkgs (nixpkgs set), hostName (string), networking (shared vars
# attrset providing hostAddress, defaultGateway, nameservers).
# NOTE(review): camelCase would conventionally be `genCoreModule`.
gencoreModule = {
pkgs,
hostName,
networking,
...
}: let
# Static address entry for this host; used below as an element of
# interfaces.ovsbr1.ipv4.addresses (presumably { address, prefixLength } —
# verify against the shared networking vars).
hostAddress = networking.hostAddress.${hostName};
in {
# supported file systems, so we can mount any removable disks with these filesystems
boot.supportedFilesystems = [
"ext4"
"btrfs"
"xfs"
#"zfs"
"ntfs"
"fat"
"vfat"
"exfat"
"nfs" # required by longhorn
"cifs" # mount windows share
];
# AMD KVM plus PCI passthrough (vfio) support for virtualization workloads.
boot.kernelModules = ["kvm-amd" "vfio-pci"];
boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
environment.systemPackages = with pkgs; [
# Validate Hardware Virtualization Support via:
# virt-host-validate qemu
libvirt
# used by kubernetes' ovs-cni plugin
# https://github.com/k8snetworkplumbingwg/multus-cni
multus-cni
];
# Enable the Open vSwitch as a systemd service
# It's required by kubernetes' ovs-cni plugin.
virtualisation.vswitch = {
enable = true;
# reset the Open vSwitch configuration database to a default configuration on every start of the systemd ovsdb.service
resetOnStart = false;
};
networking.vswitches = {
# https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
ovsbr1 = {
interfaces = {
# Attach the interfaces to OVS bridge
# This interface should not be used by the host itself!
# NOTE(review): physical NIC name "ens18" is hard-coded for every host —
# confirm all nodes expose this interface name.
ens18 = {};
};
};
};
# Workaround for longhorn running on NixOS
# https://github.com/longhorn/longhorn/issues/2166
systemd.tmpfiles.rules = [
"L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
];
# Longhorn uses open-iscsi to create block devices.
services.openiscsi = {
name = "iqn.2020-08.org.linux-iscsi.initiatorhost:${hostName}";
enable = true;
};
networking = {
inherit hostName;
# Gateway and DNS come from the shared networking vars (function argument),
# not from NixOS's own `networking` option tree.
inherit (networking) defaultGateway nameservers;
networkmanager.enable = false;
# Set the host's address on the OVS bridge interface instead of the physical interface!
interfaces.ovsbr1 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.11"; # Did you read the comment?
};
}