refactor(vars,hosts): networking & k3s clusters

This commit is contained in:
Ryan Yin
2024-03-23 01:42:17 +08:00
parent a3c6532376
commit 84c21ccd34
36 changed files with 336 additions and 571 deletions

View File

@@ -2,7 +2,13 @@
colmenaSystem = import ./colmenaSystem.nix;
macosSystem = import ./macosSystem.nix;
nixosSystem = import ./nixosSystem.nix;
attrs = import ./attrs.nix {inherit lib;};
genKubeVirtCoreModule = import ./genKubeVirtCoreModule.nix;
genK3sServerModule = import ./genK3sServerModule.nix;
genK3sAgentModule = import ./genK3sAgentModule.nix;
# use path relative to the root of the project
relativeToRoot = lib.path.append ../.;
scanPaths = path:

21
lib/genK3sAgentModule.nix Normal file
View File

@@ -0,0 +1,21 @@
{
  pkgs,
  serverIp,
  tokenFile,
  ...
}: let
  # Pin the k3s release used by every node of this cluster.
  k3sPackage = pkgs.k3s_1_29;

  # Flags passed verbatim to the k3s agent.
  # https://docs.k3s.io/cli/agent
  agentFlags = [
    "--node-label=node-type=worker"
    "--data-dir /var/lib/rancher/k3s"
  ];
in {
  # Expose the k3s CLI on the host as well.
  environment.systemPackages = [k3sPackage];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    inherit tokenFile;
    role = "agent";
    # Join the cluster via an existing server node.
    serverAddr = "https://${serverIp}:6443";
    extraFlags = pkgs.lib.concatMapStrings (flag: " " + flag) agentFlags;
  };
}

View File

@@ -0,0 +1,52 @@
{
  pkgs,
  # Path the admin kubeconfig is written to.
  kubeconfigFile,
  # Shared cluster token file.
  tokenFile,
  # Initialize HA cluster using an embedded etcd datastore.
  # If you are configuring an HA cluster with an embedded etcd,
  # the 1st server must have `clusterInit = true`
  # and other servers must connect to it using `serverAddr`.
  serverIp ? null,
  clusterInit ? (serverIp == null),
  ...
}: let
  inherit (pkgs) lib;

  # Pin the k3s release used by every node of this cluster.
  k3sPackage = pkgs.k3s_1_29;

  # Flags passed verbatim to the k3s server.
  # https://docs.k3s.io/cli/server
  serverFlags = [
    "--write-kubeconfig ${kubeconfigFile}"
    "--write-kubeconfig-mode 644"
    "--service-node-port-range 80-32767"
    "--kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
    "--node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
    "--data-dir /var/lib/rancher/k3s"
    "--etcd-expose-metrics true"
    "--etcd-snapshot-schedule-cron '0 */12 * * *'"
    # disable some features we don't need
    "--disable-helm-controller" # we use fluxcd instead
    "--disable=traefik" # deploy our own ingress controller instead
    "--disable=servicelb" # we use kube-vip instead
    "--flannel-backend=none" # we use cilium instead
    "--disable-network-policy"
  ];
in {
  # Cluster administration tooling for interactive use on the server.
  environment.systemPackages = with pkgs; [
    k3sPackage
    k9s
    kubectl
    istioctl
    kubernetes-helm
    skopeo
    dive # explore docker layers
  ];

  services.k3s = {
    enable = true;
    package = k3sPackage;
    inherit tokenFile clusterInit;
    role = "server";
    # The bootstrap server (clusterInit) has no upstream to join;
    # every other server joins it via serverAddr.
    serverAddr = lib.optionalString (!clusterInit) "https://${serverIp}:6443";
    extraFlags = lib.concatMapStrings (flag: " " + flag) serverFlags;
  };
}

View File

@@ -0,0 +1,78 @@
{
  pkgs,
  hostName,
  networking,
  ...
}: let
  # The physical NIC of this host; it gets enslaved to the OVS bridge below.
  hostIface = networking.hostsAddr.${hostName}.iface;
in {
  boot = {
    # supported file systems, so we can mount any removable disks with these filesystems
    supportedFilesystems = [
      "ext4"
      "btrfs"
      "xfs"
      #"zfs"
      "ntfs"
      "fat"
      "vfat"
      "exfat"
      "nfs" # required by longhorn
      "cifs" # mount windows share
    ];
    kernelModules = ["kvm-amd" "vfio-pci"];
    extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
  };

  environment.systemPackages = with pkgs; [
    # Validate Hardware Virtualization Support via:
    #   virt-host-validate qemu
    libvirt
    # used by kubernetes' ovs-cni plugin
    # https://github.com/k8snetworkplumbingwg/multus-cni
    multus-cni
  ];

  # Workaround for longhorn running on NixOS
  # https://github.com/longhorn/longhorn/issues/2166
  systemd.tmpfiles.rules = [
    "L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
  ];

  # Longhorn uses open-iscsi to create block devices.
  services.openiscsi = {
    enable = true;
    name = "iqn.2020-08.org.linux-iscsi.initiatorhost:${hostName}";
  };

  # Run Open vSwitch as a systemd service.
  # It's required by kubernetes' ovs-cni plugin.
  virtualisation.vswitch = {
    enable = true;
    # do NOT reset the OVS configuration database to defaults
    # on every start of ovsdb.service
    resetOnStart = false;
  };

  networking = {
    inherit hostName;
    inherit (networking) defaultGateway nameservers;
    networkmanager.enable = false;
    # https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
    vswitches.ovsbr1 = {
      # Attach the physical interface to the OVS bridge.
      # This interface should not be used by the host itself!
      interfaces.${hostIface} = {};
    };
    # Set the host's address on the OVS bridge interface instead of the physical interface!
    interfaces.ovsbr1 = networking.hostsInterface.${hostName}.interfaces.${hostIface};
  };

  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It's perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "23.11"; # Did you read the comment?
}