refactor(vars,hosts): networking & k3s clusters

This commit is contained in:
Ryan Yin
2024-03-23 01:42:17 +08:00
parent a3c6532376
commit 84c21ccd34
36 changed files with 336 additions and 571 deletions

View File

@@ -11,7 +11,6 @@
#############################################################
let
hostName = "rakushun"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = [
# import the rk3588 module, which contains the configuration for bootloader/kernel/firmware
@@ -23,23 +22,14 @@ in {
./gitea.nix
./caddy.nix
./tailscale.nix
];
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
networkmanager.enable = false;
# RJ45 port 1
interfaces.enP4p65s0 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
# RJ45 port 2
# interfaces.enP3p49s0 = {
# useDHCP = false;
# ipv4.addresses = [hostAddress];
# };
};
# This value determines the NixOS release from which the default

View File

@@ -11,7 +11,6 @@
#############################################################
let
hostName = "suzu"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = [
# import the rk3588 module, which contains the configuration for bootloader/kernel/firmware
@@ -25,12 +24,8 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
networkmanager.enable = false;
interfaces.end1 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default

View File

@@ -11,9 +11,6 @@
prometheus, grafana, restic, etc.
4. `kana`: Yet another NixOS VM running some common applications, such as homepage, file browser,
torrent downloader, etc.
3. Homelab:
1. `tailscale-gw`: A tailscale subnet router(gateway) for accessing my homelab remotely. NixOS VM
running on Proxmox.
4. `rolling_girls`: My RISCV64 hosts.
1. `nozomi`: Lichee Pi 4A, TH1520(4xC910@2.0G), 16GB RAM + 32G eMMC + 128G SD Card.
2. `yukina`: Milk-V Mars, JH7110(4xU74@1.5 GHz), 4G RAM + No eMMC + 64G SD Card.

View File

@@ -1,48 +0,0 @@
{
myvars,
mylib,
...
}:
#############################################################
#
# Tailscale Gateway (homelab subnet router) - a NixOS VM running on Proxmox
#
#############################################################
let
hostName = "tailscale-gw"; # Define your hostname.
# Static IPv4 address for this host, looked up from the shared vars set.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
# Auto-import every sibling .nix file in this directory.
imports = mylib.scanPaths ./.;
# supported file systems, so we can mount any removable disks with these filesystems
boot.supportedFilesystems = [
"ext4"
"btrfs"
"xfs"
"fat"
"vfat"
"exfat"
];
networking = {
inherit hostName;
inherit (myvars.networking) nameservers;
# Use mainGateway instead of defaultGateway to make NAT Traversal work
defaultGateway = myvars.networking.mainGateway;
# Static configuration only — NetworkManager is not used on this VM.
networkmanager.enable = false;
# Proxmox virtio NIC; static IPv4, no DHCP.
interfaces.ens18 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.11"; # Did you read the comment?
}

View File

@@ -6,7 +6,6 @@
#############################################################
let
hostName = "ai"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = [
./cifs-mount.nix
@@ -20,14 +19,8 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
wireless.enable = false; # Enables wireless support via wpa_supplicant.
# configures the network interface(include wireless) via `nmcli` & `nmtui`
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
networkmanager.enable = false;
interfaces.enp5s0 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# conflict with feature: containerd-snapshotter

View File

@@ -10,7 +10,6 @@
#############################################################
let
hostName = "kana"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = mylib.scanPaths ./.;
@@ -33,12 +32,8 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
networkmanager.enable = false;
interfaces.ens18 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default

View File

@@ -10,7 +10,6 @@
#############################################################
let
hostName = "ruby"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = mylib.scanPaths ./.;
@@ -35,12 +34,8 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
networkmanager.enable = false;
interfaces.ens18 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default

View File

@@ -55,8 +55,8 @@
{
# All my NixOS hosts.
targets =
map (host: "${host.address}:9100")
(builtins.attrValues myvars.networking.hostAddress);
map (addr: "${addr.ipv4}:9100")
(builtins.attrValues myvars.networking.hostsAddr);
labels.type = "node";
}
];
@@ -70,7 +70,7 @@
metrics_path = "/metrics";
static_configs = [
{
targets = ["${myvars.networking.hostAddress.aquamarine.address}:9153"];
targets = ["${myvars.networking.hostsAddr.aquamarine.ipv4}:9153"];
labels.type = "app";
labels.app = "dnsmasq";
}
@@ -83,7 +83,7 @@
metrics_path = "/metrics";
static_configs = [
{
targets = ["${myvars.networking.hostAddress.kana.address}:9153"];
targets = ["${myvars.networking.hostsAddr.kana.ipv4}:9153"];
labels.type = "app";
labels.app = "v2ray";
}
@@ -96,7 +96,7 @@
metrics_path = "/metrics";
static_configs = [
{
targets = ["${myvars.networking.hostAddress.kana.address}:10000"];
targets = ["${myvars.networking.hostsAddr.kana.ipv4}:10000"];
labels.type = "app";
labels.app = "v2ray";
}

View File

@@ -1,19 +1,28 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-1"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# the first node in the cluster should be the one to initialize the cluster
clusterInit = true;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,43 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
# Pin the k3s release so all cluster nodes upgrade in lockstep.
package = pkgs.k3s_1_29;
in {
# CLI tooling for operating the cluster directly from this node.
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
# Cluster join token, decrypted at activation time by agenix.
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /home/${myvars.username}/.kube/config"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): k3s docs show `--etcd-expose-metrics=true`; confirm the space-separated form is accepted
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,19 +1,28 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,31 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
# Additional HA server nodes join the initial server (master-1).
serverName = "k3s-prod-1-master-1";
serverIp = myvars.networking.hostAddress.${serverName}.address;
# Pin the k3s release so all cluster nodes upgrade in lockstep.
package = pkgs.k3s_1_29;
in {
environment.systemPackages = [package];
services.k3s = {
inherit package;
enable = true;
role = "server";
# Join the embedded-etcd cluster via the first server's API endpoint.
serverAddr = "https://${serverIp}:6443";
# Cluster join token, decrypted at activation time by agenix.
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true" # NOTE(review): k3s docs show `--etcd-expose-metrics=true`; confirm the space-separated form is accepted
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,19 +1,28 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-master-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,31 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
serverName = "k3s-prod-1-master-1";
serverIp = myvars.networking.hostAddress.${serverName}.address;
package = pkgs.k3s_1_29;
in {
environment.systemPackages = [package];
services.k3s = {
inherit package;
enable = true;
role = "server";
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true"
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,19 +1,27 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-1"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
serverName = "k3s-prod-1-master-1";
serverIp = myvars.networking.hostAddress.${serverName}.address;
package = pkgs.k3s_1_29;
in {
environment.systemPackages = [package];
services.k3s = {
inherit package;
enable = true;
role = "agent";
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -1,19 +1,27 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
serverName = "k3s-prod-1-master-1";
serverIp = myvars.networking.hostAddress.${serverName}.address;
package = pkgs.k3s_1_29;
in {
environment.systemPackages = [package];
services.k3s = {
inherit package;
enable = true;
role = "agent";
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -1,19 +1,27 @@
{
config,
pkgs,
myvars,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "k3s-prod-1-master-1";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sAgentModule {
inherit pkgs;
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
k3sModule
];
}

View File

@@ -1,23 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
serverName = "k3s-prod-1-master-1";
serverIp = myvars.networking.hostAddress.${serverName}.address;
package = pkgs.k3s_1_29;
in {
environment.systemPackages = [package];
services.k3s = {
inherit package;
enable = true;
role = "agent";
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -1,4 +1,5 @@
{
config,
pkgs,
mylib,
myvars,
@@ -7,17 +8,25 @@
}: let
# MoreFine - S500Plus
hostName = "kubevirt-shoryu"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# the first node in the cluster should be the one to initialize the cluster
clusterInit = true;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true"
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,4 +1,5 @@
{
config,
pkgs,
mylib,
myvars,
@@ -6,17 +7,25 @@
...
}: let
hostName = "kubevirt-shushou"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "kubevirt-shoryu";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true"
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,4 +1,5 @@
{
config,
pkgs,
mylib,
myvars,
@@ -6,17 +7,25 @@
...
}: let
hostName = "kubevirt-youko"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
k3sServerName = "kubevirt-shoryu";
coreModule = mylib.genKubeVirtCoreModule {
inherit pkgs hostName;
inherit (myvars) networking;
};
k3sModule = mylib.genK3sServerModule {
inherit pkgs;
kubeconfigFile = "/home/${myvars.username}/.kube/config";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../disko-config/kubevirt-disko-fs.nix
coreModule
k3sModule
];
}

View File

@@ -1,42 +0,0 @@
{
config,
pkgs,
myvars,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller" # we use fluxcd instead
+ " --disable=traefik" # deploy our own ingress controller instead
+ " --etcd-expose-metrics true"
+ " --etcd-snapshot-schedule-cron '0 */12 * * *'";
};
}

View File

@@ -1,86 +0,0 @@
{
# Generates the shared "core" NixOS module for a kubevirt/k3s node:
# KVM virtualization support, Open vSwitch bridged networking, and the
# host-level prerequisites for Longhorn storage.
gencoreModule = {
pkgs,
hostName,
networking,
...
}: let
# Static IPv4 address for this host, looked up from the shared vars set.
hostAddress = networking.hostAddress.${hostName};
in {
# supported file systems, so we can mount any removable disks with these filesystems
boot.supportedFilesystems = [
"ext4"
"btrfs"
"xfs"
#"zfs"
"ntfs"
"fat"
"vfat"
"exfat"
"nfs" # required by longhorn
"cifs" # mount windows share
];
# KVM on AMD plus VFIO for PCI passthrough.
boot.kernelModules = ["kvm-amd" "vfio-pci"];
boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
environment.systemPackages = with pkgs; [
# Validate Hardware Virtualization Support via:
# virt-host-validate qemu
libvirt
# used by kubernetes' ovs-cni plugin
# https://github.com/k8snetworkplumbingwg/multus-cni
multus-cni
];
# Enable the Open vSwitch as a systemd service
# It's required by kubernetes' ovs-cni plugin.
virtualisation.vswitch = {
enable = true;
# reset the Open vSwitch configuration database to a default configuration on every start of the systemd ovsdb.service
resetOnStart = false;
};
networking.vswitches = {
# https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
ovsbr1 = {
interfaces = {
# Attach the interfaces to OVS bridge
# This interface should not be used by the host itself!
ens18 = {};
};
};
};
# Workaround for longhorn running on NixOS
# https://github.com/longhorn/longhorn/issues/2166
systemd.tmpfiles.rules = [
"L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
];
# Longhorn uses open-iscsi to create block devices.
services.openiscsi = {
name = "iqn.2020-08.org.linux-iscsi.initiatorhost:${hostName}";
enable = true;
};
networking = {
inherit hostName;
inherit (networking) defaultGateway nameservers;
networkmanager.enable = false;
# Set the host's address on the OVS bridge interface instead of the physical interface!
interfaces.ovsbr1 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
};
# This value determines the NixOS release from which the default
# settings for stateful data, like file locations and database versions
# on your system were taken. It's perfectly fine and recommended to leave
# this value at the release version of the first install of this system.
# Before changing this value read the documentation for this option
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
system.stateVersion = "23.11"; # Did you read the comment?
};
}

View File

@@ -10,7 +10,6 @@
#############################################################
let
hostName = "nozomi"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = [
# import the licheepi4a module, which contains the configuration for bootloader/kernel/firmware
@@ -23,6 +22,7 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
wireless = {
# https://wiki.archlinux.org/title/wpa_supplicant
@@ -46,11 +46,6 @@ in {
# proxy.default = "http://user:password@proxy:port/";
# proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# LPI4A's wireless interface
interfaces.wlan0 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
# LPI4A's first ethernet interface
# interfaces.end0 = {
# useDHCP = false;

View File

@@ -10,7 +10,6 @@
#############################################################
let
hostName = "yukina"; # Define your hostname.
hostAddress = myvars.networking.hostAddress.${hostName};
in {
imports = [
# import the licheepi4a module, which contains the configuration for bootloader/kernel/firmware
@@ -23,6 +22,7 @@ in {
networking = {
inherit hostName;
inherit (myvars.networking) defaultGateway nameservers;
inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
wireless = {
# https://wiki.archlinux.org/title/wpa_supplicant
@@ -46,11 +46,6 @@ in {
# proxy.default = "http://user:password@proxy:port/";
# proxy.noProxy = "127.0.0.1,localhost,internal.domain";
# LPI4A's wireless interface
interfaces.wlan0 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
# LPI4A's first ethernet interface
# interfaces.end0 = {
# useDHCP = false;