mirror of https://github.com/ryan4yin/nix-config.git (synced 2026-01-11 20:40:24 +01:00)
refactor(vars,hosts): networking & k3s clusters
39 Justfile
@@ -129,26 +129,6 @@ ruby-local mode="default":
 kana:
     colmena apply --on '@kana' --verbose --show-trace

 tailscale:
     colmena apply --on '@tailscale-gw' --verbose --show-trace

-# pve-aqua:
-#     nom build .#aquamarine
-#     rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-aquamarine.vma.zst
-#
-# pve-ruby:
-#     nom build .#ruby
-#     rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-ruby.vma.zst
-#
-# pve-kana:
-#     nom build .#kana
-#     rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-kana.vma.zst
-#
-# pve-tsgw:
-#     nom build .#tailscale-gw
-#     rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-tailscale-gw.vma.zst
-#

 ############################################################################
 #
 # Kubernetes related commands
@@ -164,25 +144,6 @@ master:
 worker:
     colmena apply --on '@k3s-prod-1-worker-*' --verbose --show-trace

-# pve-k8s:
-#     nom build .#k3s-prod-1-master-1
-#     rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-master-1.vma.zst
-#
-#     nom build .#k3s-prod-1-master-2
-#     rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-master-2.vma.zst
-#
-#     nom build .#k3s-prod-1-master-3
-#     rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-master-3.vma.zst
-#
-#     nom build .#k3s-prod-1-worker-1
-#     rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-worker-1.vma.zst
-#
-#     nom build .#k3s-prod-1-worker-2
-#     rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-worker-2.vma.zst
-#
-#     nom build .#k3s-prod-1-worker-3
-#     rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s-prod-1-worker-3.vma.zst
-#

 ############################################################################
 #
@@ -11,7 +11,6 @@
 #############################################################
 let
   hostName = "rakushun"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = [
     # import the rk3588 module, which contains the configuration for bootloader/kernel/firmware
@@ -23,23 +22,14 @@ in {
     ./gitea.nix
     ./caddy.nix
     ./tailscale.nix
   ];

   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;

+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
     networkmanager.enable = false;
-    # RJ45 port 1
-    interfaces.enP4p65s0 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
-    # RJ45 port 2
-    # interfaces.enP3p49s0 = {
-    #   useDHCP = false;
-    #   ipv4.addresses = [hostAddress];
-    # };
   };

   # This value determines the NixOS release from which the default
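The single inherited `interfaces` line added above replaces each host's hand-written interface block. As a sketch (not part of the commit), for rakushun it expands to the equivalent of:

  # equivalent expansion of `inherit (myvars.networking.hostsInterface.${hostName}) interfaces;`,
  # using rakushun's entry from vars/networking.nix later in this diff
  networking.interfaces."enP4p65s0" = {
    useDHCP = false;
    ipv4.addresses = [{
      address = "192.168.5.179";
      prefixLength = 24;
    }];
  };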
@@ -11,7 +11,6 @@
 #############################################################
 let
   hostName = "suzu"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = [
     # import the rk3588 module, which contains the configuration for bootloader/kernel/firmware
@@ -25,12 +24,8 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;

+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
     networkmanager.enable = false;
-    interfaces.end1 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
   };

   # This value determines the NixOS release from which the default
@@ -11,9 +11,6 @@
       prometheus, grafana, restic, etc.
    4. `kana`: Yet another NixOS VM running some common applications, such as homepage, file browser,
       torrent downloader, etc.
-3. Homelab:
-   1. `tailscale-gw`: A tailscale subnet router (gateway) for accessing my homelab remotely. NixOS VM
-      running on Proxmox.
 4. `rolling_girls`: My RISCV64 hosts.
    1. `nozomi`: Lichee Pi 4A, TH1520 (4xC910@2.0GHz), 16GB RAM + 32GB eMMC + 128GB SD Card.
    2. `yukina`: Milk-V Mars, JH7110 (4xU74@1.5GHz), 4GB RAM + no eMMC + 64GB SD Card.
@@ -1,48 +0,0 @@
-{
-  myvars,
-  mylib,
-  ...
-}:
-#############################################################
-#
-# Tailscale Gateway (homelab subnet router) - a NixOS VM running on Proxmox
-#
-#############################################################
-let
-  hostName = "tailscale-gw"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
-in {
-  imports = mylib.scanPaths ./.;
-
-  # supported file systems, so we can mount any removable disks with these filesystems
-  boot.supportedFilesystems = [
-    "ext4"
-    "btrfs"
-    "xfs"
-    "fat"
-    "vfat"
-    "exfat"
-  ];
-
-  networking = {
-    inherit hostName;
-    inherit (myvars.networking) nameservers;
-
-    # Use mainGateway instead of defaultGateway to make NAT Traversal work
-    defaultGateway = myvars.networking.mainGateway;
-
-    networkmanager.enable = false;
-    interfaces.ens18 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
-  };
-
-  # This value determines the NixOS release from which the default
-  # settings for stateful data, like file locations and database versions
-  # on your system were taken. It's perfectly fine and recommended to leave
-  # this value at the release version of the first install of this system.
-  # Before changing this value read the documentation for this option
-  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
-  system.stateVersion = "23.11"; # Did you read the comment?
-}
@@ -6,7 +6,6 @@
 #############################################################
 let
   hostName = "ai"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = [
     ./cifs-mount.nix
@@ -20,14 +19,8 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;

     wireless.enable = false; # Enables wireless support via wpa_supplicant.
     # configures the network interface(include wireless) via `nmcli` & `nmtui`
+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
     networkmanager.enable = false;
-    interfaces.enp5s0 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
   };

   # conflict with feature: containerd-snapshotter
@@ -10,7 +10,6 @@
 #############################################################
 let
   hostName = "kana"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = mylib.scanPaths ./.;
@@ -33,12 +32,8 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;

+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
     networkmanager.enable = false;
-    interfaces.ens18 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
   };

   # This value determines the NixOS release from which the default
@@ -10,7 +10,6 @@
 #############################################################
 let
   hostName = "ruby"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = mylib.scanPaths ./.;
@@ -35,12 +34,8 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;

+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;
     networkmanager.enable = false;
-    interfaces.ens18 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
   };

   # This value determines the NixOS release from which the default
@@ -55,8 +55,8 @@
 {
   # All my NixOS hosts.
   targets =
-    map (host: "${host.address}:9100")
-    (builtins.attrValues myvars.networking.hostAddress);
+    map (addr: "${addr.ipv4}:9100")
+    (builtins.attrValues myvars.networking.hostsAddr);
   labels.type = "node";
 }
 ];
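Evaluated against the new hostsAddr set, the map above yields one node-exporter target per host, in alphabetical key order (an illustrative sketch of the first entries, not part of the commit):

  # result of the `targets` expression above (truncated)
  ["192.168.5.100:9100" "192.168.5.101:9100" "192.168.5.106:9100" /* ... */]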
@@ -70,7 +70,7 @@
   metrics_path = "/metrics";
   static_configs = [
     {
-      targets = ["${myvars.networking.hostAddress.aquamarine.address}:9153"];
+      targets = ["${myvars.networking.hostsAddr.aquamarine.ipv4}:9153"];
       labels.type = "app";
       labels.app = "dnsmasq";
     }
@@ -83,7 +83,7 @@
   metrics_path = "/metrics";
   static_configs = [
     {
-      targets = ["${myvars.networking.hostAddress.kana.address}:9153"];
+      targets = ["${myvars.networking.hostsAddr.kana.ipv4}:9153"];
       labels.type = "app";
       labels.app = "v2ray";
     }
@@ -96,7 +96,7 @@
   metrics_path = "/metrics";
   static_configs = [
     {
-      targets = ["${myvars.networking.hostAddress.kana.address}:10000"];
+      targets = ["${myvars.networking.hostsAddr.kana.ipv4}:10000"];
       labels.type = "app";
       labels.app = "v2ray";
     }
@@ -1,19 +1,28 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-master-1"; # Define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    # the first node in the cluster should be the one to initialize the cluster
+    clusterInit = true;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,19 +1,28 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-master-2"; # define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "k3s-prod-1-master-1";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,31 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  serverName = "k3s-prod-1-master-1";
-  serverIp = myvars.networking.hostAddress.${serverName}.address;
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = [package];
-  services.k3s = {
-    inherit package;
-    enable = true;
-    role = "server";
-    serverAddr = "https://${serverIp}:6443";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
-    # https://docs.k3s.io/cli/server
-    extraFlags =
-      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
-      + " --write-kubeconfig-mode 644"
-      + " --service-node-port-range 80-32767"
-      + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
-      + " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
-      + " --data-dir /var/lib/rancher/k3s"
-      + " --disable-helm-controller" # we use fluxcd instead
-      + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
-  };
-}
@@ -1,19 +1,28 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-master-3"; # define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "k3s-prod-1-master-1";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,31 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  serverName = "k3s-prod-1-master-1";
-  serverIp = myvars.networking.hostAddress.${serverName}.address;
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = [package];
-  services.k3s = {
-    inherit package;
-    enable = true;
-    role = "server";
-    serverAddr = "https://${serverIp}:6443";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
-    # https://docs.k3s.io/cli/server
-    extraFlags =
-      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
-      + " --write-kubeconfig-mode 644"
-      + " --service-node-port-range 80-32767"
-      + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
-      + " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
-      + " --data-dir /var/lib/rancher/k3s"
-      + " --disable-helm-controller" # we use fluxcd instead
-      + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
-  };
-}
@@ -1,19 +1,27 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-worker-1"; # define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "k3s-prod-1-master-1";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sAgentModule {
+    inherit pkgs;
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,19 +1,27 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-worker-2"; # define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "k3s-prod-1-master-1";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sAgentModule {
+    inherit pkgs;
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,23 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  serverName = "k3s-prod-1-master-1";
-  serverIp = myvars.networking.hostAddress.${serverName}.address;
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = [package];
-  services.k3s = {
-    inherit package;
-    enable = true;
-    role = "agent";
-    serverAddr = "https://${serverIp}:6443";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
-    # https://docs.k3s.io/cli/agent
-    extraFlags =
-      " --node-label=node-type=worker"
-      + " --data-dir /var/lib/rancher/k3s";
-  };
-}
@@ -1,19 +1,27 @@
 {
   config,
   pkgs,
   myvars,
   mylib,
   ...
 }: let
   hostName = "k3s-prod-1-worker-3"; # define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "k3s-prod-1-master-1";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sAgentModule {
+    inherit pkgs;
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
       coreModule
+      k3sModule
     ];
 }
@@ -1,23 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  serverName = "k3s-prod-1-master-1";
-  serverIp = myvars.networking.hostAddress.${serverName}.address;
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = [package];
-  services.k3s = {
-    inherit package;
-    enable = true;
-    role = "agent";
-    serverAddr = "https://${serverIp}:6443";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
-    # https://docs.k3s.io/cli/agent
-    extraFlags =
-      " --node-label=node-type=worker"
-      + " --data-dir /var/lib/rancher/k3s";
-  };
-}
@@ -1,4 +1,5 @@
 {
+  config,
   pkgs,
   mylib,
   myvars,
@@ -7,17 +8,25 @@
 }: let
   # MoreFine - S500Plus
   hostName = "kubevirt-shoryu"; # Define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    # the first node in the cluster should be the one to initialize the cluster
+    clusterInit = true;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
-      coreModule
       disko.nixosModules.default
       ../disko-config/kubevirt-disko-fs.nix
+      coreModule
+      k3sModule
     ];
 }
@@ -1,42 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = with pkgs; [
-    package
-    k9s
-    kubectl
-    istioctl
-    kubernetes-helm
-
-    skopeo
-    dive # explore docker layers
-  ];
-  services.k3s = {
-    inherit package;
-    enable = true;
-
-    # Initialize HA cluster using an embedded etcd datastore.
-    # If you are configuring an HA cluster with an embedded etcd,
-    # the 1st server must have `clusterInit = true`
-    # and other servers must connect to it using serverAddr.
-    clusterInit = true;
-    role = "server";
-    tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
-    # https://docs.k3s.io/cli/server
-    extraFlags =
-      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
-      + " --write-kubeconfig-mode 644"
-      + " --service-node-port-range 80-32767"
-      + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
-      + " --data-dir /var/lib/rancher/k3s"
-      + " --disable-helm-controller" # we use fluxcd instead
-      + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
-  };
-}
@@ -1,4 +1,5 @@
 {
+  config,
   pkgs,
   mylib,
   myvars,
@@ -6,17 +7,25 @@
   ...
 }: let
   hostName = "kubevirt-shushou"; # Define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "kubevirt-shoryu";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
-      coreModule
       disko.nixosModules.default
       ../disko-config/kubevirt-disko-fs.nix
+      coreModule
+      k3sModule
     ];
 }
@@ -1,42 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = with pkgs; [
-    package
-    k9s
-    kubectl
-    istioctl
-    kubernetes-helm
-
-    skopeo
-    dive # explore docker layers
-  ];
-  services.k3s = {
-    inherit package;
-    enable = true;
-
-    # Initialize HA cluster using an embedded etcd datastore.
-    # If you are configuring an HA cluster with an embedded etcd,
-    # the 1st server must have `clusterInit = true`
-    # and other servers must connect to it using serverAddr.
-    clusterInit = true;
-    role = "server";
-    tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
-    # https://docs.k3s.io/cli/server
-    extraFlags =
-      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
-      + " --write-kubeconfig-mode 644"
-      + " --service-node-port-range 80-32767"
-      + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
-      + " --data-dir /var/lib/rancher/k3s"
-      + " --disable-helm-controller" # we use fluxcd instead
-      + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
-  };
-}
@@ -1,4 +1,5 @@
 {
+  config,
   pkgs,
   mylib,
   myvars,
@@ -6,17 +7,25 @@
   ...
 }: let
   hostName = "kubevirt-youko"; # Define your hostname.
-  k8sLib = import ../lib.nix;
-  coreModule = k8sLib.gencoreModule {
   k3sServerName = "kubevirt-shoryu";
+
+  coreModule = mylib.genKubeVirtCoreModule {
     inherit pkgs hostName;
     inherit (myvars) networking;
   };
+  k3sModule = mylib.genK3sServerModule {
+    inherit pkgs;
+    kubeconfigFile = "/home/${myvars.username}/.kube/config";
+    tokenFile = config.age.secrets."k3s-prod-1-token".path;
+    serverIp = myvars.networking.hostsAddr.${k3sServerName}.ipv4;
+  };
 in {
   imports =
     (mylib.scanPaths ./.)
     ++ [
-      coreModule
       disko.nixosModules.default
       ../disko-config/kubevirt-disko-fs.nix
+      coreModule
+      k3sModule
     ];
 }
@@ -1,42 +0,0 @@
-{
-  config,
-  pkgs,
-  myvars,
-  ...
-}: let
-  package = pkgs.k3s_1_29;
-in {
-  environment.systemPackages = with pkgs; [
-    package
-    k9s
-    kubectl
-    istioctl
-    kubernetes-helm
-
-    skopeo
-    dive # explore docker layers
-  ];
-  services.k3s = {
-    inherit package;
-    enable = true;
-
-    # Initialize HA cluster using an embedded etcd datastore.
-    # If you are configuring an HA cluster with an embedded etcd,
-    # the 1st server must have `clusterInit = true`
-    # and other servers must connect to it using serverAddr.
-    clusterInit = true;
-    role = "server";
-    tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
-    # https://docs.k3s.io/cli/server
-    extraFlags =
-      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
-      + " --write-kubeconfig-mode 644"
-      + " --service-node-port-range 80-32767"
-      + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
-      + " --data-dir /var/lib/rancher/k3s"
-      + " --disable-helm-controller" # we use fluxcd instead
-      + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
-  };
-}
@@ -1,86 +0,0 @@
-{
-  gencoreModule = {
-    pkgs,
-    hostName,
-    networking,
-    ...
-  }: let
-    hostAddress = networking.hostAddress.${hostName};
-  in {
-    # supported file systems, so we can mount any removable disks with these filesystems
-    boot.supportedFilesystems = [
-      "ext4"
-      "btrfs"
-      "xfs"
-      #"zfs"
-      "ntfs"
-      "fat"
-      "vfat"
-      "exfat"
-      "nfs" # required by longhorn
-      "cifs" # mount windows share
-    ];
-
-    boot.kernelModules = ["kvm-amd" "vfio-pci"];
-    boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
-
-    environment.systemPackages = with pkgs; [
-      # Validate Hardware Virtualization Support via:
-      #   virt-host-validate qemu
-      libvirt
-
-      # used by kubernetes' ovs-cni plugin
-      # https://github.com/k8snetworkplumbingwg/multus-cni
-      multus-cni
-    ];
-
-    # Enable the Open vSwitch as a systemd service
-    # It's required by kubernetes' ovs-cni plugin.
-    virtualisation.vswitch = {
-      enable = true;
-      # reset the Open vSwitch configuration database to a default configuration on every start of the systemd ovsdb.service
-      resetOnStart = false;
-    };
-    networking.vswitches = {
-      # https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
-      ovsbr1 = {
-        interfaces = {
-          # Attach the interfaces to OVS bridge
-          # This interface should not be used by the host itself!
-          ens18 = {};
-        };
-      };
-    };
-
-    # Workaround for longhorn running on NixOS
-    # https://github.com/longhorn/longhorn/issues/2166
-    systemd.tmpfiles.rules = [
-      "L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
-    ];
-    # Longhorn uses open-iscsi to create block devices.
-    services.openiscsi = {
-      name = "iqn.2020-08.org.linux-iscsi.initiatorhost:${hostName}";
-      enable = true;
-    };
-
-    networking = {
-      inherit hostName;
-      inherit (networking) defaultGateway nameservers;
-
-      networkmanager.enable = false;
-      # Set the host's address on the OVS bridge interface instead of the physical interface!
-      interfaces.ovsbr1 = {
-        useDHCP = false;
-        ipv4.addresses = [hostAddress];
-      };
-    };
-
-    # This value determines the NixOS release from which the default
-    # settings for stateful data, like file locations and database versions
-    # on your system were taken. It's perfectly fine and recommended to leave
-    # this value at the release version of the first install of this system.
-    # Before changing this value read the documentation for this option
-    # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
-    system.stateVersion = "23.11"; # Did you read the comment?
-  };
-}
@@ -10,7 +10,6 @@
 #############################################################
 let
   hostName = "nozomi"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = [
     # import the licheepi4a module, which contains the configuration for bootloader/kernel/firmware
@@ -23,6 +22,7 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;
+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;

     wireless = {
       # https://wiki.archlinux.org/title/wpa_supplicant
@@ -46,11 +46,6 @@ in {
     # proxy.default = "http://user:password@proxy:port/";
     # proxy.noProxy = "127.0.0.1,localhost,internal.domain";

-    # LPI4A's wireless interface
-    interfaces.wlan0 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
     # LPI4A's first ethernet interface
     # interfaces.end0 = {
     #   useDHCP = false;
@@ -10,7 +10,6 @@
 #############################################################
 let
   hostName = "yukina"; # Define your hostname.
-  hostAddress = myvars.networking.hostAddress.${hostName};
 in {
   imports = [
     # import the licheepi4a module, which contains the configuration for bootloader/kernel/firmware
@@ -23,6 +22,7 @@ in {
   networking = {
     inherit hostName;
     inherit (myvars.networking) defaultGateway nameservers;
+    inherit (myvars.networking.hostsInterface.${hostName}) interfaces;

     wireless = {
       # https://wiki.archlinux.org/title/wpa_supplicant
@@ -46,11 +46,6 @@ in {
     # proxy.default = "http://user:password@proxy:port/";
     # proxy.noProxy = "127.0.0.1,localhost,internal.domain";

-    # LPI4A's wireless interface
-    interfaces.wlan0 = {
-      useDHCP = false;
-      ipv4.addresses = [hostAddress];
-    };
     # LPI4A's first ethernet interface
     # interfaces.end0 = {
     #   useDHCP = false;
@@ -2,7 +2,13 @@
   colmenaSystem = import ./colmenaSystem.nix;
   macosSystem = import ./macosSystem.nix;
   nixosSystem = import ./nixosSystem.nix;

   attrs = import ./attrs.nix {inherit lib;};

+  genKubeVirtCoreModule = import ./genKubeVirtCoreModule.nix;
+  genK3sServerModule = import ./genK3sServerModule.nix;
+  genK3sAgentModule = import ./genK3sAgentModule.nix;
+
   # use path relative to the root of the project
   relativeToRoot = lib.path.append ../.;
   scanPaths = path:
@@ -1,20 +1,18 @@
 {
-  config,
   pkgs,
-  myvars,
+  serverIp,
+  tokenFile,
   ...
 }: let
-  serverName = "k3s-prod-1-master-1";
-  serverIp = myvars.networking.hostAddress.${serverName}.address;
   package = pkgs.k3s_1_29;
 in {
   environment.systemPackages = [package];
   services.k3s = {
-    inherit package;
     enable = true;
+    inherit package tokenFile;
+
     role = "agent";
     serverAddr = "https://${serverIp}:6443";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
     # https://docs.k3s.io/cli/agent
     extraFlags =
       " --node-label=node-type=worker"
@@ -1,7 +1,13 @@
 {
-  config,
   pkgs,
-  myvars,
+  kubeconfigFile,
+  tokenFile,
+  # Initialize HA cluster using an embedded etcd datastore.
+  # If you are configuring an HA cluster with an embedded etcd,
+  # the 1st server must have `clusterInit = true`
+  # and other servers must connect to it using `serverAddr`.
+  serverIp ? null,
+  clusterInit ? (serverIp == null),
   ...
 }: let
   package = pkgs.k3s_1_29;
@@ -16,28 +22,31 @@ in {
     skopeo
     dive # explore docker layers
   ];
-  services.k3s = {
-    inherit package;
-    enable = true;
-
-    # Initialize HA cluster using an embedded etcd datastore.
-    # If you are configuring an HA cluster with an embedded etcd,
-    # the 1st server must have `clusterInit = true`
-    # and other servers must connect to it using serverAddr.
-    clusterInit = true;
+  services.k3s = {
+    enable = true;
+    inherit package tokenFile clusterInit;
+    serverAddr =
+      if clusterInit
+      then ""
+      else "https://${serverIp}:6443";

     role = "server";
-    tokenFile = config.age.secrets."k3s-prod-1-token".path;
     # https://docs.k3s.io/cli/server
     extraFlags =
-      " --write-kubeconfig /home/${myvars.username}/.kube/config"
+      " --write-kubeconfig ${kubeconfigFile}"
       + " --write-kubeconfig-mode 644"
       + " --service-node-port-range 80-32767"
       + " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
       + " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
       + " --data-dir /var/lib/rancher/k3s"
+      + " --etcd-expose-metrics true"
+      + " --etcd-snapshot-schedule-cron '0 */12 * * *'"
+      # disable some features we don't need
       + " --disable-helm-controller" # we use fluxcd instead
       + " --disable=traefik" # deploy our own ingress controller instead
-      + " --etcd-expose-metrics true"
-      + " --etcd-snapshot-schedule-cron '0 */12 * * *'";
+      + " --disable=servicelb" # we use kube-vip instead
+      + " --flannel-backend=none" # we use cilium instead
+      + " --disable-network-policy";
   };
 }
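The reworked genK3sServerModule derives clusterInit from serverIp, so each caller passes exactly one of the two. A minimal sketch of this defaulting logic with a hypothetical mkServer helper (not part of the repo):

  let
    mkServer = {
      serverIp ? null,
      clusterInit ? (serverIp == null),
    }: {
      inherit clusterInit;
      serverAddr =
        if clusterInit
        then ""
        else "https://${serverIp}:6443";
    };
  in {
    first = mkServer {}; # => clusterInit = true;  serverAddr = ""
    joining = mkServer {serverIp = "192.168.5.108";}; # => clusterInit = false; serverAddr = "https://192.168.5.108:6443"
  }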
78 lib/genKubeVirtCoreModule.nix Normal file
@@ -0,0 +1,78 @@
+{
+  pkgs,
+  hostName,
+  networking,
+  ...
+}: let
+  inherit (networking.hostsAddr.${hostName}) iface;
+in {
+  # supported file systems, so we can mount any removable disks with these filesystems
+  boot.supportedFilesystems = [
+    "ext4"
+    "btrfs"
+    "xfs"
+    #"zfs"
+    "ntfs"
+    "fat"
+    "vfat"
+    "exfat"
+    "nfs" # required by longhorn
+    "cifs" # mount windows share
+  ];
+
+  boot.kernelModules = ["kvm-amd" "vfio-pci"];
+  boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
+
+  environment.systemPackages = with pkgs; [
+    # Validate Hardware Virtualization Support via:
+    #   virt-host-validate qemu
+    libvirt
+
+    # used by kubernetes' ovs-cni plugin
+    # https://github.com/k8snetworkplumbingwg/multus-cni
+    multus-cni
+  ];
+
+  # Workaround for longhorn running on NixOS
+  # https://github.com/longhorn/longhorn/issues/2166
+  systemd.tmpfiles.rules = [
+    "L+ /usr/local/bin - - - - /run/current-system/sw/bin/"
+  ];
+  # Longhorn uses open-iscsi to create block devices.
+  services.openiscsi = {
+    name = "iqn.2020-08.org.linux-iscsi.initiatorhost:${hostName}";
+    enable = true;
+  };
+
+  # Enable the Open vSwitch as a systemd service
+  # It's required by kubernetes' ovs-cni plugin.
+  virtualisation.vswitch = {
+    enable = true;
+    # reset the Open vSwitch configuration database to a default configuration on every start of the systemd ovsdb.service
+    resetOnStart = false;
+  };
+  networking.vswitches = {
+    # https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
+    ovsbr1 = {
+      # Attach the interfaces to OVS bridge
+      # This interface should not be used by the host itself!
+      interfaces.${iface} = {};
+    };
+  };
+  networking = {
+    inherit hostName;
+    inherit (networking) defaultGateway nameservers;
+
+    networkmanager.enable = false;
+    # Set the host's address on the OVS bridge interface instead of the physical interface!
+    interfaces.ovsbr1 = networking.hostsInterface.${hostName}.interfaces.${iface};
+  };
+
+  # This value determines the NixOS release from which the default
+  # settings for stateful data, like file locations and database versions
+  # on your system were taken. It's perfectly fine and recommended to leave
+  # this value at the release version of the first install of this system.
+  # Before changing this value read the documentation for this option
+  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
+  system.stateVersion = "23.11"; # Did you read the comment?
+}
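Because interfaces.ovsbr1 is taken from hostsInterface, the OVS bridge receives exactly the address that used to sit on the physical NIC. For kubevirt-shoryu the assignment resolves to (an illustrative evaluation, based on the hostsAddr entry in vars/networking.nix later in this diff):

  # value of `interfaces.ovsbr1` for kubevirt-shoryu
  {
    useDHCP = false;
    ipv4.addresses = [{
      prefixLength = 24;
      address = "192.168.5.181";
    }];
  }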
@@ -104,7 +104,6 @@ All the outputs of this flake are defined here.
 ├── nixos-tests
 ├── src  # every host has its own file in this directory
 │   ├── 12kingdoms-shoukei.nix
-│   ├── homelab-tailscale-gw.nix
 │   ├── idols-ai.nix
 │   ├── idols-aquamarine.nix
 │   ├── idols-kana.nix
@@ -1,37 +0,0 @@
-{
-  # NOTE: the args not used in this file CAN NOT be removed!
-  # because haumea passes arguments lazily,
-  # and these arguments are used in the functions like `mylib.nixosSystem`, `mylib.colmenaSystem`, etc.
-  inputs,
-  lib,
-  mylib,
-  myvars,
-  system,
-  genSpecialArgs,
-  ...
-} @ args: let
-  name = "tailscale-gw";
-  tags = [name "homelab-network"];
-  ssh-user = "root";
-
-  modules = {
-    nixos-modules = map mylib.relativeToRoot [
-      # common
-      "secrets/nixos.nix"
-      "modules/nixos/server/server.nix"
-      "modules/nixos/server/proxmox-hardware-configuration.nix"
-      # host specific
-      "hosts/homelab-${name}"
-    ];
-  };
-
-  systemArgs = modules // args;
-in {
-  nixosConfigurations.${name} = mylib.nixosSystem systemArgs;
-
-  colmena.${name} =
-    mylib.colmenaSystem (systemArgs // {inherit tags ssh-user;});
-
-  # generate proxmox image for virtual machines without desktop environment
-  packages.${name} = inputs.self.nixosConfigurations.${name}.config.formats.proxmox;
-}
@@ -7,30 +7,116 @@
   ];
   prefixLength = 24;

-  hostAddress =
-    lib.attrsets.mapAttrs
-    (name: address: {inherit prefixLength address;})
-    {
-      "ai" = "192.168.5.100";
-      "aquamarine" = "192.168.5.101";
-      "ruby" = "192.168.5.102";
-      "kana" = "192.168.5.103";
-      "nozomi" = "192.168.5.104";
-      "yukina" = "192.168.5.105";
-      "chiaya" = "192.168.5.106";
-      "suzu" = "192.168.5.107";
-      "k3s-prod-1-master-1" = "192.168.5.108";
-      "k3s-prod-1-master-2" = "192.168.5.109";
-      "k3s-prod-1-master-3" = "192.168.5.110";
-      "k3s-prod-1-worker-1" = "192.168.5.111";
-      "k3s-prod-1-worker-2" = "192.168.5.112";
-      "k3s-prod-1-worker-3" = "192.168.5.113";
-      "kubevirt-shoryu" = "192.168.5.176";
-      "kubevirt-shushou" = "192.168.5.177";
-      "kubevirt-youko" = "192.168.5.178";
-      "rakushun" = "192.168.5.179";
-      "tailscale-gw" = "192.168.5.192";
+  hostsAddr = {
+    # Homelab's Physical Machines (KubeVirt Nodes)
+    kubevirt-shoryu = {
+      iface = "eno1";
+      ipv4 = "192.168.5.181";
+    };
+    kubevirt-shushou = {
+      iface = "eno1";
+      ipv4 = "192.168.5.182";
+    };
+    kubevirt-youko = {
+      iface = "eno1";
+      ipv4 = "192.168.5.183";
+    };
+
+    # Other VMs and Physical Machines
+    ai = {
+      # Desktop PC
+      iface = "enp5s0";
+      ipv4 = "192.168.5.100";
+    };
+    aquamarine = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.101";
+    };
+    ruby = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.102";
+    };
+    kana = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.103";
+    };
+    nozomi = {
+      # LicheePi 4A's wireless interface - RISC-V
+      iface = "wlan0";
+      ipv4 = "192.168.5.104";
+    };
+    yukina = {
+      # LicheePi 4A's wireless interface - RISC-V
+      iface = "wlan0";
+      ipv4 = "192.168.5.105";
+    };
+    chiaya = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.106";
+    };
+    suzu = {
+      # Orange Pi 5 - ARM
+      iface = "end1";
+      ipv4 = "192.168.5.107";
+    };
+    rakushun = {
+      # Orange Pi 5 - ARM
+      # RJ45 port 1 - enP4p65s0
+      # RJ45 port 2 - enP3p49s0
+      iface = "enP4p65s0";
+      ipv4 = "192.168.5.179";
+    };
+
+    k3s-prod-1-master-1 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.108";
+    };
+    k3s-prod-1-master-2 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.109";
+    };
+    k3s-prod-1-master-3 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.110";
+    };
+    k3s-prod-1-worker-1 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.111";
+    };
+    k3s-prod-1-worker-2 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.112";
+    };
+    k3s-prod-1-worker-3 = {
+      # VM
+      iface = "ens18";
+      ipv4 = "192.168.5.113";
+    };
+  };
+
+  hostsInterface =
+    lib.attrsets.mapAttrs
+    (
+      key: val: {
+        interfaces."${val.iface}" = {
+          useDHCP = false;
+          ipv4.addresses = [{
+            inherit prefixLength;
+            address = val.ipv4;
+          }];
+        };
+      }
+    )
+    hostsAddr;

   ssh = {
     # define the host alias for remote builders
@@ -47,15 +133,15 @@
   # '';
   extraConfig =
     lib.attrsets.foldlAttrs
-    (acc: host: value:
+    (acc: host: val:
       acc
       + ''
         Host ${host}
-          HostName ${value.address}
+          HostName ${val.ipv4}
           Port 22
       '')
     ""
-    hostAddress;
+    hostsAddr;
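For each hostsAddr entry, the fold above appends one ssh_config stanza to extraConfig; e.g. for kana the generated text is (illustrative):

  Host kana
    HostName 192.168.5.103
    Port 22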

   # define the host key for remote builders so that nix can verify all the remote builders
   # this config will be written to /etc/ssh/ssh_known_hosts
@@ -68,7 +154,7 @@
   # => { x = "bar-a"; y = "bar-b"; }
   lib.attrsets.mapAttrs
   (host: value: {
-    hostNames = [host hostAddress.${host}.address];
+    hostNames = [host hostsAddr.${host}.ipv4];
     publicKey = value.publicKey;
   })
   {
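After the rename, every generated known-hosts entry lists both the host alias and its IPv4; e.g. for ruby (illustrative; the placeholder stands in for the real key defined in the attrset that follows in the file):

  # known-hosts entry produced by the mapAttrs above
  ruby = {
    hostNames = ["ruby" "192.168.5.102"];
    publicKey = "ssh-ed25519 AAAA..."; # placeholder, not the real key
  };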