feat: new k3s cluster

Author: Ryan Yin
Date: 2024-02-18 13:49:28 +08:00
Commit: c19184a6be (parent: 1a3b02a062)
27 changed files with 508 additions and 68 deletions

View File

@@ -85,7 +85,7 @@ yabai-reload:
############################################################################
#
# Colmena - Remote NixOS deployment
# Homelab - Virtual Machines running on Proxmox
#
############################################################################
@@ -129,6 +129,41 @@ pve-tsgw:
rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-tailscale_gw.vma.zst
############################################################################
#
# Kubernetes related commands
#
############################################################################
k8s:
    colmena apply --on '@k8s'

master:
    colmena apply --on '@k8s' --on '@master'

worker:
    colmena apply --on '@k8s' --on '@worker'

pve-k8s:
    nom build .#k3s_prod_1_master_1
    rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_1.vma.zst
    nom build .#k3s_prod_1_master_2
    rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_2.vma.zst
    nom build .#k3s_prod_1_master_3
    rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_3.vma.zst
    nom build .#k3s_prod_1_worker_1
    rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_1.vma.zst
    nom build .#k3s_prod_1_worker_2
    rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_2.vma.zst
    nom build .#k3s_prod_1_worker_3
    rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_3.vma.zst
############################################################################
#
# RISC-V related commands

flake.lock (generated, 14 changed lines)
View File

@@ -9,17 +9,17 @@
]
},
"locked": {
"lastModified": 1694733633,
"narHash": "sha256-/o/OubAsPMbxqru59tLlWzUI7LBNDaoW4rFwQ2Smxcg=",
"lastModified": 1703089996,
"narHash": "sha256-ipqShkBmHKC9ft1ZAsA6aeKps32k7+XZSPwfxeHLsAU=",
"owner": "ryantm",
"repo": "agenix",
"rev": "54693c91d923fecb4cf04c4535e3d84f8dec7919",
"rev": "564595d0ad4be7277e07fa63b5a991b3c645655d",
"type": "github"
},
"original": {
"owner": "ryantm",
"repo": "agenix",
"rev": "54693c91d923fecb4cf04c4535e3d84f8dec7919",
"rev": "564595d0ad4be7277e07fa63b5a991b3c645655d",
"type": "github"
}
},
@@ -628,10 +628,10 @@
"mysecrets": {
"flake": false,
"locked": {
"lastModified": 1708183622,
"narHash": "sha256-fBhY9MhNLsDnktitkVP9jh37U9VfbDcrIld5ZkvsxJQ=",
"lastModified": 1708252756,
"narHash": "sha256-X88eosccBrDxn7BIVf8zmjhBjIDXs9PFJsVkanzSUKw=",
"ref": "refs/heads/main",
"rev": "79d8fa3312ec4a8c42ef77d09e98447dc3f9cb19",
"rev": "241dc94cf90b8d4ab8dec31eec0b07c35af42ba8",
"shallow": true,
"type": "git",
"url": "ssh://git@github.com/ryan4yin/nix-secrets.git"

View File

@@ -147,8 +147,8 @@
};
# secrets management
agenix = {
# lock with git commit at 0.14.0
url = "github:ryantm/agenix/54693c91d923fecb4cf04c4535e3d84f8dec7919";
# lock with git commit at 0.15.0
url = "github:ryantm/agenix/564595d0ad4be7277e07fa63b5a991b3c645655d";
# replaced with a type-safe reimplementation to get better error messages and fewer bugs.
# url = "github:ryan4yin/ragenix";
inputs.nixpkgs.follows = "nixpkgs";

View File

@@ -1,17 +1,14 @@
{vars_networking, ...}:
{vars_networking, mylib, ...}:
#############################################################
#
# Tailscale Gateway (homelab subnet router) - a NixOS VM running on Proxmox
#
#############################################################
let
hostName = "tailscale_gw"; # Define your hostname.
hostName = "tailscale-gw"; # Define your hostname.
hostAddress = vars_networking.hostAddress.${hostName};
in {
imports = [
./tailscale.nix
./proxy.nix
];
imports = mylib.scanPaths ./.;
# supported file systems, so we can mount any removable disks with these filesystems
boot.supportedFilesystems = [
@@ -25,7 +22,10 @@ in {
networking = {
inherit hostName;
inherit (vars_networking) defaultGateway nameservers;
inherit (vars_networking) nameservers;
# Use mainGateway instead of defaultGateway to make NAT Traversal work
defaultGateway = vars_networking.mainGateway;
networkmanager.enable = false;
interfaces.ens18 = {

View File

@@ -2,7 +2,7 @@
A router (IPv4 only) with a transparent proxy to bypass the G|F|W.
NOTE: dae (running on aquamarine) does not provide an http/socks5 proxy server, so a v2ray server is running on [homelab_tailscale_gw](../homelab_tailscale_gw/proxy.nix) to provide the http/socks5 proxy service.
NOTE: dae (running on aquamarine) does not provide an http/socks5 proxy server, so a v2ray server is running on [idols_kana](../idols_kana/proxy.nix) to provide the http/socks5 proxy service.
## Troubleshooting

View File

@@ -9,15 +9,17 @@ I prefer to use [k3s] as the Kubernetes distribution, because it's lightweight,
## Hosts
1. For production:
1. `k3s-prod-master-1`
2. `k3s-prod-worker-1`
2. `k3s-prod-worker-2`
2. `k3s-prod-worker-3`
1. `k3s-prod-1-master-1`
1. `k3s-prod-1-master-2`
1. `k3s-prod-1-master-3`
2. `k3s-prod-1-worker-1`
2. `k3s-prod-1-worker-2`
2. `k3s-prod-1-worker-3`
1. For testing:
1. `k3s-test-master-1`
2. `k3s-test-worker-1`
3. `k3s-test-worker-2`
4. `k3s-test-worker-3`
1. `k3s-test-1-master-1`
2. `k3s-test-1-worker-1`
3. `k3s-test-1-worker-2`
4. `k3s-test-1-worker-3`
[k3s]: https://github.com/k3s-io/k3s/
[what-have-k3s-removed-from-upstream-kubernetes]: https://github.com/k3s-io/k3s/?tab=readme-ov-file#what-have-you-removed-from-upstream-kubernetes

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-master-1"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,38 @@
{
  config,
  pkgs,
  ...
}: let
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = with pkgs; [
    package
    k9s
    kubectl
    istioctl
    kubernetes-helm
    skopeo
    dive # explore docker layers
  ];

  services.k3s = {
    inherit package;
    enable = true;
    # Initialize HA cluster using an embedded etcd datastore.
    # If you are configuring an HA cluster with an embedded etcd,
    # the 1st server must have `clusterInit = true`
    # and other servers must connect to it using serverAddr.
    clusterInit = true;
    role = "server";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/server
    extraFlags =
      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
      + " --write-kubeconfig-mode 644"
      + " --service-node-port-range 80-32767"
      + " --data-dir /var/lib/rancher/k3s"
      + " --etcd-expose-metrics true"
      + '' --etcd-snapshot-schedule-cron "0 */12 * * *"'';
  };
}
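As orientation for the node configs that follow in this diff: only this first server bootstraps the embedded etcd datastore; the remaining servers and the agents join it via `serverAddr`. A condensed, non-authoritative sketch of the three per-role shapes, side by side (the IP is `k3s-prod-1-master-1`'s address from the `hostAddress` map later in this commit):

```nix
# not a real module, just the three role shapes used by the files below
{
  initial-server = { role = "server"; clusterInit = true; };
  joining-server = { role = "server"; serverAddr = "https://192.168.5.108:6443"; };
  agent          = { role = "agent";  serverAddr = "https://192.168.5.108:6443"; };
}
```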

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-master-2"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,27 @@
{
  config,
  pkgs,
  vars_networking,
  ...
}: let
  serverName = "k3s-prod-1-master-1";
  serverIp = vars_networking.hostAddress.${serverName}.address;
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = [package];

  services.k3s = {
    inherit package;
    enable = true;
    role = "server";
    serverAddr = "https://${serverIp}:6443";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/server
    extraFlags =
      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
      + " --write-kubeconfig-mode 644"
      + " --service-node-port-range 80-32767"
      + " --data-dir /var/lib/rancher/k3s"
      + " --etcd-expose-metrics true"
      + '' --etcd-snapshot-schedule-cron "0 */12 * * *"'';
  };
}

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-master-3"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,27 @@
{
  config,
  pkgs,
  vars_networking,
  ...
}: let
  serverName = "k3s-prod-1-master-1";
  serverIp = vars_networking.hostAddress.${serverName}.address;
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = [package];

  services.k3s = {
    inherit package;
    enable = true;
    role = "server";
    serverAddr = "https://${serverIp}:6443";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/server
    extraFlags =
      " --write-kubeconfig /etc/k3s/kubeconfig.yml"
      + " --write-kubeconfig-mode 644"
      + " --service-node-port-range 80-32767"
      + " --data-dir /var/lib/rancher/k3s"
      + " --etcd-expose-metrics true"
      + '' --etcd-snapshot-schedule-cron "0 */12 * * *"'';
  };
}

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-worker-1"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,21 @@
{
  config,
  pkgs,
  vars_networking,
  ...
}: let
  serverName = "k3s-prod-1-master-1";
  serverIp = vars_networking.hostAddress.${serverName}.address;
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = [package];

  services.k3s = {
    inherit package;
    enable = true;
    role = "agent";
    serverAddr = "https://${serverIp}:6443";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = "--data-dir /var/lib/rancher/k3s";
  };
}

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-worker-2"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,21 @@
{
  config,
  pkgs,
  vars_networking,
  ...
}: let
  serverName = "k3s-prod-1-master-1";
  serverIp = vars_networking.hostAddress.${serverName}.address;
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = [package];

  services.k3s = {
    inherit package;
    enable = true;
    role = "agent";
    serverAddr = "https://${serverIp}:6443";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = "--data-dir /var/lib/rancher/k3s";
  };
}

View File

@@ -0,0 +1,17 @@
{
  vars_networking,
  mylib,
  ...
}: let
  hostName = "k3s-prod-1-worker-3"; # Define your hostname.
  k8sLib = import ../lib.nix;
  coreModule = k8sLib.genCoreModule {
    inherit hostName vars_networking;
  };
in {
  imports =
    (mylib.scanPaths ./.)
    ++ [
      coreModule
    ];
}

View File

@@ -0,0 +1,21 @@
{
  config,
  pkgs,
  vars_networking,
  ...
}: let
  serverName = "k3s-prod-1-master-1";
  serverIp = vars_networking.hostAddress.${serverName}.address;
  package = pkgs.k3s_1_29;
in {
  environment.systemPackages = [package];

  services.k3s = {
    inherit package;
    enable = true;
    role = "agent";
    serverAddr = "https://${serverIp}:6443";
    tokenFile = config.age.secrets."k3s-prod-1-token".path;
    # https://docs.k3s.io/cli/agent
    extraFlags = "--data-dir /var/lib/rancher/k3s";
  };
}

hosts/k8s/lib.nix (new file, 43 lines)
View File

@@ -0,0 +1,43 @@
{
  genCoreModule = {
    hostName,
    vars_networking,
  }: let
    hostAddress = vars_networking.hostAddress.${hostName};
  in {
    # supported file systems, so we can mount any removable disks with these filesystems
    boot.supportedFilesystems = [
      "ext4"
      "btrfs"
      "xfs"
      #"zfs"
      "ntfs"
      "fat"
      "vfat"
      "exfat"
      "cifs" # mount windows share
    ];

    boot.kernelModules = ["kvm-amd"];
    boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu

    networking = {
      inherit hostName;
      inherit (vars_networking) defaultGateway nameservers;
      networkmanager.enable = false;
      interfaces.ens18 = {
        useDHCP = false;
        ipv4.addresses = [hostAddress];
      };
    };

    # This value determines the NixOS release from which the default
    # settings for stateful data, like file locations and database versions
    # on your system were taken. It's perfectly fine and recommended to leave
    # this value at the release version of the first install of this system.
    # Before changing this value read the documentation for this option
    # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
    system.stateVersion = "23.11"; # Did you read the comment?
  };
}
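A minimal usage sketch of `genCoreModule`, mirroring what each per-node `default.nix` above already does (the host name is just one of the nodes from this commit; `vars_networking` is assumed to arrive via the flake's specialArgs):

```nix
{vars_networking, ...}: let
  k8sLib = import ../lib.nix;
in
  k8sLib.genCoreModule {
    # yields interfaces.ens18.ipv4.addresses = [{ address = "192.168.5.111"; prefixLength = 24; }]
    hostName = "k3s-prod-1-worker-1";
    inherit vars_networking;
  }
```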

View File

@@ -13,7 +13,7 @@ in
{name, ...}: {
deployment = {
inherit targetUser;
targetHost = name; # hostName or IP address
targetHost = builtins.replaceStrings ["_"] ["-"] name; # hostName or IP address
tags = host_tags;
};
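The underscore-to-hyphen rewrite is needed because the flake/colmena attribute names use underscores (e.g. `k3s_prod_1_master_1`) while the hostnames in the networking vars (last file in this diff) use hyphens; the builtin maps one onto the other:

```nix
builtins.replaceStrings ["_"] ["-"] "k3s_prod_1_master_1"
# => "k3s-prod-1-master-1"
```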

View File

@@ -14,7 +14,7 @@ from pathlib import Path
NIX_DAEMON_PLIST = Path("/Library/LaunchDaemons/org.nixos.nix-daemon.plist")
NIX_DAEMON_NAME = "org.nixos.nix-daemon"
# http proxy provided by my homelab's bypass router
HTTP_PROXY = "http://192.168.5.192:7890"
HTTP_PROXY = "http://192.168.5.103:7890"
pl = plistlib.loads(NIX_DAEMON_PLIST.read_bytes())

View File

@@ -216,5 +216,15 @@ in {
// high_security;
};
})
(mkIf cfg.server.kubernetes.enable {
age.secrets = {
"k3s-prod-1-token" =
{
file = "${mysecrets}/server/k3s-prod-1-token.age";
}
// high_security;
};
})
]);
}
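For cross-reference, this is how the secret declared here is consumed by the k3s node modules added elsewhere in this commit; the attribute name must match `"k3s-prod-1-token"` exactly:

```nix
{config, ...}: {
  # tokenFile points at the decrypted path produced by the age.secrets entry above
  services.k3s.tokenFile = config.age.secrets."k3s-prod-1-token".path;
}
```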

View File

@@ -80,6 +80,38 @@ in {
idol_kana_modules
{host_tags = idol_kana_tags;}
]);
k3s_prod_1_master_1 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_master_1_modules
{host_tags = k3s_prod_1_master_1_tags;}
]);
k3s_prod_1_master_2 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_master_2_modules
{host_tags = k3s_prod_1_master_2_tags;}
]);
k3s_prod_1_master_3 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_master_3_modules
{host_tags = k3s_prod_1_master_3_tags;}
]);
k3s_prod_1_worker_1 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_worker_1_modules
{host_tags = k3s_prod_1_worker_1_tags;}
]);
k3s_prod_1_worker_2 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_worker_2_modules
{host_tags = k3s_prod_1_worker_2_tags;}
]);
k3s_prod_1_worker_3 = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
k3s_prod_1_worker_3_modules
{host_tags = k3s_prod_1_worker_3_tags;}
]);
tailscale_gw = colmenaSystem (attrs.mergeAttrsList [
x64_base_args
homelab_tailscale_gw_modules

View File

@@ -24,6 +24,13 @@ in {
ruby = nixosSystem (idol_ruby_modules // base_args);
kana = nixosSystem (idol_kana_modules // base_args);
k3s_prod_1_master_1 = nixosSystem (k3s_prod_1_master_1_modules // base_args);
k3s_prod_1_master_2 = nixosSystem (k3s_prod_1_master_2_modules // base_args);
k3s_prod_1_master_3 = nixosSystem (k3s_prod_1_master_3_modules // base_args);
k3s_prod_1_worker_1 = nixosSystem (k3s_prod_1_worker_1_modules // base_args);
k3s_prod_1_worker_2 = nixosSystem (k3s_prod_1_worker_2_modules // base_args);
k3s_prod_1_worker_3 = nixosSystem (k3s_prod_1_worker_3_modules // base_args);
tailscale_gw = nixosSystem (homelab_tailscale_gw_modules // base_args);
};
@@ -47,6 +54,13 @@ in {
"ruby"
"kana"
"k3s_prod_1_master_1"
"k3s_prod_1_master_2"
"k3s_prod_1_master_3"
"k3s_prod_1_worker_1"
"k3s_prod_1_worker_2"
"k3s_prod_1_worker_3"
"tailscale_gw"
]
# generate proxmox image for virtual machines without desktop environment

View File

@@ -15,7 +15,14 @@ let
../modules/nixos/server/server.nix
../modules/nixos/server/proxmox-hardware-configuration.nix
];
# home-module.imports = [];
};
kube_base_modules = {
nixos-modules = [
../secrets/nixos.nix
../modules/nixos/server/server.nix
../modules/nixos/server/proxmox-hardware-configuration.nix
{modules.secrets.server.kubernetes.enable = true;}
];
};
in {
# --- Desktop Systems --- #
@@ -106,7 +113,64 @@ in {
++ pve_base_modules.nixos-modules;
# home-module.imports = [];
};
homelab_tailscale_gw_tags = ["tailscale_gw" "network" "homelab"];
homelab_tailscale_gw_tags = ["tailscale-gw" "network" "homelab"];
# --- Kubernetes Nodes --- #
k3s_prod_1_master_1_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_master_1
]
++ kube_base_modules.nixos-modules;
# home-module.imports = [];
};
k3s_prod_1_master_1_tags = ["k8s" "master" "prod"];
k3s_prod_1_master_2_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_master_2
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_master_2_tags = ["k8s" "master" "prod"];
k3s_prod_1_master_3_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_master_3
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_master_3_tags = ["k8s" "master" "prod"];
k3s_prod_1_worker_1_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_worker_1
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_1_tags = ["k8s" "worker" "prod"];
k3s_prod_1_worker_2_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_worker_2
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_2_tags = ["k8s" "worker" "prod"];
k3s_prod_1_worker_3_modules = {
nixos-modules =
[
../hosts/k8s/k3s_prod_1_worker_3
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_3_tags = ["k8s" "worker" "prod"];
# --- RISC-V / AARCH64 Systems --- #

View File

@@ -1,49 +1,32 @@
{lib, ...}: rec {
defaultGateway = "192.168.5.101";
mainGateway = "192.168.5.1"; # main router
defaultGateway = "192.168.5.101"; # subrouter with a transparent proxy
nameservers = [
"119.29.29.29" # DNSPod
"223.5.5.5" # AliDNS
];
prefixLength = 24;
hostAddress = {
"ai" = {
inherit prefixLength;
address = "192.168.5.100";
hostAddress =
lib.attrsets.mapAttrs
(name: address: {inherit prefixLength address;})
{
"ai" = "192.168.5.100";
"aquamarine" = "192.168.5.101";
"ruby" = "192.168.5.102";
"kana" = "192.168.5.103";
"nozomi" = "192.168.5.104";
"yukina" = "192.168.5.105";
"chiaya" = "192.168.5.106";
"suzu" = "192.168.5.107";
"k3s-prod-1-master-1" = "192.168.5.108";
"k3s-prod-1-master-2" = "192.168.5.109";
"k3s-prod-1-master-3" = "192.168.5.110";
"k3s-prod-1-worker-1" = "192.168.5.111";
"k3s-prod-1-worker-2" = "192.168.5.112";
"k3s-prod-1-worker-3" = "192.168.5.113";
"tailscale-gw" = "192.168.5.192";
};
"aquamarine" = {
inherit prefixLength;
address = "192.168.5.101";
};
"ruby" = {
inherit prefixLength;
address = "192.168.5.102";
};
"kana" = {
inherit prefixLength;
address = "192.168.5.103";
};
"nozomi" = {
inherit prefixLength;
address = "192.168.5.104";
};
"yukina" = {
inherit prefixLength;
address = "192.168.5.105";
};
"chiaya" = {
inherit prefixLength;
address = "192.168.5.106";
};
"suzu" = {
inherit prefixLength;
address = "192.168.5.107";
};
"tailscale_gw" = {
inherit prefixLength;
address = "192.168.5.192";
};
};
ssh = {
# define the host alias for remote builders
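A worked example of what the `mapAttrs` refactor above produces: each plain IP string is expanded back into the attrset shape the old hand-written entries had, so existing call sites keep working unchanged:

```nix
# vars_networking.hostAddress."k3s-prod-1-master-1" now evaluates to:
{
  address = "192.168.5.108";
  prefixLength = 24;
}
```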