Merge pull request #71 from ryan4yin/kubevirt

feat: kubevirt on k3s
Authored by Ryan Yin on 2024-03-03 20:03:43 +08:00; committed by GitHub.
63 changed files with 17443 additions and 1384 deletions

Justfile (121 changed lines)

@@ -85,12 +85,28 @@ yabai-reload:
############################################################################
#
# Homelab - Virtual Machines running on Proxmox
# Homelab - NixOS servers running on bare metal
#
############################################################################
colmena-ssh-key:
ssh-add /etc/agenix/ssh-key-romantic
virt:
colmena apply --on '@virt-*' --verbose --show-trace
shoryu:
colmena apply --on '@shoryu' --verbose --show-trace
shushou:
colmena apply --on '@shushou' --verbose --show-trace
youko:
colmena apply --on '@youko' --verbose --show-trace
############################################################################
#
# Homelab - Virtual Machines running on Kubevirt
#
############################################################################
lab:
colmena apply --on '@homelab-*' --verbose --show-trace
@@ -109,22 +125,22 @@ kana:
tsgw:
colmena apply --on '@tailscale-gw' --verbose --show-trace
pve-aqua:
nom build .#aquamarine
rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-aquamarine.vma.zst
pve-ruby:
nom build .#ruby
rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-ruby.vma.zst
pve-kana:
nom build .#kana
rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-kana.vma.zst
pve-tsgw:
nom build .#tailscale_gw
rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-tailscale_gw.vma.zst
# pve-aqua:
# nom build .#aquamarine
# rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-aquamarine.vma.zst
#
# pve-ruby:
# nom build .#ruby
# rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-ruby.vma.zst
#
# pve-kana:
# nom build .#kana
# rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-kana.vma.zst
#
# pve-tsgw:
# nom build .#tailscale_gw
# rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-tailscale_gw.vma.zst
#
############################################################################
#
@@ -133,33 +149,33 @@ pve-tsgw:
############################################################################
k8s:
colmena apply --on '@k8s-*'
colmena apply --on '@k8s-*' --verbose --show-trace
master:
colmena apply --on '@k8s-prod-master'
colmena apply --on '@k8s-prod-master-*' --verbose --show-trace
worker:
colmena apply --on '@k8s-prod-worker'
pve-k8s:
nom build .#k3s_prod_1_master_1
rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_1.vma.zst
nom build .#k3s_prod_1_master_2
rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_2.vma.zst
nom build .#k3s_prod_1_master_3
rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_3.vma.zst
nom build .#k3s_prod_1_worker_1
rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_1.vma.zst
nom build .#k3s_prod_1_worker_2
rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_2.vma.zst
nom build .#k3s_prod_1_worker_3
rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_3.vma.zst
colmena apply --on '@k8s-prod-worker-*' --verbose --show-trace
# pve-k8s:
# nom build .#k3s_prod_1_master_1
# rsync -avz --progress --copy-links result root@um560:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_1.vma.zst
#
# nom build .#k3s_prod_1_master_2
# rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_2.vma.zst
#
# nom build .#k3s_prod_1_master_3
# rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_master_3.vma.zst
#
# nom build .#k3s_prod_1_worker_1
# rsync -avz --progress --copy-links result root@gtr5:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_1.vma.zst
#
# nom build .#k3s_prod_1_worker_2
# rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_2.vma.zst
#
# nom build .#k3s_prod_1_worker_3
# rsync -avz --progress --copy-links result root@s500plus:/var/lib/vz/dump/vzdump-qemu-k3s_prod_1_worker_3.vma.zst
#
############################################################################
#
@@ -168,16 +184,13 @@ pve-k8s:
############################################################################
riscv:
colmena apply --on '@riscv'
riscv-debug:
colmena apply --on '@riscv' --verbose --show-trace
nozomi:
colmena apply --on '@nozomi'
colmena apply --on '@nozomi' --verbose --show-trace
yukina:
colmena apply --on '@yukina'
colmena apply --on '@yukina' --verbose --show-trace
############################################################################
#
@@ -186,13 +199,10 @@ yukina:
############################################################################
aarch:
colmena apply --on '@aarch'
aarch-debug:
colmena apply --on '@aarch' --verbose --show-trace
suzu:
colmena apply --on '@suzu'
colmena apply --on '@suzu' --verbose --show-trace
suzu-debug:
colmena apply --on '@suzu' --verbose --show-trace
@@ -252,3 +262,14 @@ emacs-purge:
emacs-reload:
doom sync
{{reload-emacs-cmd}}
# =================================================
#
# Kubernetes related commands
#
# =================================================
del-failed:
kubectl delete pod --all-namespaces --field-selector="status.phase==Failed"
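The same cleanup can be scripted against the API directly; a minimal sketch using the official `kubernetes` Python client (assuming it is installed and `~/.kube/config` is valid), mirroring the `del-failed` recipe:

```python
# Sketch: delete all Failed pods across namespaces, like `del-failed` above.
from kubernetes import client, config

config.load_kube_config()  # assumes a working kubeconfig
v1 = client.CoreV1Api()

failed = v1.list_pod_for_all_namespaces(field_selector="status.phase==Failed")
for pod in failed.items:
    print(f"deleting {pod.metadata.namespace}/{pod.metadata.name}")
    v1.delete_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
```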

flake.lock (generated, 21 changed lines)

@@ -138,6 +138,26 @@
"type": "github"
}
},
"disko": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1709286488,
"narHash": "sha256-RDpTZ72zLu05djvXRzK76Ysqp9zSdh84ax/edEaJucs=",
"owner": "nix-community",
"repo": "disko",
"rev": "bde7dd352c07d43bd5b8245e6c39074a391fdd46",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "disko",
"type": "github"
}
},
"doomemacs": {
"flake": false,
"locked": {
@@ -1209,6 +1229,7 @@
"anyrun": "anyrun",
"astronvim": "astronvim",
"daeuniverse": "daeuniverse",
"disko": "disko",
"doomemacs": "doomemacs",
"home-manager": "home-manager_2",
"hyprland": "hyprland",

View File

@@ -156,6 +156,11 @@
nix-gaming.url = "github:fufexan/nix-gaming";
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
};
# add git hooks to format nix code before commit
pre-commit-hooks = {
url = "github:cachix/pre-commit-hooks.nix";

View File

@@ -12,6 +12,7 @@
kubectl
istioctl
kubevirt # virtctl
kubernetes-helm
];

View File

@@ -56,3 +56,5 @@ When building some packages for riscv64 or aarch64, I often have no cache availa
![](/_img/12kingdoms-1.webp)
![](/_img/12kingdoms-Youko-Rakushun.webp)
[List of Frieren characters](https://en.wikipedia.org/wiki/List_of_Frieren_characters)

View File

@@ -0,0 +1,33 @@
# Disko Config
Generate a LUKS keyfile to encrypt the root partition; it is used by disko.
```bash
# partition the usb stick
parted /dev/sdb -- mklabel gpt
parted /dev/sdb -- mkpart primary 2M 512MB
parted /dev/sdb -- mkpart primary 512MB 1024MB
mkfs.fat -F 32 -n NIXOS_DSC /dev/sdb1
mkfs.fat -F 32 -n NIXOS_K3S /dev/sdb2
# Generate a keyfile from the true random number generator
KEYFILE=./kubevirt-luks-keyfile
dd bs=8192 count=4 iflag=fullblock if=/dev/random of=$KEYFILE
# generate token for k3s
K3S_TOKEN_FILE=./kubevirt-k3s-token
K3S_TOKEN=$(grep -ao '[A-Za-z0-9]' < /dev/random | head -64 | tr -d '\n' ; echo "")
echo $K3S_TOKEN > $K3S_TOKEN_FILE
# copy the keyfile and token to the usb stick
KEYFILE=./kubevirt-luks-keyfile
DEVICE=/dev/disk/by-label/NIXOS_DSC
dd bs=8192 count=4 iflag=fullblock if=$KEYFILE of=$DEVICE
K3S_TOKEN_FILE=./kubevirt-k3s-token
USB_PATH=/run/media/ryan/NIXOS_K3S
cp $K3S_TOKEN_FILE $USB_PATH
```
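Since `dd` writes the keyfile to the raw device without a filesystem, it is worth confirming the copy landed intact before relying on it to unlock the disk. A minimal verification sketch (paths and size taken from the commands above; reading the device requires root):

```python
# Sketch: verify the keyfile on the USB stick matches the local copy.
# Reads exactly 8192 * 4 bytes, the amount written by the dd command above.
import hashlib

KEYFILE = "./kubevirt-luks-keyfile"
DEVICE = "/dev/disk/by-label/NIXOS_DSC"
SIZE = 8192 * 4

def digest(path: str) -> str:
    with open(path, "rb") as f:
        return hashlib.sha256(f.read(SIZE)).hexdigest()

assert digest(KEYFILE) == digest(DEVICE), "keyfile on the USB stick differs!"
print("keyfile verified:", digest(DEVICE))
```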

View File

@@ -0,0 +1,105 @@
{
# contains the k3s token
fileSystems."/run/media/nixos_k3s" = {
device = "/dev/disk/by-label/NIXOS_K3S";
fsType = "vfat";
mountOptions = [
"ro"
];
};
disko.devices = {
disk = {
sda = {
type = "disk";
device = "/dev/nvme0n1";
content = {
type = "gpt";
partitions = {
# The EFI & Boot partition
ESP = {
size = "630M";
type = "EF00";
content = {
type = "filesystem";
format = "vfat";
mountpoint = "/boot/efi";
mountOptions = [
"defaults"
];
};
};
# The root partition
luks = {
size = "100%";
content = {
type = "luks";
name = "crypted";
settings = {
keyFile = "/dev/disk/by-label/NIXOS_DSC"; # The keyfile is stored on a USB stick
keyFileSize = 8192 * 4; # the keyfile is 8192 * 4 = 32768 bytes, matching the dd command that generated it
keyFileOffset = 0;
fallbackToPassword = true;
allowDiscards = true;
};
# Whether to add a boot.initrd.luks.devices entry for the specified disk.
initrdUnlock = true;
# Encrypt the root partition with LUKS2 and argon2id; cryptsetup will prompt for a passphrase, which can later be used to unlock the partition.
# cryptsetup luksFormat
extraFormatArgs = [
"--type luks2"
"--cipher aes-xts-plain64"
"--hash sha512"
"--iter-time 5000"
"--key-size 256"
"--pbkdf argon2id"
# use true random data from /dev/random; this will block until enough entropy is available
"--use-random"
];
extraOpenArgs = [
"--timeout 10"
];
content = {
type = "btrfs";
extraArgs = ["-f"];
subvolumes = {
"@root" = {
mountpoint = "/";
mountOptions = ["compress-force=zstd:1" "noatime"];
};
"@home" = {
mountpoint = "/home";
mountOptions = ["compress-force=zstd:1"];
};
"@lib" = {
mountpoint = "/var/lib";
mountOptions = ["compress-force=zstd:1"];
};
"@nix" = {
mountpoint = "/nix";
mountOptions = ["compress-force=zstd:1" "noatime"];
};
"@tmp" = {
mountpoint = "/tmp";
mountOptions = ["compress-force=zstd:1" "noatime"];
};
"@snapshots" = {
mountpoint = "/snapshots";
mountOptions = ["compress-force=zstd:1" "noatime"];
};
"@swap" = {
mountpoint = "/swap";
swap.swapfile.size = "8192M";
};
};
};
};
};
};
};
};
};
};
}
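After disko has formatted the disk, the resulting LUKS2 header can be checked against the `extraFormatArgs` above. A small sketch shelling out to `cryptsetup` (the partition path is an assumption: on this GPT layout the LUKS partition is the second one on `/dev/nvme0n1`):

```python
# Sketch: confirm the LUKS header matches the requested format options.
# Requires root; the partition path is assumed from the GPT layout above.
import subprocess

dump = subprocess.run(
    ["cryptsetup", "luksDump", "/dev/nvme0n1p2"],
    capture_output=True, text=True, check=True,
).stdout

for expected in ("LUKS2", "aes-xts-plain64", "argon2id", "sha512"):
    assert expected in dump, f"{expected} not found in LUKS header"
print("LUKS header matches the disko format arguments")
```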

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-master-1"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -32,6 +32,8 @@ in {
" --write-kubeconfig /home/${username}/.kube/config"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-master-2"; # Define your hostname.
hostName = "k3s-prod-1-master-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -20,6 +20,8 @@ in {
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-master-3"; # Define your hostname.
hostName = "k3s-prod-1-master-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -20,6 +20,8 @@ in {
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --node-taint=CriticalAddonsOnly=true:NoExecute" # prevent workloads from running on the master
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-1"; # Define your hostname.
hostName = "k3s-prod-1-worker-1"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -16,6 +16,8 @@ in {
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags = "--data-dir /var/lib/rancher/k3s";
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-2"; # Define your hostname.
hostName = "k3s-prod-1-worker-2"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -16,6 +16,8 @@ in {
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags = "--data-dir /var/lib/rancher/k3s";
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -1,12 +1,13 @@
{
pkgs,
vars_networking,
mylib,
...
}: let
hostName = "k3s-prod-1-worker-3"; # Define your hostname.
hostName = "k3s-prod-1-worker-3"; # define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.genCoreModule {
inherit hostName vars_networking;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =

View File

@@ -16,6 +16,8 @@ in {
serverAddr = "https://${serverIp}:6443";
tokenFile = config.age.secrets."k3s-prod-1-token".path;
# https://docs.k3s.io/cli/agent
extraFlags = "--data-dir /var/lib/rancher/k3s";
extraFlags =
" --node-label=node-type=worker"
+ " --data-dir /var/lib/rancher/k3s";
};
}

View File

@@ -0,0 +1,22 @@
{
pkgs,
mylib,
vars_networking,
disko,
...
}: let
# MoreFine - S500Plus
hostName = "kubevirt-shoryu"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../kubevirt-disko-fs.nix
];
}

View File

@@ -0,0 +1,41 @@
{
config,
pkgs,
username,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"
+ ''--etcd-snapshot-schedule-cron "0 */12 * * *"'';
};
}
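A tiny sanity check for the `--write-kubeconfig*` flags above; a sketch that asserts the kubeconfig exists at the configured path with mode 644:

```python
# Sketch: verify k3s wrote the kubeconfig where (and how) the flags request.
import os
import stat

path = "/etc/k3s/kubeconfig.yml"
mode = stat.S_IMODE(os.stat(path).st_mode)
assert mode == 0o644, f"{path} has mode {oct(mode)}, expected 0o644"
print(f"{path} OK (mode 644)")
```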

View File

@@ -0,0 +1,21 @@
{
pkgs,
mylib,
vars_networking,
disko,
...
}: let
hostName = "kubevirt-shushou"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../kubevirt-disko-fs.nix
];
}

View File

@@ -0,0 +1,41 @@
{
config,
pkgs,
username,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"
+ ''--etcd-snapshot-schedule-cron "0 */12 * * *"'';
};
}

View File

@@ -0,0 +1,21 @@
{
pkgs,
mylib,
vars_networking,
disko,
...
}: let
hostName = "kubevirt-youko"; # Define your hostname.
k8sLib = import ../lib.nix;
coreModule = k8sLib.gencoreModule {
inherit pkgs hostName vars_networking;
};
in {
imports =
(mylib.scanPaths ./.)
++ [
coreModule
disko.nixosModules.default
../kubevirt-disko-fs.nix
];
}

View File

@@ -0,0 +1,41 @@
{
config,
pkgs,
username,
...
}: let
package = pkgs.k3s_1_29;
in {
environment.systemPackages = with pkgs; [
package
k9s
kubectl
istioctl
kubernetes-helm
skopeo
dive # explore docker layers
];
services.k3s = {
inherit package;
enable = true;
# Initialize HA cluster using an embedded etcd datastore.
# If you are configuring an HA cluster with an embedded etcd,
# the 1st server must have `clusterInit = true`
# and other servers must connect to it using serverAddr.
clusterInit = true;
role = "server";
tokenFile = "/run/media/nixos_k3s/kubevirt-k3s-token";
# https://docs.k3s.io/cli/server
extraFlags =
" --write-kubeconfig /etc/k3s/kubeconfig.yml"
+ " --write-kubeconfig-mode 644"
+ " --service-node-port-range 80-32767"
+ " --kube-apiserver-arg='--allow-privileged=true'" # required by kubevirt
+ " --data-dir /var/lib/rancher/k3s"
+ " --disable-helm-controller"
+ " --etcd-expose-metrics true"
+ ''--etcd-snapshot-schedule-cron "0 */12 * * *"'';
};
}

View File

@@ -1,7 +1,9 @@
{
genCoreModule = {
gencoreModule = {
pkgs,
hostName,
vars_networking,
...
}: let
hostAddress = vars_networking.hostAddress.${hostName};
in {
@@ -18,15 +20,40 @@
"cifs" # mount windows share
];
boot.kernelModules = ["kvm-amd"];
boot.kernelModules = ["kvm-amd" "vfio-pci"];
boot.extraModprobeConfig = "options kvm_amd nested=1"; # for amd cpu
environment.systemPackages = with pkgs; [
# Validate Hardware Virtualization Support via:
# virt-host-validate qemu
libvirt
];
# Enable the Open vSwitch as a systemd service
# It's required by kubernetes' ovs-cni plugin.
virtualisation.vswitch = {
enable = true;
# reset the Open vSwitch configuration database to a default configuration on every start of the systemd ovsdb.service
resetOnStart = false;
};
networking.vswitches = {
# https://github.com/k8snetworkplumbingwg/ovs-cni/blob/main/docs/demo.md
ovsbr1 = {
interfaces = {
# Attach the interfaces to OVS bridge
# This interface should not be used by the host itself!
ens18 = {};
};
};
};
networking = {
inherit hostName;
inherit (vars_networking) defaultGateway nameservers;
networkmanager.enable = false;
interfaces.ens18 = {
# Set the host's address on the OVS bridge interface instead of the physical interface!
interfaces.ovsbr1 = {
useDHCP = false;
ipv4.addresses = [hostAddress];
};
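Because the host's only address moves to `ovsbr1`, a broken bridge makes the node unreachable, so it pays to verify the setup after switching. A hedged sketch combining the two checks referenced in the comments above (`ovs-vsctl` and `virt-host-validate`):

```python
# Sketch: confirm ens18 is attached to ovsbr1 and that KVM support looks sane.
import subprocess

ports = subprocess.run(
    ["ovs-vsctl", "list-ports", "ovsbr1"],
    capture_output=True, text=True, check=True,
).stdout.split()
assert "ens18" in ports, f"ens18 not attached to ovsbr1, got: {ports}"

# `virt-host-validate qemu` exits non-zero when a mandatory check fails.
subprocess.run(["virt-host-validate", "qemu"], check=True)
print("ovsbr1 and hardware-virtualization checks passed")
```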

View File

@@ -85,7 +85,7 @@ cryptsetup --help
# NOTE: `cat shoukei.md | grep luks > luks.sh` to generate this script
# encrypt the root partition with luks2 and argon2id, will prompt for a passphrase, which will be used to unlock the partition.
cryptsetup luksFormat --type luks2 --cipher aes-xts-plain64 --hash sha512 --iter-time 5000 --key-size 256 --pbkdf argon2id --use-urandom --verify-passphrase /dev/nvme0n1p2
cryptsetup luksFormat --type luks2 --cipher aes-xts-plain64 --hash sha512 --iter-time 5000 --key-size 256 --pbkdf argon2id --use-random --verify-passphrase /dev/nvme0n1p2
# show status
cryptsetup luksDump /dev/nvme0n1p2

View File

@@ -69,7 +69,7 @@ cryptsetup --help
# NOTE: `cat shoukei.md | grep luks > format.sh` to generate this script
# encrypt the root partition with luks2 and argon2id, will prompt for a passphrase, which will be used to unlock the partition.
cryptsetup luksFormat --type luks2 --cipher aes-xts-plain64 --hash sha512 --iter-time 5000 --key-size 256 --pbkdf argon2id --use-urandom --verify-passphrase /dev/nvme0n1p4
cryptsetup luksFormat --type luks2 --cipher aes-xts-plain64 --hash sha512 --iter-time 5000 --key-size 256 --pbkdf argon2id --use-random --verify-passphrase /dev/nvme0n1p4
# show status
cryptsetup luksDump /dev/nvme0n1p4

pulumi/k3s-prod-1/.envrc (new file, 1 line)

@@ -0,0 +1 @@
use flake

pulumi/k3s-prod-1/.gitignore (new file, 3 lines)

@@ -0,0 +1,3 @@
*.pyc
venv/
__pycache__/

View File

@@ -1 +0,0 @@
config:

View File

@@ -1,3 +1,6 @@
name: k3s-prod-1
runtime: go
description: A Go program to deploy all the resources for the k3s-prod-1 cluster
runtime:
name: python
options:
virtualenv: venv
description: A Python program to deploy a Helm chart onto a Kubernetes cluster

View File

@@ -0,0 +1,29 @@
import pulumi
import pulumi_kubernetes as kubernetes
from monitoring import *
from networking import *
from visualization import *
provider = kubernetes.Provider(
"k3s-prod-1",
# The name of the kubeconfig context to use
context="default",
# Disable server-side apply to make the cluster more reproducible.
# It will (TODO: not sure) discard any server-side changes to resources when set to false.
enable_server_side_apply=False,
)
# networking
new_cert_manager = NewCertManager(provider)
# monitoring
new_victoria_metrics = NewVictoriaMetrics(provider)
# visualization
new_kubevirt = NewKubeVirt(provider)
new_virtual_machines = NewVirtualMachines(provider, new_kubevirt)
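With everything routed through one explicit provider, the stack can also be driven programmatically rather than via the CLI; a sketch using Pulumi's Automation API (the stack name is an assumption; `pulumi up` from the venv works just as well):

```python
# Sketch: preview the k3s-prod-1 stack through Pulumi's Automation API.
from pulumi import automation as auto

stack = auto.create_or_select_stack(
    stack_name="k3s-prod-1",  # assumed stack name
    work_dir=".",             # directory containing Pulumi.yaml
)
stack.preview(on_output=print)
```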

pulumi/k3s-prod-1/flake.lock (generated, new file, 58 lines)

@@ -0,0 +1,58 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1707956935,
"narHash": "sha256-ZL2TrjVsiFNKOYwYQozpbvQSwvtV/3Me7Zwhmdsfyu4=",
"path": "/nix/store/m9s94alic7s2r6v47p7lwfj58ibc076a-source",
"rev": "a4d4fe8c5002202493e87ec8dbc91335ff55552c",
"type": "path"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"utils": "utils"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -0,0 +1,58 @@
{
description = "Python venv development template";
inputs = {
utils.url = "github:numtide/flake-utils";
};
outputs = {
self,
nixpkgs,
utils,
...
}:
utils.lib.eachDefaultSystem (system: let
pkgs = import nixpkgs {inherit system;};
in {
devShells.default = pkgs.mkShell {
name = "pulumi-venv";
venvDir = "./venv";
buildInputs = with pkgs; [
# A Python interpreter including the 'venv' module is required to bootstrap
# the environment.
python3Packages.python
# This executes some shell code to initialize a venv in $venvDir before
# dropping into the shell
python3Packages.venvShellHook
# pulumi related packages
pulumi
pulumictl
tf2pulumi
crd2pulumi
pulumiPackages.pulumi-random
pulumiPackages.pulumi-command
pulumiPackages.pulumi-aws-native
pulumiPackages.pulumi-language-go
pulumiPackages.pulumi-language-python
pulumiPackages.pulumi-language-nodejs
];
# Run this command, only after creating the virtual environment
postVenvCreation = ''
unset SOURCE_DATE_EPOCH
pip install -r requirements.txt
'';
# Now we can execute any commands within the virtual environment.
# This is optional and can be left out to run pip manually.
postShellHook = ''
# allow pip to install wheels
unset SOURCE_DATE_EPOCH
# fix `libstdc++.so.6 => not found`
LD_LIBRARY_PATH="${pkgs.stdenv.cc.cc.lib}/lib"
'';
};
});
}

View File

@@ -1,103 +0,0 @@
module k3s-prod-1
go 1.21
toolchain go1.21.6
require (
github.com/pulumi/pulumi-kubernetes v1.6.0
github.com/pulumi/pulumi-kubernetes/sdk/v4 v4.7.1
github.com/pulumi/pulumi/sdk/v3 v3.106.0
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
github.com/aead/chacha20 v0.0.0-20180709150244-8b13a72661da // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/charmbracelet/bubbles v0.16.1 // indirect
github.com/charmbracelet/bubbletea v0.24.2 // indirect
github.com/charmbracelet/lipgloss v0.7.1 // indirect
github.com/cheggaaa/pb v1.0.29 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/djherbis/times v1.5.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.5.0 // indirect
github.com/go-git/go-git/v5 v5.11.0 // indirect
github.com/gofrs/flock v0.7.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/hcl/v2 v2.17.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/opentracing/basictracer-go v1.1.0 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pgavlin/fx v0.1.6 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/term v1.1.0 // indirect
github.com/pulumi/appdash v0.0.0-20231130102222-75f619a67231 // indirect
github.com/pulumi/esc v0.6.2 // indirect
github.com/pulumi/pulumi/sdk v0.0.0-20200324171821-8ce10e1dfe54 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/skeema/knownhosts v1.2.1 // indirect
github.com/spf13/cobra v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/src-d/gcfg v1.4.0 // indirect
github.com/texttheater/golang-levenshtein v1.0.1 // indirect
github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect
github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/zclconf/go-cty v1.13.2 // indirect
go.uber.org/atomic v1.9.0 // indirect
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.19.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.15.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.15.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130 // indirect
google.golang.org/grpc v1.57.1 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog v1.0.0 // indirect
lukechampine.com/frand v1.4.2 // indirect
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 // indirect
)

File diff suppressed because it is too large.

View File

@@ -1,21 +0,0 @@
package main
import (
"k3s-prod-1/monitoring"
"k3s-prod-1/networking"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
pulumi.Run(func(ctx *pulumi.Context) error {
if err := monitoring.NewMonitoring(ctx, "prod"); err != nil {
return err
}
if err := networking.NewNetworking(ctx, "prod"); err != nil {
return err
}
return nil
})
}

View File

@@ -0,0 +1 @@
from .victoria_metrics import *

View File

@@ -1,28 +0,0 @@
package monitoring
import (
corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func NewMonitoring(ctx *pulumi.Context, env string) error {
// Create a Kubernetes Namespace
namespaceName := "monitoring"
namespace, err := corev1.NewNamespace(ctx, namespaceName, &corev1.NamespaceArgs{
Metadata: &metav1.ObjectMetaArgs{
Name: pulumi.String(namespaceName),
},
})
if err != nil {
return err
}
// Export the name of the namespace
ctx.Export("monitoringNamespaceName", namespace.Metadata.Name())
if err := NewVictoriaMetrics(ctx, env, namespace); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,53 @@
from pulumi import ResourceOptions, FileAsset
import pulumi_kubernetes as kubernetes
from pathlib import Path
class NewVictoriaMetrics:
NAMESPACE = "monitoring"
def __init__(self, provider):
self.provider = provider
app_labels = {
"app": "monitoring",
}
victoriaMetricsvaluesPath = (
Path(__file__).parent / "victoria_metrics_helm_values.yml"
)
# Create a namespace (user supplies the name of the namespace)
self.ns = kubernetes.core.v1.Namespace(
"monitoring",
metadata=kubernetes.meta.v1.ObjectMetaArgs(
labels=app_labels,
name=self.NAMESPACE,
),
opts=ResourceOptions(
provider=self.provider,
),
)
# https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack
self.victoriaMetrics = kubernetes.helm.v3.Release(
"victoria-metrics-k8s-stack",
chart="victoria-metrics-k8s-stack",
namespace=self.ns.metadata.name,
repository_opts=kubernetes.helm.v3.RepositoryOptsArgs(
repo="https://victoriametrics.github.io/helm-charts/",
),
version="0.19.2",
skip_crds=False,
atomic=True, # purges chart on fail
cleanup_on_fail=True, # Allow deletion of new resources created in this upgrade when upgrade fails.
dependency_update=True, # run helm dependency update before installing the chart
reset_values=True, # When upgrading, reset the values to the ones built into the chart
# verify=True, # verify the package before installing it
# recreate_pods=True, # performs pods restart for the resource if applicable
value_yaml_files=[FileAsset(victoriaMetricsvaluesPath)],
opts=ResourceOptions(
provider=self.provider,
),
)

View File

@@ -0,0 +1,37 @@
# https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack
#
# Pulumi will complain `ValueError: unexpected input of type set` if some values are not available in the Helm chart!
grafana:
enabled: true
defaultDashboardsTimezone: utc+8
ingress:
enabled: true
hosts:
- k8s-grafana.writefor.fun
persistence:
type: pvc
enabled: false
kube-state-metrics:
enabled: true
prometheus-node-exporter:
# install node exporter via nixos, not container
enabled: false
vmagent:
# vmagent collects metrics from targets and sends them to a remote storage
enabled: true
vmalert:
# vmalert evaluates Prometheus-compatible alerting and recording rules
enabled: true
vmsingle:
# Single-node VictoriaMetrics for storing metrics.
# https://docs.victoriametrics.com/faq/#which-victoriametrics-type-is-recommended-for-use-in-production---single-node-or-cluster
# vmsingle = vmcluster(vmselect + vmstorage + vminsert)
enabled: true
ingress:
hosts:
- vm.writefor.fun
spec:
storage:
resources:
requests:
storage: 50Gi
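Once the chart settles, the `vmsingle` ingress declared above answers ordinary PromQL; a sketch probing it with `requests` (the host comes from the values file; the scheme and reachability are assumptions):

```python
# Sketch: ask vmsingle how many scrape targets are currently up.
import requests

resp = requests.get(
    "http://vm.writefor.fun/api/v1/query",  # ingress host from the values above
    params={"query": "count(up == 1)"},
    timeout=10,
)
resp.raise_for_status()
print(resp.json()["data"]["result"])
```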

View File

@@ -1,85 +0,0 @@
package monitoring
import (
corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func NewVictoriaMetrics(ctx *pulumi.Context, env string, namespace corev1.Namespace) error {
var opts []pulumi.ResourceOption
opts = append(opts, pulumi.DependsOn([]pulumi.Resource{namespace}))
// https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack
_, err := helm.NewChart(ctx, "victoria-metrics-k8s-stack", helm.ChartArgs{
Chart: pulumi.String("victoria-metrics-k8s-stack"),
Version: pulumi.String("0.19.0"),
Namespace: pulumi.String(namespace.Metadata.Name()),
FetchArgs: helm.FetchArgs{
Repo: pulumi.String("https://victoriametrics.github.io/helm-charts/"),
},
// https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-k8s-stack/README.md
Values: pulumi.Map{
// grafana.ingress.enabled: true
"ingress": pulumi.Map{
"enabled": pulumi.Bool(true),
},
// grafana.defaultDashboardsTimezone: utc+8
// grafana.ingress.hosts[0].host: grafana.example.com
"grafana": pulumi.Map{
"defaultDashboardsTimezone": pulumi.String("utc+8"),
"ingress": pulumi.Map{
"hosts": pulumi.Array{
pulumi.Map{
"host": pulumi.String("k8s-grafana.writefor.fun"),
},
},
},
},
// prometheus-node-exporter.enabled: false
"nodeExporter": pulumi.Map{
"enabled": pulumi.Bool(false),
},
"vmsingle": pulumi.Map{
"enabled": pulumi.Bool(true),
"ingress": pulumi.Map{
"hosts": pulumi.Array{
pulumi.Map{
"host": pulumi.String("vm.writefor.fun"),
},
},
},
// https://docs.victoriametrics.com/operator/api/#vmsinglespec
"spec": pulumi.Map{
"affinity": pulumi.Map{
"nodeAffinity": pulumi.Map{
"requiredDuringSchedulingIgnoredDuringExecution": pulumi.Map{
"nodeSelectorTerms": pulumi.Array{
pulumi.Map{
"matchExpressions": pulumi.Array{
pulumi.Map{
"key": pulumi.String("kubernetes.io/arch"),
"operator": pulumi.String("In"),
"values": pulumi.Array{
pulumi.String("amd64"),
},
},
},
},
},
},
},
},
"storage": pulumi.Map{
"resources": pulumi.Map{
"requests": pulumi.Map{
"storage": pulumi.String("50Gi"),
},
},
},
},
},
},
}, opts...)
return err
}

View File

@@ -0,0 +1 @@
from .cert_manager import *

View File

@@ -1,25 +0,0 @@
package networking
import (
corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
"github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/helm/v3"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func NewCertManager(ctx *pulumi.Context, env string, namespace corev1.Namespace) error {
var opts []pulumi.ResourceOption
opts = append(opts, pulumi.DependsOn([]pulumi.Resource{namespace}))
_, err := helm.NewChart(ctx, "cert-manager", helm.ChartArgs{
Chart: pulumi.String("cert-manager"),
Version: pulumi.String("corev1.14.2 "),
Namespace: pulumi.String(namespace.Metadata.Name()),
FetchArgs: helm.FetchArgs{
Repo: pulumi.String("https://charts.jetstack.io"),
},
// https://cert-manager.io/docs/installation/helm/
Values: pulumi.Map{},
}, opts...)
return err
}

View File

@@ -0,0 +1,30 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.core.v1 import Namespace
from pulumi_kubernetes_cert_manager import CertManager, ReleaseArgs
class NewCertManager:
NAMESPACE = "cert-manager"
def __init__(self, provider):
self.provider = provider
self.ns = Namespace(
"cert-manager",
metadata={"name": self.NAMESPACE},
opts=ResourceOptions(
provider=self.provider,
),
)
# Install cert-manager into our cluster.
self.manager = CertManager(
"cert-manager",
install_crds=True,
helm_options=ReleaseArgs(
namespace=self.NAMESPACE,
),
opts=ResourceOptions(
provider=self.provider,
),
)

View File

@@ -1,28 +0,0 @@
package networking
import (
corev1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/core/v1"
metav1 "github.com/pulumi/pulumi-kubernetes/sdk/v4/go/kubernetes/meta/v1"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func NewNetworking(ctx *pulumi.Context, env string) error {
// Create a Kubernetes Namespace
namespaceName := "networking"
namespace, err := corev1.NewNamespace(ctx, namespaceName, &corev1.NamespaceArgs{
Metadata: &metav1.ObjectMetaArgs{
Name: pulumi.String(namespaceName),
},
})
if err != nil {
return err
}
// Export the name of the namespace
ctx.Export("networkingNamespaceName", namespace.Metadata.Name())
if err := NewCertManager(ctx, env, namespace); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,3 @@
pulumi>=3.0.0,<4.0.0
pulumi-kubernetes>=3.30.0,<5.0.0
pulumi-kubernetes-cert-manager>=0.0.5,<1.0.0

View File

@@ -0,0 +1,2 @@
from .kubevirt import NewKubeVirt
from .virtual_machines import NewVirtualMachines

View File

@@ -0,0 +1,75 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.yaml import ConfigGroup
from pathlib import Path
class NewKubeVirt:
CURRENT_DIR = Path(__file__).parent
CURRENT_DIR_STR = CURRENT_DIR.as_posix()
def __init__(self, provider):
self.provider = provider
self.kubevirt = ConfigGroup(
"kubevirt",
files=[
self.CURRENT_DIR_STR + "/yaml/kubevirt-operator.yaml",
],
opts=ResourceOptions(provider=self.provider),
)
self.kubevirt_cr = ConfigGroup(
"kubevirt-cr",
files=[
self.CURRENT_DIR_STR + "/yaml/custom-kubevirt-*.yaml",
],
opts=ResourceOptions(
provider=self.provider,
depends_on=[self.kubevirt],
),
)
self.containerDataImporter = ConfigGroup(
"container-data-importer",
files=[self.CURRENT_DIR_STR + "/yaml/cdi-*.yaml"],
opts=ResourceOptions(
provider=self.provider,
depends_on=[self.kubevirt, self.kubevirt_cr],
),
)
self.clusterNetworkAddonsOperator = ConfigGroup(
"cluster-network-addons-operator",
files=[
self.CURRENT_DIR_STR + "/yaml/cluster-network-addons-*.yaml",
],
opts=ResourceOptions(
provider=self.provider,
depends_on=[self.kubevirt, self.kubevirt_cr],
),
)
self.clusterNetworkAddons = ConfigGroup(
"cluster-network-addons",
files=[
self.CURRENT_DIR_STR + "/yaml/custom-networkaddons-*.yaml",
],
opts=ResourceOptions(
provider=self.provider,
depends_on=[
self.kubevirt,
self.kubevirt_cr,
self.clusterNetworkAddonsOperator,
],
),
)
def resources(self):
return [
self.kubevirt,
self.kubevirt_cr,
self.containerDataImporter,
self.clusterNetworkAddonsOperator,
self.clusterNetworkAddons,
]

View File

@@ -0,0 +1,18 @@
apiVersion: cdi.kubevirt.io/v1beta1
kind: CDI
metadata:
name: cdi
spec:
config:
featureGates:
- HonorWaitForFirstConsumer
imagePullPolicy: IfNotPresent
infra:
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: CriticalAddonsOnly
operator: Exists
workload:
nodeSelector:
kubernetes.io/os: linux

File diff suppressed because it is too large.

View File

@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cluster-network-addons
labels:
name: cluster-network-addons
openshift.io/cluster-monitoring: "true"

View File

@@ -0,0 +1,458 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-network-addons-operator
namespace: cluster-network-addons
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
name: cluster-network-addons-operator
name: cluster-network-addons-operator
rules:
- apiGroups:
- operator.openshift.io
resources:
- networks
verbs:
- list
- watch
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- get
- list
- create
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- create
- update
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs
verbs:
- list
- watch
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs/status
verbs:
- patch
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs/finalizers
verbs:
- update
- apiGroups:
- apps
resources:
- deployments
- daemonsets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- namespaces
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
verbs:
- get
- create
- update
- bind
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- get
- create
- update
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- get
- create
- update
- delete
- apiGroups:
- config.openshift.io
resources:
- infrastructures
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- update
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
- list
- watch
- apiGroups:
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- watch
- create
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- create
- update
- list
- watch
- apiGroups:
- kubevirt.io
resources:
- virtualmachines
verbs:
- get
- list
- watch
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- create
- update
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstances
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- k8s.cni.cncf.io
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-network-addons-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-network-addons-operator
subjects:
- kind: ServiceAccount
name: cluster-network-addons-operator
namespace: cluster-network-addons
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
name: cluster-network-addons-operator
name: cluster-network-addons-operator
namespace: cluster-network-addons
rules:
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
- create
- update
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
- update
- apiGroups:
- apps
resources:
- deployments
verbs:
- delete
- apiGroups:
- ""
resources:
- namespaces
verbs:
- update
- get
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- create
- update
- delete
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
- servicemonitors
verbs:
- get
- create
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- get
- create
- update
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cluster-network-addons-operator
namespace: cluster-network-addons
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cluster-network-addons-operator
subjects:
- kind: ServiceAccount
name: cluster-network-addons-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
networkaddonsoperator.network.kubevirt.io/version: 0.91.0
labels:
prometheus.cnao.io: "true"
name: cluster-network-addons-operator
namespace: cluster-network-addons
spec:
replicas: 1
selector:
matchLabels:
name: cluster-network-addons-operator
strategy:
type: Recreate
template:
metadata:
annotations:
description: cluster-network-addons-operator manages the lifecycle of different
Kubernetes network components on top of Kubernetes cluster
labels:
name: cluster-network-addons-operator
prometheus.cnao.io: "true"
spec:
containers:
- env:
- name: MULTUS_IMAGE
value: ghcr.io/k8snetworkplumbingwg/multus-cni@sha256:3fbcc32bd4e4d15bd93c96def784a229cd84cca27942bf4858b581f31c97ee02
- name: MULTUS_DYNAMIC_NETWORKS_CONTROLLER_IMAGE
value: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller@sha256:57573a24923e5588bca6bc337a8b2b08406c5b77583974365d2cf063c0dd5d06
- name: LINUX_BRIDGE_IMAGE
value: quay.io/kubevirt/cni-default-plugins@sha256:c884d6d08f8c0db98964f1eb3877b44ade41fa106083802a9914775df17d5291
- name: LINUX_BRIDGE_MARKER_IMAGE
value: quay.io/kubevirt/bridge-marker@sha256:bba066e3b5ff3fb8c5e20861fe8abe51e3c9b50ad6ce3b2616af9cb5479a06d0
- name: OVS_CNI_IMAGE
value: quay.io/kubevirt/ovs-cni-plugin@sha256:e16ac74343da21abb8fb668ce71e728053d00503a992dae2164b9e94a280113e
- name: KUBEMACPOOL_IMAGE
value: quay.io/kubevirt/kubemacpool@sha256:cf8daa57ae6603b776d3af512331b143fa03bc2f4b72f28420fddcf5e4156d0a
- name: MACVTAP_CNI_IMAGE
value: quay.io/kubevirt/macvtap-cni@sha256:850b89343ace7c7ea6b18dd8e11964613974e9d1f7377af03854d407fb15230a
- name: KUBE_RBAC_PROXY_IMAGE
value: quay.io/openshift/origin-kube-rbac-proxy@sha256:e2def4213ec0657e72eb790ae8a115511d5b8f164a62d3568d2f1bff189917e8
- name: KUBE_SECONDARY_DNS_IMAGE
value: ghcr.io/kubevirt/kubesecondarydns@sha256:e87e829380a1e576384145f78ccaa885ba1d5690d5de7d0b73d40cfb804ea24d
- name: CORE_DNS_IMAGE
value: registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e
- name: OPERATOR_IMAGE
value: quay.io/kubevirt/cluster-network-addons-operator:v0.91.0
- name: OPERATOR_NAME
value: cluster-network-addons-operator
- name: OPERATOR_VERSION
value: 0.91.0
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERAND_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: WATCH_NAMESPACE
- name: MONITORING_NAMESPACE
value: openshift-monitoring
- name: MONITORING_SERVICE_ACCOUNT
value: prometheus-k8s
- name: RUNBOOK_URL_TEMPLATE
value: https://kubevirt.io/monitoring/runbooks/
image: quay.io/kubevirt/cluster-network-addons-operator:v0.91.0
imagePullPolicy: Always
name: cluster-network-addons-operator
resources:
requests:
cpu: 50m
memory: 30Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
- args:
- --logtostderr
- --secure-listen-address=:8443
- --upstream=http://127.0.0.1:8080
image: quay.io/openshift/origin-kube-rbac-proxy@sha256:e2def4213ec0657e72eb790ae8a115511d5b8f164a62d3568d2f1bff189917e8
imagePullPolicy: Always
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: metrics
protocol: TCP
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
serviceAccountName: cluster-network-addons-operator

View File

@@ -0,0 +1,34 @@
---
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
name: kubevirt
namespace: kubevirt
spec:
workloads:
nodePlacement:
nodeSelector:
node-type: worker
certificateRotateStrategy: {}
configuration:
network:
# Bridge network interface on Pod network
# is not compatible with istio & live migration
# so we should disable it
permitBridgeInterfaceOnPodNetwork: false
migrations:
parallelMigrationsPerCluster: 5
parallelOutboundMigrationsPerNode: 2
bandwidthPerMigration: 220Mi # 220MiB/s
completionTimeoutPerGiB: 800
progressTimeout: 150
disableTLS: false
nodeDrainTaintKey: "kubevirt.io/drain"
allowAutoConverge: false
allowPostCopy: false
unsafeMigrationOverride: false
developerConfiguration:
featureGates: []
customizeComponents: {}
imagePullPolicy: IfNotPresent
workloadUpdateStrategy: {}
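Rollout progress for this CR is reported in its status; a sketch reading it back with the `kubernetes` client's CustomObjectsApi (names and namespace match the CR above):

```python
# Sketch: check whether the KubeVirt control plane reports phase "Deployed".
from kubernetes import client, config

config.load_kube_config()
kv = client.CustomObjectsApi().get_namespaced_custom_object(
    group="kubevirt.io", version="v1",
    namespace="kubevirt", plural="kubevirts", name="kubevirt",
)
print("phase:", kv.get("status", {}).get("phase"))
```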

View File

@@ -0,0 +1,7 @@
apiVersion: networkaddonsoperator.network.kubevirt.io/v1
kind: NetworkAddonsConfig
metadata:
name: cluster
spec:
multus: {} # multi-network plugin
ovs: {} # openvswitch cni plugin

View File

@@ -0,0 +1,33 @@
# ========================================
# Install KubeVirt
# https://github.com/kubevirt/kubevirt/tree/main
# ========================================
export RELEASE=$(curl https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
echo "The latest kubevirt's version is $RELEASE"
curl -Lo kubevirt-operator-${RELEASE}.yaml https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml
curl -Lo kubevirt-cr-${RELEASE}.yaml https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml
# ========================================
# Install CDI (Containerized Data Importer)
# https://github.com/kubevirt/containerized-data-importer
# ========================================
export CDI_VERSION=$(curl -s https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
echo "The latest CDI(Containerized Data Importer)'s version is $CDI_VERSION"
curl -Lo cdi-operator-${CDI_VERSION}.yaml https://github.com/kubevirt/containerized-data-importer/releases/download/$CDI_VERSION/cdi-operator.yaml
curl -Lo cdi-cr-${CDI_VERSION}.yaml https://github.com/kubevirt/containerized-data-importer/releases/download/$CDI_VERSION/cdi-cr.yaml
# ========================================
# Install Cluster Network Addons Operator
# https://github.com/kubevirt/cluster-network-addons-operator/tree/main?tab=readme-ov-file#deployment
# ========================================
export CNAO_VERSION=$(curl -s https://api.github.com/repos/kubevirt/cluster-network-addons-operator/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
echo "The latest Cluster Network Addons Operator's version is $CNAO_VERSION"
curl -Lo cluster-network-addons-namespace-${CNAO_VERSION}.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/namespace.yaml
curl -Lo cluster-network-addons-config.crd-${CNAO_VERSION}.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/network-addons-config.crd.yaml
curl -Lo cluster-network-addons-operator-${CNAO_VERSION}.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/operator.yaml
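The same version discovery works without the grep/sed pipeline; a small sketch against the GitHub releases API (the endpoints mirror the curl commands above):

```python
# Sketch: resolve the latest CDI and CNAO release tags via the GitHub API.
import requests

def latest_tag(repo: str) -> str:
    url = f"https://api.github.com/repos/{repo}/releases/latest"
    return requests.get(url, timeout=10).json()["tag_name"]

print("CDI:", latest_tag("kubevirt/containerized-data-importer"))
print("CNAO:", latest_tag("kubevirt/cluster-network-addons-operator"))
```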

File diff suppressed because it is too large.

View File

@@ -0,0 +1,22 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.yaml import ConfigGroup
from pathlib import Path
from ..kubevirt import NewKubeVirt
class NewVirtualMachines:
CURRENT_DIR = Path(__file__).parent
CURRENT_DIR_STR = CURRENT_DIR.as_posix()
def __init__(self, provider, kubevirt: NewKubeVirt):
self.provider = provider
self.kubevirt = ConfigGroup(
"virtual-machines",
files=[self.CURRENT_DIR_STR + "/yaml/*.yaml"],
opts=ResourceOptions(
provider=self.provider, depends_on=kubevirt.resources()
),
)

View File

@@ -0,0 +1,59 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ovs-net
annotations:
k8s.v1.cni.cncf.io/resourceName: ovs-cni.network.kubevirt.io/ovsbr1
spec:
config: '{
"cniVersion": "0.4.0",
"type": "ovs",
"bridge": "ovsbr1",
}'
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: testvm-nocloud
spec:
runStrategy: Always
template:
metadata:
labels:
kubevirt.io/vm: testvm-nocloud
spec:
terminationGracePeriodSeconds: 30
domain:
resources:
requests:
memory: 1024M
devices:
disks:
- name: containerdisk
disk:
bus: virtio
- name: emptydisk
disk:
bus: virtio
- disk:
bus: virtio
name: cloudinitdisk
networks:
- name: ovs-net
multus: # Multus network as default
default: true
networkName: ovsbr1
volumes:
- name: containerdisk
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
- name: emptydisk
emptyDisk:
capacity: "2Gi"
- name: cloudinitdisk
cloudInitNoCloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }
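After `pulumi up`, the VM's state can be read back the same way `virtctl` would; a sketch with CustomObjectsApi (the `default` namespace is an assumption, since the manifest above sets none):

```python
# Sketch: print the phase of the testvm-nocloud VirtualMachineInstance.
from kubernetes import client, config

config.load_kube_config()
vmi = client.CustomObjectsApi().get_namespaced_custom_object(
    group="kubevirt.io", version="v1",
    namespace="default",  # assumed; the VM manifest does not set a namespace
    plural="virtualmachineinstances", name="testvm-nocloud",
)
print("phase:", vmi.get("status", {}).get("phase"))
```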

View File

@@ -117,6 +117,43 @@ in {
};
homelab_tailscale_gw_tags = ["tailscale-gw" "homelab-network"];
# --- Kubevirt Nodes --- #
kubevirt_shoryu_modules = {
nixos-modules =
[
../hosts/k8s/kubevirt_shoryu
]
++ kube_base_modules.nixos-modules;
home-module.imports = [
../home/linux/server.nix
];
};
kubevirt_shoryu_tags = ["virt-shoryu"];
kubevirt_shushou_modules = {
nixos-modules =
[
../hosts/k8s/kubevirt_shushou
]
++ kube_base_modules.nixos-modules;
home-module.imports = [
../home/linux/server.nix
];
};
kubevirt_shushou_tags = ["virt-shushou"];
kubevirt_youko_modules = {
nixos-modules =
[
../hosts/k8s/kubevirt_youko
]
++ kube_base_modules.nixos-modules;
home-module.imports = [
../home/linux/server.nix
];
};
kubevirt_youko_tags = ["virt-youko"];
# --- Kubernetes Nodes --- #
k3s_prod_1_master_1_modules = {
@@ -129,7 +166,7 @@ in {
../home/linux/server.nix
];
};
k3s_prod_1_master_1_tags = ["k8s-prod-master"];
k3s_prod_1_master_1_tags = ["k8s-prod-master-1"];
k3s_prod_1_master_2_modules = {
nixos-modules =
@@ -138,7 +175,7 @@ in {
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_master_2_tags = ["k8s-prod-master"];
k3s_prod_1_master_2_tags = ["k8s-prod-master-2"];
k3s_prod_1_master_3_modules = {
nixos-modules =
@@ -147,7 +184,7 @@ in {
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_master_3_tags = ["k8s-prod-master"];
k3s_prod_1_master_3_tags = ["k8s-prod-master-3"];
k3s_prod_1_worker_1_modules = {
nixos-modules =
@@ -156,7 +193,7 @@ in {
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_1_tags = ["k8s-prod-worker"];
k3s_prod_1_worker_1_tags = ["k8s-prod-worker-1"];
k3s_prod_1_worker_2_modules = {
nixos-modules =
@@ -165,7 +202,7 @@ in {
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_2_tags = ["k8s-prod-worker"];
k3s_prod_1_worker_2_tags = ["k8s-prod-worker-2"];
k3s_prod_1_worker_3_modules = {
nixos-modules =
@@ -174,7 +211,7 @@ in {
]
++ kube_base_modules.nixos-modules;
};
k3s_prod_1_worker_3_tags = ["k8s-prod-worker"];
k3s_prod_1_worker_3_tags = ["k8s-prod-worker-3"];
# --- RISC-V / AARCH64 Systems --- #

View File

@@ -1,6 +1,6 @@
{lib, ...}: rec {
mainGateway = "192.168.5.1"; # main router
defaultGateway = "192.168.5.101"; # subrouter with a transparent proxy
defaultGateway = "192.168.5.101"; # subrouter with a transparent proxy
nameservers = [
"119.29.29.29" # DNSPod
"223.5.5.5" # AliDNS
@@ -25,6 +25,9 @@
"k3s-prod-1-worker-1" = "192.168.5.111";
"k3s-prod-1-worker-2" = "192.168.5.112";
"k3s-prod-1-worker-3" = "192.168.5.113";
"kubevirt-shoryu" = "192.168.5.176";
"kubevirt-shushou" = "192.168.5.177";
"kubevirt-youko" = "192.168.5.178";
"tailscale-gw" = "192.168.5.192";
};