Merge pull request #62 from ryan4yin/monitoring

feat: monitoring + containers
This commit is contained in:
Ryan Yin
2024-02-17 23:36:25 +08:00
committed by GitHub
25 changed files with 739 additions and 184 deletions

6
flake.lock generated
View File

@@ -628,10 +628,10 @@
"mysecrets": { "mysecrets": {
"flake": false, "flake": false,
"locked": { "locked": {
"lastModified": 1708107208, "lastModified": 1708183622,
"narHash": "sha256-v2ugfiX05Kv+z1E1iO/nYiFj540V9SGES5JPAeLVu5M=", "narHash": "sha256-fBhY9MhNLsDnktitkVP9jh37U9VfbDcrIld5ZkvsxJQ=",
"ref": "refs/heads/main", "ref": "refs/heads/main",
"rev": "57e9a6dab2d3e1702354ff4862afe9b48ed31e07", "rev": "79d8fa3312ec4a8c42ef77d09e98447dc3f9cb19",
"shallow": true, "shallow": true,
"type": "git", "type": "git",
"url": "ssh://git@github.com/ryan4yin/nix-secrets.git" "url": "ssh://git@github.com/ryan4yin/nix-secrets.git"

View File

@@ -78,6 +78,7 @@
}; };
}; };
# https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
# https://github.com/wi1dcard/v2ray-exporter # https://github.com/wi1dcard/v2ray-exporter
services.prometheus.exporters.v2ray = { services.prometheus.exporters.v2ray = {
enable = true; enable = true;

View File

@@ -166,6 +166,7 @@ in {
}; };
# monitoring with prometheus # monitoring with prometheus
# https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
services.prometheus.exporters.dnsmasq = { services.prometheus.exporters.dnsmasq = {
enable = true; enable = true;
listenAddress = "0.0.0.0"; listenAddress = "0.0.0.0";

View File

@@ -1,13 +1,14 @@
# Idols - Kana # Idols - Kana
TODO: use kana for various services. Use kana for common applications.
All the services assumes a reverse proxy to be setup in the front, they are not exposed to the internet directly. All the services assumes a reverse proxy to be setup in the front, they are not exposed to the internet directly.
Services: ## Services
1. dashy: Homepage
1. ddns
1. transmission & AriaNg: Torrent downloader and HTTP downloader
1. uptime-kuma: uptime monitoring
1. alist/filebrowser: File browser for local/SMB/Cloud
1. excalidraw/DDTV/owncast/jitsi-meet/...
4. dashy: Homepage
3. ddns
4. transmission & AriaNg: Torrent downloader and HTTP downloader
5. uptime-kuma: uptime monitoring
7. alist/filebrowser: File browser for local/SMB/Cloud
8. excalidraw/DDTV/owncast/jitsi-meet/...

View File

@@ -1,4 +1,8 @@
{vars_networking, ...}: {
vars_networking,
mylib,
...
}:
############################################################# #############################################################
# #
# Kana - a NixOS VM running on Proxmox # Kana - a NixOS VM running on Proxmox
@@ -8,6 +12,8 @@ let
hostName = "kana"; # Define your hostname. hostName = "kana"; # Define your hostname.
hostAddress = vars_networking.hostAddress.${hostName}; hostAddress = vars_networking.hostAddress.${hostName};
in { in {
imports = mylib.scanPaths ./.;
# Enable binfmt emulation of aarch64-linux, this is required for cross compilation. # Enable binfmt emulation of aarch64-linux, this is required for cross compilation.
boot.binfmt.emulatedSystems = ["aarch64-linux" "riscv64-linux"]; boot.binfmt.emulatedSystems = ["aarch64-linux" "riscv64-linux"];
# supported file systems, so we can mount any removable disks with these filesystems # supported file systems, so we can mount any removable disks with these filesystems

View File

@@ -0,0 +1,242 @@
appConfig:
theme: crayola
layout: auto
iconSize: large
language: cn
startingView: default
defaultOpeningMethod: newtab
statusCheck: true
statusCheckInterval: 0
backgroundImg: https://thiscute.world/posts/revolution-and-innovation/rolling-girls.webp
faviconApi: allesedv
routingMode: history
enableMultiTasking: false
widgetsAlwaysUseProxy: false
webSearch:
disableWebSearch: false
searchEngine: duckduckgo
openingMethod: newtab
searchBangs: {}
enableFontAwesome: true
enableMaterialDesignIcons: false
hideComponents:
hideHeading: false
hideNav: false
hideSearch: false
hideSettings: false
hideFooter: false
auth:
enableGuestAccess: false
users: []
enableKeycloak: false
showSplashScreen: false
preventWriteToDisk: false
preventLocalSave: false
disableConfiguration: false
allowConfigEdit: true
enableServiceWorker: false
disableContextMenu: false
disableUpdateChecks: false
disableSmartSort: false
enableErrorReporting: false
pageInfo:
title: This Cute Micro Cluster
description: 欢迎进入 ryan4yin 的 Cute Micro Cluster 主页,在这里你能找到许多有趣的玩意儿哦
navLinks:
- title: GitHub
path: https://github.com/ryan4yin
target: newtab
- title: Blog
path: https://thiscute.world/
target: newtab
- title: Dashy Docs
path: https://dashy.to/docs
target: newtab
footerText: 做更多有价值的东西,赚更多的钱,也帮助更多的人。
sections:
- name: Proxmox VE 虚拟化集群
icon: si-proxmox
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: false
hideForGuests: false
items:
- &ref_0
title: PVE-UM560
description: 'CPU: R5-5625U / MEM: 32G / DISK: 512G+4T*2'
icon: si-proxmox
url: https://192.168.5.173:8006
target: newtab
provider: Proxmox
statusCheck: true
statusCheckAllowInsecure: true
id: 0_153265_pveum
- &ref_1
title: PVE-S500+
description: 'CPU: R7-5825U / MEM: 64G / DISK: 1T'
icon: si-proxmox
url: https://192.168.5.174:8006/
target: newtab
provider: Proxmox
statusCheck: true
statusCheckAllowInsecure: true
id: 1_153265_pves
- &ref_2
title: PVE-GTR5
description: 'CPU: R9-5900HX / MEM: 64G / DISK: 1T'
icon: si-proxmox
url: https://192.168.5.172:8006
target: newtab
provider: Proxmox
statusCheck: true
statusCheckAllowInsecure: true
id: 2_153265_pvegtr
- &ref_3
title: Orange Pi 5 8G
description: 'CPU: 8C / MEM: 8G / DISK: 128G'
icon: si-raspberrypi
url: ssh pi@192.168.5.191
target: clipboard
statusCheck: true
statusCheckUrl: https://192.168.5.191:10250
statusCheckAllowInsecure: true
statusCheckAcceptCodes: '404'
id: 3_153265_orangepig
filteredItems:
- *ref_0
- *ref_1
- *ref_2
- *ref_3
- name: K3s 容器化集群
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: false
hideForGuests: false
items:
- &ref_4
title: k3s-main-master
description: control-plane + master
icon: si-k3s
url: ssh ryan@192.168.5.181
target: clipboard
provider: Rancher
statusCheck: true
statusCheckUrl: 'https://192.168.5.181:6443'
statusCheckAllowInsecure: true
statusCheckAcceptCodes: '401'
id: 0_138418_ksmainmaster
- &ref_5
title: k3s-data-1-master
description: worker node
icon: si-k3s
url: ssh ryan@192.168.5.182
target: clipboard
provider: Rancher
statusCheck: true
statusCheckUrl: https://192.168.5.182:10250
statusCheckAllowInsecure: true
statusCheckAcceptCodes: '404'
id: 1_138418_ksdatamaster
- &ref_6
title: k3s-data-1-worker-1
description: worker node
icon: si-k3s
url: ssh ryan@192.168.5.184
target: clipboard
provider: Rancher
statusCheck: true
statusCheckUrl: https://192.168.5.184:10250
statusCheckAllowInsecure: true
statusCheckAcceptCodes: '404'
id: 2_138418_ksdataworker
- &ref_7
title: k3s-data-1-worker-2
description: worker node
icon: si-k3s
url: ssh ryan@192.168.5.186
target: clipboard
provider: Rancher
statusCheck: true
statusCheckUrl: https://192.168.5.186:10250
statusCheckAllowInsecure: true
statusCheckAcceptCodes: '404'
id: 3_138418_ksdataworker
filteredItems:
- *ref_4
- *ref_5
- *ref_6
- *ref_7
- name: System Monitoring & Control
icon: fas fa-monitor-heart-rate
items:
- &ref_9
title: Grafana
description: Data visualised on dashboards
icon: hl-grafana
url: http://grafana.writefor.fun
target: newtab
statusCheck: true
statusCheckAllowInsecure: true
id: 1_2578_grafana
- &ref_10
title: Prometheus Dashboard
description: Monitoring - Prometheus
icon: si-prometheus
url: http://prometheus.writefor.fun
target: newtab
statusCheck: true
id: 2_2578_prometheus
- &ref_11
title: Uptime Kuma
description: Uptime Checking
icon: hl-uptime-kuma
url: http://uptime-kuma.writefor.fun
target: newtab
statusCheck: true
id: 3_2578_uptimekuma
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: false
hideForGuests: false
filteredItems:
- *ref_9
- *ref_10
- *ref_11
- name: Productivity
icon: fas fa-bookmark
items:
- &ref_12
title: Cloud IDE
description: Eclipse Che - Cloud IDE
icon: hl-code
url: https://ide.writefor.fun/
target: newtab
statusCheck: true
id: 0_1302_cloudide
filteredItems:
- *ref_12
- name: Media & Entertainment
icon: fas fa-photo-video
items:
- &ref_13
title: Home Assistant
description: Smart home control
icon: hl-home-assistant
url: http://ha.writefor.fun:8123/
target: newtab
statusCheck: true
id: 0_1956_homeassistant
displayData:
sortBy: default
rows: 1
cols: 1
collapsed: false
hideForGuests: false
filteredItems:
- *ref_13

View File

@@ -0,0 +1,24 @@
{
  # Deploy Dashy (self-hosted dashboard) as an OCI container managed by NixOS.
  # Install the dashy configuration file instead of symlinking it:
  # a symlink into the Nix store would not resolve inside the container's
  # mount namespace, so the real file must exist at /etc/dashy.
  system.activationScripts.installDashyConfig = ''
    install -Dm 600 ${./dashy_conf.yml} /etc/dashy/dashy_conf.yml
  '';

  # https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/virtualisation/oci-containers.nix
  virtualisation.oci-containers.containers = {
    # check its logs via `journalctl -u podman-dashy`
    dashy = {
      hostname = "dashy";
      image = "lissy93/dashy:latest";
      # Host port 4000 -> container port 80 (Dashy's web UI).
      ports = ["4000:80"];
      environment = {
        "NODE_ENV" = "production";
      };
      # Bind-mount the config installed by the activation script above.
      volumes = [
        "/etc/dashy/dashy_conf.yml:/app/public/conf.yml"
      ];
      autoStart = true;
      # cmd = [];
    };
  };
}

View File

@@ -0,0 +1,28 @@
{
  lib,
  mylib,
  ...
}: {
  # Import every sibling .nix file in this directory (container definitions),
  # so new containers take effect without editing this file.
  imports = mylib.scanPaths ./.;

  virtualisation = {
    # Podman is the container runtime here; force-disable Docker even if
    # another module enables it (mkForce wins over a plain `enable = true`).
    docker.enable = lib.mkForce false;
    podman = {
      enable = true;
      # Create a `docker` alias for podman, to use it as a drop-in replacement
      dockerCompat = true;
      # Required for containers under podman-compose to be able to talk to each other.
      defaultNetwork.settings.dns_enabled = true;
      # Periodically prune Podman resources
      autoPrune = {
        enable = true;
        dates = "weekly";
        flags = ["--all"];
      };
    };
    # Run the declarative `oci-containers` (defined in sibling files) via podman.
    oci-containers = {
      backend = "podman";
    };
  };
}

View File

@@ -1,5 +1,9 @@
let {
dataDir = "/data/transmission"; config,
username,
...
}: let
dataDir = "/var/lib/transmission";
name = "transmission"; name = "transmission";
in { in {
# the headless Transmission BitTorrent daemon # the headless Transmission BitTorrent daemon
@@ -10,9 +14,6 @@ in {
user = name; user = name;
group = name; group = name;
home = dataDir; home = dataDir;
incomplete-dir-enabled = true;
incomplete-dir = "${dataDir}/incomplete";
download-dir = "${dataDir}/downloads";
downloadDirPermissions = "0770"; downloadDirPermissions = "0770";
# Whether to enable tweaking of kernel parameters to open many more connections at the same time. # Whether to enable tweaking of kernel parameters to open many more connections at the same time.
@@ -23,7 +24,7 @@ in {
# Path to a JSON file to be merged with the settings. # Path to a JSON file to be merged with the settings.
# Useful to merge a file which is better kept out of the Nix store to set secret config parameters like `rpc-password`. # Useful to merge a file which is better kept out of the Nix store to set secret config parameters like `rpc-password`.
credentialsFile = "/etc/agenix/transmission-credentials.json"; credentialsFile = config.age.secrets."transmission-credentials.json".path;
# Whether to open the RPC port in the firewall. # Whether to open the RPC port in the firewall.
openRPCPort = false; openRPCPort = false;
@@ -43,7 +44,7 @@ in {
# rpc = Web Interface # rpc = Web Interface
rpc-port = 9091; rpc-port = 9091;
rpc-bind-address = "127.0.0.1"; rpc-bind-address = "0.0.0.0";
anti-brute-force-enabled = true; anti-brute-force-enabled = true;
# After this amount of failed authentication attempts is surpassed, # After this amount of failed authentication attempts is surpassed,
# the RPC server will deny any further authentication attempts until it is restarted. # the RPC server will deny any further authentication attempts until it is restarted.
@@ -53,15 +54,19 @@ in {
# Comma-delimited list of IP addresses. # Comma-delimited list of IP addresses.
# Wildcards allowed using '*'. Example: "127.0.0.*,192.168.*.*", # Wildcards allowed using '*'. Example: "127.0.0.*,192.168.*.*",
# rpc-whitelist-enabled = true; rpc-whitelist-enabled = true;
# rpc-whitelist = ""; rpc-whitelist = "127.0.0.*,192.168.*.*";
# Comma-delimited list of domain names. # Comma-delimited list of domain names.
# Wildcards allowed using '*'. Example: "*.foo.org,example.com", # Wildcards allowed using '*'. Example: "*.foo.org,example.com",
# rpc-host-whitelist-enabled = true; rpc-host-whitelist-enabled = true;
# rpc-host-whitelist = ""; rpc-host-whitelist = "*.writefor.fun,localhost,192.168.5.*";
rpc-user = name; rpc-user = username;
rpc-username = name; rpc-username = username;
# rpc-password = "xxx"; # you'd better use the credentialsFile for this. # rpc-password = "test"; # you'd better use the credentialsFile for this.
incomplete-dir-enabled = true;
incomplete-dir = "${dataDir}/incomplete";
download-dir = "${dataDir}/downloads";
# Watch a directory for torrent files and add them to transmission. # Watch a directory for torrent files and add them to transmission.
watch-dir-enabled = false; watch-dir-enabled = false;

View File

@@ -4,10 +4,9 @@
enable = true; enable = true;
# https://github.com/louislam/uptime-kuma/wiki/Environment-Variables # https://github.com/louislam/uptime-kuma/wiki/Environment-Variables
settings = { settings = {
# this assumes a reverse proxy to be set, uptime-kuma will only listen on localhost "UPTIME_KUMA_HOST" = "0.0.0.0";
"UPTIME_KUMA_HOST" = "127.0.0.1"; "UPTIME_KUMA_PORT" = "3001";
"UPTIME_KUMA_PORT" = 3001; "DATA_DIR" = "/var/lib/uptime-kuma/";
"DATA_DIR" = "/data/uptime-kuma";
}; };
}; };
} }

View File

@@ -1,8 +1,9 @@
# Idols - Ruby # Idols - Ruby
TODO: use ruby for backup / sync my personal data. TODO: use ruby for backup / sync my personal data, and monitor the status/logs of my homelab.
For safety, those data should be encrypted before sending to the cloud or my NAS. For safety, those data should be encrypted before sending to the cloud or my NAS.
1. prometheus: Monitor the status of my homelab
1. restic: Backup file from homelab to NAS, or from NAS to Cloud 1. restic: Backup file from homelab to NAS, or from NAS to Cloud
2. syncthing: Sync file between android/macbook/PC and NAS 1. syncthing: Sync file between android/macbook/PC and NAS

View File

@@ -1,4 +1,8 @@
{vars_networking, ...}: {
vars_networking,
mylib,
...
}:
############################################################# #############################################################
# #
# Ruby - a NixOS VM running on Proxmox # Ruby - a NixOS VM running on Proxmox
@@ -8,9 +12,7 @@ let
hostName = "ruby"; # Define your hostname. hostName = "ruby"; # Define your hostname.
hostAddress = vars_networking.hostAddress.${hostName}; hostAddress = vars_networking.hostAddress.${hostName};
in { in {
imports = [ imports = mylib.scanPaths ./.;
./restic.nix
];
# Enable binfmt emulation of aarch64-linux, this is required for cross compilation. # Enable binfmt emulation of aarch64-linux, this is required for cross compilation.
boot.binfmt.emulatedSystems = ["aarch64-linux" "riscv64-linux"]; boot.binfmt.emulatedSystems = ["aarch64-linux" "riscv64-linux"];

View File

@@ -0,0 +1,3 @@
{mylib, ...}: {
imports = mylib.scanPaths ./.;
}

View File

@@ -0,0 +1,4 @@
{
  # TODO: export Proxmox VE metrics to Prometheus via the pve exporter.
  # This module is an intentional empty placeholder for now.
  # https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
}

View File

View File

View File

@@ -0,0 +1,52 @@
{
  config,
  pkgs,
  username,
  useremail,
  ...
}: {
  # Grafana dashboard server; a reverse proxy in front serves it at
  # grafana.writefor.fun (see the dashy config and prometheus webExternalUrl).
  services.grafana = {
    enable = true;
    dataDir = "/var/lib/grafana";
    # declarativePlugins = with pkgs.grafanaPlugins; [ grafana-piechart-panel ];
    settings = {
      server = {
        http_addr = "0.0.0.0";
        http_port = 80;
        protocol = "http";
        # FIX: was "grafana.writefo.fun" — typo; every other reference in this
        # repo uses the writefor.fun domain, and enforce_domain/root_url both
        # derive from this value.
        domain = "grafana.writefor.fun";
        # Redirect to correct domain if the host header does not match the domain. Prevents DNS rebinding attacks.
        serve_from_sub_path = false;
        # Add subpath to the root_url if serve_from_sub_path is true
        root_url = "%(protocol)s://%(domain)s:%(http_port)s/";
        enforce_domain = false;
        read_timeout = "180s";
        # Enable HTTP compression, this can improve transfer speed and bandwidth utilization.
        enable_gzip = true;
        # Cdn for accelerating loading of frontend assets.
        # cdn_url = "https://cdn.jsdelivr.net/npm/grafana@7.5.5";
      };
      security = {
        admin_user = username;
        admin_email = useremail;
        # Use file provider to read the admin password from a file.
        # https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider
        admin_password = "$__file{${config.age.secrets."grafana-admin-password".path}}";
      };
      users = {
        allow_sign_up = false;
        # home_page = "";
        default_theme = "dark";
      };
    };
    # Declaratively provision Grafana's data sources, dashboards, and alerting rules.
    # Grafana's alerting rules is not recommended to use, we use Prometheus alertmanager instead.
    # https://grafana.com/docs/grafana/latest/administration/provisioning/#data-sources
    provision = {
      datasources.path = ./datasources.yml;
      dashboards.path = ./dashboards.yml;
    };
  };
}

View File

@@ -0,0 +1,108 @@
{
  config,
  vars_networking,
  ...
}: {
  # Prometheus server scraping node-exporter on every host, plus an
  # Alertmanager instance that mails alerts via QQ SMTP.
  # https://prometheus.io/docs/prometheus/latest/configuration/configuration/
  services.prometheus = {
    enable = true;
    checkConfig = true;
    listenAddress = "0.0.0.0";
    port = 9090;
    webExternalUrl = "https://prometheus.writefor.fun";
    extraFlags = ["--storage.tsdb.retention.time=15d"];
    # Directory below /var/lib to store Prometheus metrics data.
    stateDir = "prometheus2";
    # Reload prometheus when configuration file changes (instead of restart).
    enableReload = true;
    # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
    # remoteRead = [];

    # Rules are read from these files.
    # https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
    #
    # Prometheus supports two types of rules which may be configured
    # and then evaluated at regular intervals:
    #   1. Recording rules
    #      Recording rules allow you to precompute frequently needed or computationally
    #      expensive expressions and save their result as a new set of time series.
    #      Querying the precomputed result will then often be much faster than executing the original expression.
    #      This is especially useful for dashboards, which need to query the same expression repeatedly every time they refresh.
    #   2. Alerting rules
    #      Alerting rules allow you to define alert conditions based on Prometheus expression language expressions
    #      and to send notifications about firing alerts to an external service.
    ruleFiles = [
      ./recording_rules.yml
      ./alerting_rules.yml
    ];

    # specifies a set of targets and parameters describing how to scrape metrics from them.
    # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
    scrapeConfigs = [
      {
        job_name = "node-exporter";
        scrape_interval = "30s";
        metrics_path = "/metrics";
        static_configs = [
          {
            # All my NixOS hosts — one node-exporter target (port 9100) per
            # address declared in vars_networking.hostAddress.
            targets =
              map (host: "${host.address}:9100")
              (builtins.attrValues vars_networking.hostAddress);
            labels.type = "node";
          }
        ];
      }
    ];

    # specifies Alertmanager instances the Prometheus server sends alerts to
    # (matches the alertmanager configured below on port 9093).
    # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config
    alertmanagers = [{static_configs = [{targets = ["localhost:9093"];}];}];
  };

  services.prometheus.alertmanager = {
    enable = true;
    logLevel = "info";
    # The $SMTP_* variables referenced below are substituted from this
    # agenix-decrypted environment file at service start.
    environmentFile = config.age.secrets."alertmanager.env".path;
    webExternalUrl = "https://alertmanager.writefor.fun";
    # Listen on loopback only; Prometheus above reaches it via localhost:9093.
    listenAddress = "[::1]";
    configuration = {
      global = {
        # The smarthost and SMTP sender used for mail notifications.
        smtp_smarthost = "smtp.qq.com:465";
        smtp_from = "$SMTP_SENDER_EMAIL";
        smtp_auth_username = "$SMTP_AUTH_USERNAME";
        smtp_auth_password = "$SMTP_AUTH_PASSWORD";
        # smtp.qq.com:465 support SSL only, so we need to disable TLS here.
        # https://service.mail.qq.com/detail/0/310
        smtp_require_tls = false;
      };
      route = {
        receiver = "default";
        routes = [
          {
            group_by = ["host"];
            group_wait = "5m";
            group_interval = "5m";
            repeat_interval = "4h";
            receiver = "default";
          }
        ];
      };
      receivers = [
        {
          name = "default";
          email_configs = [
            {
              to = "ryan4yin@linux.com";
              # Whether to notify about resolved alerts.
              send_resolved = true;
            }
          ];
        }
      ];
    };
  };
}

View File

@@ -1,16 +1,20 @@
{pkgs, ...}: { {pkgs, ...}: let
passwordFile = "/etc/agenix/restic-password";
sshKeyPath = "/etc/agenix/ssh-key-for-restic-backup";
rcloneConfigFile = "/etc/agenix/rclone-conf-for-restic-backup";
in {
# https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/backup/restic.nix # https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/backup/restic.nix
services.restic.backups = { services.restic.backups = {
homelab-backup = { homelab-backup = {
inherit passwordFile;
initialize = true; # Initialize the repository if it doesn't exist. initialize = true; # Initialize the repository if it doesn't exist.
passwordFile = "/etc/agenix/restic-password";
repository = "rclone:smb-downloads:/Downloads/proxmox-backup/"; # backup to a rclone remote repository = "rclone:smb-downloads:/Downloads/proxmox-backup/"; # backup to a rclone remote
# rclone related # rclone related
# rcloneOptions = { # rcloneOptions = {
# bwlimit = "100M"; # Limit the bandwidth used by rclone. # bwlimit = "100M"; # Limit the bandwidth used by rclone.
# }; # };
rcloneConfigFile = "/etc/agenix/rclone-conf-for-restic-backup"; inherit rcloneConfigFile;
# Which local paths to backup, in addition to ones specified via `dynamicFilesFrom`. # Which local paths to backup, in addition to ones specified via `dynamicFilesFrom`.
paths = [ paths = [
@@ -42,7 +46,7 @@
pve_nodes | each {|it| pve_nodes | each {|it|
rsync -avz \ rsync -avz \
-e "ssh -i /etc/agenix/ssh-key-for-restic-backup" \ -e "ssh -i ${sshKeyPath}" \
$"($it):/var/lib/vz" $"/tmp/restic-backup-temp/($it)" $"($it):/var/lib/vz" $"/tmp/restic-backup-temp/($it)"
} }
' '
@@ -55,7 +59,7 @@
# Extra arguments passed to restic backup. # Extra arguments passed to restic backup.
# extraBackupArgs = [ # extraBackupArgs = [
# "--exclude-file=/etc/agenix/restic-excludes" # "--exclude-file=/etc/restic/excludes-list"
# ]; # ];
# repository = "/mnt/backup-hdd"; # backup to a local directory # repository = "/mnt/backup-hdd"; # backup to a local directory

View File

@@ -0,0 +1,18 @@
{
  # enable the node exporter on all nixos hosts
  # (the prometheus server scrapes every host on this port, see its scrapeConfigs)
  # https://github.com/NixOS/nixpkgs/blob/nixos-23.11/nixos/modules/services/monitoring/prometheus/exporters/node.nix
  services.prometheus.exporters.node = {
    enable = true;
    listenAddress = "0.0.0.0";
    port = 9100;
    # There're already a lot of collectors enabled by default
    # https://github.com/prometheus/node_exporter?tab=readme-ov-file#enabled-by-default
    enabledCollectors = [
      "systemd"
      "logind"
    ];
    # use either enabledCollectors or disabledCollectors
    # disabledCollectors = [];
  };
}

View File

@@ -29,147 +29,192 @@ in {
options.modules.secrets = { options.modules.secrets = {
desktop.enable = mkEnableOption "NixOS Secrets for Desktops"; desktop.enable = mkEnableOption "NixOS Secrets for Desktops";
server.enable = mkEnableOption "NixOS Secrets for Servers";
server.network.enable = mkEnableOption "NixOS Secrets for Network Servers";
server.application.enable = mkEnableOption "NixOS Secrets for Application Servers";
server.operation.enable = mkEnableOption "NixOS Secrets for Operation Servers(Backup, Monitoring, etc)";
server.kubernetes.enable = mkEnableOption "NixOS Secrets for Kubernetes";
impermanence.enable = mkEnableOption "Wether use impermanence and ephemeral root file sytem"; impermanence.enable = mkEnableOption "Wether use impermanence and ephemeral root file sytem";
}; };
config = mkIf (cfg.server.enable || cfg.desktop.enable) (mkMerge [ config =
{ mkIf (
environment.systemPackages = [ cfg.desktop.enable
agenix.packages."${pkgs.system}".default || cfg.server.application.enable
]; || cfg.server.network.enable
|| cfg.server.operation.enable
# if you changed this key, you need to regenerate all encrypt files from the decrypt contents! || cfg.server.kubernetes.enable
age.identityPaths = ) (mkMerge [
if cfg.impermanence.enable {
then [ environment.systemPackages = [
# To decrypt secrets on boot, this key should exists when the system is booting, agenix.packages."${pkgs.system}".default
# so we should use the real key file path(prefixed by `/persistent/`) here, instead of the path mounted by impermanence.
"/persistent/etc/ssh/ssh_host_ed25519_key" # Linux
]
else [
"/etc/ssh/ssh_host_ed25519_key"
]; ];
assertions = [ # if you changed this key, you need to regenerate all encrypt files from the decrypt contents!
{ age.identityPaths =
# this expression should be true to pass the assertion if cfg.impermanence.enable
assertion = !(cfg.server.enable && cfg.desktop.enable); then [
message = "Enable either desktop or server's secrets, not both!"; # To decrypt secrets on boot, this key should exists when the system is booting,
} # so we should use the real key file path(prefixed by `/persistent/`) here, instead of the path mounted by impermanence.
]; "/persistent/etc/ssh/ssh_host_ed25519_key" # Linux
} ]
else [
"/etc/ssh/ssh_host_ed25519_key"
];
(mkIf cfg.desktop.enable { assertions = [
age.secrets = {
# ---------------------------------------------
# no one can read/write this file, even root.
# ---------------------------------------------
# .age means the decrypted file is still encrypted by age(via a passphrase)
"ryan4yin-gpg-subkeys.priv.age" =
{ {
file = "${mysecrets}/ryan4yin-gpg-subkeys-2024-01-27.priv.age.age"; # This expression should be true to pass the assertion
assertion =
!(cfg.desktop.enable
&& (
cfg.server.application.enable
|| cfg.server.network.enable
|| cfg.server.operation.enable
|| cfg.server.kubernetes.enable
));
message = "Enable either desktop or server's secrets, not both!";
} }
// noaccess; ];
}
# --------------------------------------------- (mkIf cfg.desktop.enable {
# only root can read this file. age.secrets = {
# --------------------------------------------- # ---------------------------------------------
# no one can read/write this file, even root.
# ---------------------------------------------
"wg-business.conf" = # .age means the decrypted file is still encrypted by age(via a passphrase)
{ "ryan4yin-gpg-subkeys.priv.age" =
file = "${mysecrets}/wg-business.conf.age"; {
} file = "${mysecrets}/ryan4yin-gpg-subkeys-2024-01-27.priv.age.age";
// high_security; }
// noaccess;
# Used only by NixOS Modules # ---------------------------------------------
# smb-credentials is referenced in /etc/fstab, by ../hosts/ai/cifs-mount.nix # only root can read this file.
"smb-credentials" = # ---------------------------------------------
{
file = "${mysecrets}/smb-credentials.age";
}
// high_security;
"rclone.conf" = "wg-business.conf" =
{ {
file = "${mysecrets}/rclone.conf.age"; file = "${mysecrets}/wg-business.conf.age";
} }
// high_security; // high_security;
"nix-access-tokens" = # Used only by NixOS Modules
{ # smb-credentials is referenced in /etc/fstab, by ../hosts/ai/cifs-mount.nix
file = "${mysecrets}/nix-access-tokens.age"; "smb-credentials" =
} {
// high_security; file = "${mysecrets}/smb-credentials.age";
}
// high_security;
# --------------------------------------------- "rclone.conf" =
# user can read this file. {
# --------------------------------------------- file = "${mysecrets}/rclone.conf.age";
}
// high_security;
"ssh-key-romantic" = "nix-access-tokens" =
{ {
file = "${mysecrets}/ssh-key-romantic.age"; file = "${mysecrets}/nix-access-tokens.age";
} }
// user_readable; // high_security;
# alias-for-work # ---------------------------------------------
"alias-for-work.nushell" = # user can read this file.
{ # ---------------------------------------------
file = "${mysecrets}/alias-for-work.nushell.age";
}
// user_readable;
"alias-for-work.bash" = "ssh-key-romantic" =
{ {
file = "${mysecrets}/alias-for-work.bash.age"; file = "${mysecrets}/ssh-key-romantic.age";
} }
// user_readable; // user_readable;
};
# place secrets in /etc/ # alias-for-work
environment.etc = { "alias-for-work.nushell" =
# wireguard config used with `wg-quick up wg-business` {
"wireguard/wg-business.conf" = { file = "${mysecrets}/alias-for-work.nushell.age";
source = config.age.secrets."wg-business.conf".path; }
// user_readable;
"alias-for-work.bash" =
{
file = "${mysecrets}/alias-for-work.bash.age";
}
// user_readable;
}; };
"agenix/rclone.conf" = { # place secrets in /etc/
source = config.age.secrets."rclone.conf".path; environment.etc = {
}; # wireguard config used with `wg-quick up wg-business`
"wireguard/wg-business.conf" = {
source = config.age.secrets."wg-business.conf".path;
};
"agenix/ssh-key-romantic" = { "agenix/rclone.conf" = {
source = config.age.secrets."ssh-key-romantic".path; source = config.age.secrets."rclone.conf".path;
mode = "0600"; };
user = username;
};
"agenix/ryan4yin-gpg-subkeys.priv.age" = { "agenix/ssh-key-romantic" = {
source = config.age.secrets."ryan4yin-gpg-subkeys.priv.age".path; source = config.age.secrets."ssh-key-romantic".path;
mode = "0000"; mode = "0600";
}; user = username;
};
# The following secrets are used by home-manager modules "agenix/ryan4yin-gpg-subkeys.priv.age" = {
# So we need to make then readable by the user source = config.age.secrets."ryan4yin-gpg-subkeys.priv.age".path;
"agenix/alias-for-work.nushell" = { mode = "0000";
source = config.age.secrets."alias-for-work.nushell".path; };
mode = "0644"; # both the original file and the symlink should be readable and executable by the user
};
"agenix/alias-for-work.bash" = {
source = config.age.secrets."alias-for-work.bash".path;
mode = "0644"; # both the original file and the symlink should be readable and executable by the user
};
};
})
(mkIf cfg.server.enable { # The following secrets are used by home-manager modules
age.secrets = { # So we need to make then readable by the user
"dae-subscription.dae" = "agenix/alias-for-work.nushell" = {
{ source = config.age.secrets."alias-for-work.nushell".path;
file = "${mysecrets}/server/dae-subscription.dae.age"; mode = "0644"; # both the original file and the symlink should be readable and executable by the user
} };
// high_security; "agenix/alias-for-work.bash" = {
}; source = config.age.secrets."alias-for-work.bash".path;
}) mode = "0644"; # both the original file and the symlink should be readable and executable by the user
]); };
};
})
(mkIf cfg.server.network.enable {
age.secrets = {
"dae-subscription.dae" =
{
file = "${mysecrets}/server/dae-subscription.dae.age";
}
// high_security;
};
})
(mkIf cfg.server.application.enable {
age.secrets = {
"transmission-credentials.json" =
{
file = "${mysecrets}/server/transmission-credentials.json.age";
}
// high_security;
};
})
(mkIf cfg.server.operation.enable {
age.secrets = {
"grafana-admin-password" = {
file = "${mysecrets}/server/grafana-admin-password.age";
mode = "0400";
owner = "grafana";
};
"alertmanager.env" =
{
file = "${mysecrets}/server/alertmanager.env.age";
}
// high_security;
};
})
]);
} }

View File

@@ -8,6 +8,15 @@ let
../home/linux/desktop.nix ../home/linux/desktop.nix
]; ];
}; };
pve_base_modules = {
nixos-modules = [
../secrets/nixos.nix
../modules/nixos/server/server.nix
../modules/nixos/server/proxmox-hardware-configuration.nix
];
# home-module.imports = [];
};
in { in {
# 星野 アイ, Hoshino Ai # 星野 アイ, Hoshino Ai
idol_ai_modules_i3 = { idol_ai_modules_i3 = {
@@ -50,45 +59,47 @@ in {
# 星野 愛久愛海, Hoshino Akuamarin # 星野 愛久愛海, Hoshino Akuamarin
idol_aquamarine_modules = { idol_aquamarine_modules = {
nixos-modules = [ nixos-modules =
../secrets/nixos.nix [
../hosts/idols_aquamarine ../hosts/idols_aquamarine
../modules/nixos/server/server.nix ../modules/nixos/server/proxmox-hardware-configuration.nix
../modules/nixos/server/proxmox-hardware-configuration.nix {modules.secrets.server.network.enable = true;}
{modules.secrets.server.enable = true;} ]
]; ++ pve_base_modules.nixos-modules;
# home-module.imports = []; # home-module.imports = [];
}; };
idol_aquamarine_tags = ["aqua" "router"]; idol_aquamarine_tags = ["aqua" "router"];
# 星野 瑠美衣, Hoshino Rubii # 星野 瑠美衣, Hoshino Rubii
idol_ruby_modules = { idol_ruby_modules = {
nixos-modules = [ nixos-modules =
../hosts/idols_ruby [
../modules/nixos/server/server.nix ../hosts/idols_ruby
../modules/nixos/server/proxmox-hardware-configuration.nix {modules.secrets.server.operation.enable = true;}
]; ]
++ pve_base_modules.nixos-modules;
# home-module.imports = []; # home-module.imports = [];
}; };
idol_ruby_tags = ["dist-build" "ruby"]; idol_ruby_tags = ["dist-build" "ruby"];
# 有馬 かな, Arima Kana # 有馬 かな, Arima Kana
idol_kana_modules = { idol_kana_modules = {
nixos-modules = [ nixos-modules =
../hosts/idols_kana [
../modules/nixos/server/server.nix ../hosts/idols_kana
../modules/nixos/server/proxmox-hardware-configuration.nix {modules.secrets.server.application.enable = true;}
]; ]
++ pve_base_modules.nixos-modules;
# home-module.imports = []; # home-module.imports = [];
}; };
idol_kana_tags = ["dist-build" "kana"]; idol_kana_tags = ["dist-build" "kana"];
homelab_tailscale_gw_modules = { homelab_tailscale_gw_modules = {
nixos-modules = [ nixos-modules =
../hosts/homelab_tailscale_gw [
../modules/nixos/server/server.nix ../hosts/homelab_tailscale_gw
../modules/nixos/server/proxmox-hardware-configuration.nix ]
]; ++ pve_base_modules.nixos-modules;
# home-module.imports = []; # home-module.imports = [];
}; };
homelab_tailscale_gw_tags = ["tailscale_gw"]; homelab_tailscale_gw_tags = ["tailscale_gw"];

View File

@@ -86,8 +86,8 @@
}) })
{ {
aquamarine.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHJrHY3BZRTu0hrlsKxqS+O4GDp4cbumF8aNnbPCGKji root@aquamarine"; aquamarine.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHJrHY3BZRTu0hrlsKxqS+O4GDp4cbumF8aNnbPCGKji root@aquamarine";
ruby.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHrDXNQXELnbevZ1rImfXwmQHkRcd3TDNLsQo33c2tUf"; ruby.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOAMmGni8imcaS40cXgLbVQqPYnDYKs8MSbyWL91RV98 root@ruby";
kana.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJMVX05DQD1XJ0AqFZzsRsqgeUOlZ4opAI+8tkVXyjq+"; kana.publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIcINkxU3KxPsCpWltfEBjDYtKEeCmgrDxyUadl1iZ1D root@kana";
}; };
}; };
} }