feat: migrate k8s related configs to another repo

This commit is contained in:
Ryan Yin
2024-03-03 20:48:32 +08:00
parent 5811a41aca
commit d20760cd61
26 changed files with 0 additions and 16925 deletions

View File

@@ -1,23 +0,0 @@
# Pulumi - Infrastructure as Code
> WIP, not working yet.
My infrastructure is managed by Pulumi & NixOS.
[Pulumi AI](https://www.pulumi.com/ai/) is a Chatbot based on GPT v4, it can help you to write Pulumi code.
## Why Pulumi for Kubernetes?
1. Deploying Helm charts & yaml files in the right order, in a declarative way.
- Helm CLI supports only imperative commands, you need to run a bunch of commands like `helm repo add`, `helm repo update`, `helm install`, `helm upgrade`, etc.
it's really hard to manage the lifecycle of the Helm chart in this way.
1. Deal with secrets in a secure way.
1. Deploying Kubernetes resources in a unified way, instead of running a bunch of commands like `kubectl apply`, `helm install`, `kustomize`, etc.
## Why not ArgoCD or FluxCD?
ArgoCD & FluxCD support only Kubernetes, and they are too heavy for my use case.
Pulumi supports not only Kubernetes but also other cloud providers like Proxmox, Libvirt, AWS, Azure, GCP, etc.
It's a unified way to manage the lifecycle of all my infrastructure resources.

View File

@@ -1 +0,0 @@
use flake

View File

@@ -1,3 +0,0 @@
*.pyc
venv/
__pycache__/

View File

@@ -1,6 +0,0 @@
name: k3s-prod-1
runtime:
  name: python
  options:
    virtualenv: venv
description: A Python program to deploy a Helm chart onto a Kubernetes cluster

View File

@@ -1,29 +0,0 @@
"""Pulumi entry point for the k3s-prod-1 cluster.

Deploys the networking (cert-manager), monitoring (VictoriaMetrics) and
visualization (KubeVirt + virtual machines) stacks onto the cluster.
"""
import pulumi
import pulumi_kubernetes as kubernetes

# Explicit imports instead of wildcard `from x import *` (PEP 8): the names
# this program actually uses are visible at a glance and survive refactors.
from monitoring import NewVictoriaMetrics
from networking import NewCertManager
from visualization import NewKubeVirt, NewVirtualMachines

provider = kubernetes.Provider(
    "k3s-prod-1",
    # The name of the kubeconfig context to use
    context="default",
    # Disable server-side apply to make the cluster more reproducible.
    # It will(TODO: not sure) discard any server-side changes to resources when set to false.
    enable_server_side_apply=False,
)

# networking
new_cert_manager = NewCertManager(provider)

# monitoring
new_victoria_metrics = NewVictoriaMetrics(provider)

# visualization
new_kubevirt = NewKubeVirt(provider)
new_virtual_machines = NewVirtualMachines(provider, new_kubevirt)

View File

@@ -1,58 +0,0 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1707956935,
"narHash": "sha256-ZL2TrjVsiFNKOYwYQozpbvQSwvtV/3Me7Zwhmdsfyu4=",
"path": "/nix/store/m9s94alic7s2r6v47p7lwfj58ibc076a-source",
"rev": "a4d4fe8c5002202493e87ec8dbc91335ff55552c",
"type": "path"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"utils": "utils"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1709126324,
"narHash": "sha256-q6EQdSeUZOG26WelxqkmR7kArjgWCdw5sfJVHPH/7j8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "d465f4819400de7c8d874d50b982301f28a84605",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@@ -1,58 +0,0 @@
{
  description = "Python venv development template";

  inputs = {
    # NOTE(review): `nixpkgs` is consumed in `outputs` but not declared here,
    # so it resolves through the flake registry (flake.lock shows it as
    # type "indirect"). Consider pinning it explicitly, e.g.
    # nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable".
    utils.url = "github:numtide/flake-utils";
  };

  outputs = {
    self,
    nixpkgs,
    utils,
    ...
  }:
    utils.lib.eachDefaultSystem (system: let
      pkgs = import nixpkgs {inherit system;};
    in {
      devShells.default = pkgs.mkShell {
        name = "pulumi-venv";
        venvDir = "./venv";
        buildInputs = with pkgs; [
          # A Python interpreter including the 'venv' module is required to bootstrap
          # the environment.
          python3Packages.python

          # This executes some shell code to initialize a venv in $venvDir before
          # dropping into the shell
          python3Packages.venvShellHook

          # pulumi related packages
          pulumi
          pulumictl
          tf2pulumi
          crd2pulumi
          pulumiPackages.pulumi-random
          pulumiPackages.pulumi-command
          pulumiPackages.pulumi-aws-native
          pulumiPackages.pulumi-language-go
          pulumiPackages.pulumi-language-python
          pulumiPackages.pulumi-language-nodejs
        ];

        # Run this command, only after creating the virtual environment
        postVenvCreation = ''
          unset SOURCE_DATE_EPOCH
          pip install -r requirements.txt
        '';

        # Now we can execute any commands within the virtual environment.
        # This is optional and can be left out to run pip manually.
        postShellHook = ''
          # allow pip to install wheels
          unset SOURCE_DATE_EPOCH
          # fix `libstdc++.so.6 => not found`
          # `export` is required: without it the assignment creates a plain
          # shell variable that child processes (python, pip) never see,
          # so the libstdc++ fix would silently not apply.
          export LD_LIBRARY_PATH="${pkgs.stdenv.cc.cc.lib}/lib"
        '';
      };
    });
}

View File

@@ -1 +0,0 @@
"""Monitoring stack package: re-exports the public deployment classes."""
# Explicit re-export instead of wildcard `import *` (PEP 8): the package's
# public API is stated here rather than inferred from the submodule.
from .victoria_metrics import NewVictoriaMetrics

__all__ = ["NewVictoriaMetrics"]

View File

@@ -1,53 +0,0 @@
from pulumi import ResourceOptions, FileAsset
import pulumi_kubernetes as kubernetes
from pathlib import Path
class NewVictoriaMetrics:
    """Deploys the victoria-metrics-k8s-stack Helm chart into its own namespace."""

    # Kubernetes namespace everything in this stack is installed into.
    NAMESPACE = "monitoring"

    def __init__(self, provider):
        """Create the `monitoring` namespace and install the chart into it.

        :param provider: a pulumi_kubernetes.Provider targeting the cluster.
        """
        self.provider = provider

        namespace_labels = {"app": "monitoring"}
        # Helm values file shipped alongside this module.
        values_path = Path(__file__).parent / "victoria_metrics_helm_values.yml"

        # Create a namespace (user supplies the name of the namespace)
        self.ns = kubernetes.core.v1.Namespace(
            "monitoring",
            metadata=kubernetes.meta.v1.ObjectMetaArgs(
                labels=namespace_labels,
                name=self.NAMESPACE,
            ),
            opts=ResourceOptions(provider=self.provider),
        )

        # https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack
        self.victoriaMetrics = kubernetes.helm.v3.Release(
            "victoria-metrics-k8s-stack",
            chart="victoria-metrics-k8s-stack",
            namespace=self.ns.metadata.name,
            repository_opts=kubernetes.helm.v3.RepositoryOptsArgs(
                repo="https://victoriametrics.github.io/helm-charts/",
            ),
            version="0.19.2",
            skip_crds=False,
            atomic=True,  # purges chart on fail
            cleanup_on_fail=True,  # Allow deletion of new resources created in this upgrade when upgrade fails.
            dependency_update=True,  # run helm dependency update before installing the chart
            reset_values=True,  # When upgrading, reset the values to the ones built into the chart
            # verify=True,  # verify the package before installing it
            # recreate_pods=True,  # performs pods restart for the resource if applicable
            value_yaml_files=[FileAsset(values_path)],
            opts=ResourceOptions(provider=self.provider),
        )

View File

@@ -1,37 +0,0 @@
# https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-k8s-stack
#
# Pulumi will complain ` ValueError: unexpected input of type set` if some values are not available in helm chart!
grafana:
enabled: true
defaultDashboardsTimezone: utc+8
ingress:
enabled: true
hosts:
- k8s-grafana.writefor.fun
persistence:
type: pvc
enabled: false
kube-state-metrics:
enabled: true
prometheus-node-exporter:
# install node exporter via nixos, not container
enabled: false
vmagent:
# vmagent collects metrics from targets and sends them to a remote storage
enabled: true
vmalert:
# vmalert evaluates Prometheus-compatible alerting/recording rules (it is a rules evaluator, not Alertmanager)
enabled: true
vmsingle:
# Single-node VictoriaMetrics for storing metrics.
# https://docs.victoriametrics.com/faq/#which-victoriametrics-type-is-recommended-for-use-in-production---single-node-or-cluster
# vmsingle = vmcluster(vmselect + vmstorage + vminsert)
enabled: true
ingress:
hosts:
- vm.writefor.fun
spec:
storage:
resources:
requests:
storage: 50Gi

View File

@@ -1 +0,0 @@
"""Networking stack package: re-exports the public deployment classes."""
# Explicit re-export instead of wildcard `import *` (PEP 8): the package's
# public API is stated here rather than inferred from the submodule.
from .cert_manager import NewCertManager

__all__ = ["NewCertManager"]

View File

@@ -1,30 +0,0 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.core.v1 import Namespace
from pulumi_kubernetes_cert_manager import CertManager, ReleaseArgs
class NewCertManager:
    """Installs cert-manager (dedicated namespace + Helm release) on the cluster."""

    # Kubernetes namespace cert-manager is installed into.
    NAMESPACE = "cert-manager"

    def __init__(self, provider):
        """Create the namespace and install cert-manager into it.

        :param provider: a pulumi_kubernetes.Provider targeting the cluster.
        """
        self.provider = provider

        self.ns = Namespace(
            "cert-manager",
            metadata={"name": self.NAMESPACE},
            opts=ResourceOptions(provider=self.provider),
        )

        # Install cert-manager into our cluster.
        self.manager = CertManager(
            "cert-manager",
            install_crds=True,
            helm_options=ReleaseArgs(namespace=self.NAMESPACE),
            opts=ResourceOptions(provider=self.provider),
        )

View File

@@ -1,3 +0,0 @@
pulumi>=3.0.0,<4.0.0
pulumi-kubernetes>=3.30.0,<5.0.0
pulumi-kubernetes-cert-manager>=0.0.5,<1.0.0

View File

@@ -1,2 +0,0 @@
from .kubevirt import NewKubeVirt
from .virtual_machines import NewVirtualMachines

View File

@@ -1,75 +0,0 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.yaml import ConfigGroup
from pathlib import Path
class NewKubeVirt:
    """Deploys KubeVirt and its companions (CDI, Cluster Network Addons Operator)
    from the YAML manifests stored under ./yaml/, with `depends_on` wiring so
    they are applied in the correct order.
    """

    # Directory containing this module; manifest globs are resolved relative to it.
    CURRENT_DIR = Path(__file__).parent
    CURRENT_DIR_STR = CURRENT_DIR.as_posix()

    def __init__(self, provider):
        """Create all manifest ConfigGroups on the given provider.

        :param provider: a pulumi_kubernetes.Provider targeting the cluster.
        """
        self.provider = provider

        # KubeVirt operator itself - everything else depends on it.
        self.kubevirt = ConfigGroup(
            "kubevirt",
            files=[
                self.CURRENT_DIR_STR + "/yaml/kubevirt-operator.yaml",
            ],
            opts=ResourceOptions(provider=self.provider),
        )

        # KubeVirt custom resources - applied after the operator so its CRDs exist.
        self.kubevirt_cr = ConfigGroup(
            "kubevirt-cr",
            files=[
                self.CURRENT_DIR_STR + "/yaml/custom-kubevirt-*.yaml",
            ],
            opts=ResourceOptions(
                provider=self.provider,
                depends_on=[self.kubevirt],
            ),
        )

        # CDI (Containerized Data Importer) manifests.
        self.containerDataImporter = ConfigGroup(
            "container-data-importer",
            files=[self.CURRENT_DIR_STR + "/yaml/cdi-*.yaml"],
            opts=ResourceOptions(
                provider=self.provider,
                depends_on=[self.kubevirt, self.kubevirt_cr],
            ),
        )

        # Cluster Network Addons Operator manifests.
        self.clusterNetworkAddonsOperator = ConfigGroup(
            "cluster-network-addons-operator",
            files=[
                self.CURRENT_DIR_STR + "/yaml/cluster-network-addons-*.yaml",
            ],
            opts=ResourceOptions(
                provider=self.provider,
                depends_on=[self.kubevirt, self.kubevirt_cr],
            ),
        )

        # NetworkAddonsConfig custom resources - applied after their operator.
        self.clusterNetworkAddons = ConfigGroup(
            "cluster-network-addons",
            files=[
                self.CURRENT_DIR_STR + "/yaml/custom-networkaddons-*.yaml",
            ],
            opts=ResourceOptions(
                provider=self.provider,
                depends_on=[
                    self.kubevirt,
                    self.kubevirt_cr,
                    self.clusterNetworkAddonsOperator,
                ],
            ),
        )

    def resources(self):
        """Return every resource created here, for dependents' `depends_on`."""
        return [
            self.kubevirt,
            self.kubevirt_cr,
            self.containerDataImporter,
            self.clusterNetworkAddonsOperator,
            self.clusterNetworkAddons,
        ]

    # Backward-compatible alias: existing callers use the misspelled name.
    def resouces(self):
        """Deprecated misspelling of :meth:`resources`; kept so callers keep working."""
        return self.resources()

View File

@@ -1,18 +0,0 @@
apiVersion: cdi.kubevirt.io/v1beta1
kind: CDI
metadata:
name: cdi
spec:
config:
featureGates:
- HonorWaitForFirstConsumer
imagePullPolicy: IfNotPresent
infra:
nodeSelector:
kubernetes.io/os: linux
tolerations:
- key: CriticalAddonsOnly
operator: Exists
workload:
nodeSelector:
kubernetes.io/os: linux

View File

@@ -1,8 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: cluster-network-addons
labels:
name: cluster-network-addons
openshift.io/cluster-monitoring: "true"

View File

@@ -1,458 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: cluster-network-addons-operator
namespace: cluster-network-addons
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
name: cluster-network-addons-operator
name: cluster-network-addons-operator
rules:
- apiGroups:
- operator.openshift.io
resources:
- networks
verbs:
- list
- watch
- apiGroups:
- security.openshift.io
resources:
- securitycontextconstraints
verbs:
- get
- list
- create
- update
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- create
- update
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs
verbs:
- list
- watch
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs/status
verbs:
- patch
- apiGroups:
- networkaddonsoperator.network.kubevirt.io
resources:
- networkaddonsconfigs/finalizers
verbs:
- update
- apiGroups:
- apps
resources:
- deployments
- daemonsets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- configmaps
- namespaces
verbs:
- list
- watch
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterroles
verbs:
- get
- create
- update
- bind
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- clusterrolebindings
verbs:
- get
- create
- update
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
verbs:
- get
- create
- update
- delete
- apiGroups:
- config.openshift.io
resources:
- infrastructures
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- update
- apiGroups:
- ""
resources:
- pods
- pods/status
verbs:
- get
- update
- list
- watch
- apiGroups:
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
- nodes/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- delete
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- watch
- create
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- get
- create
- update
- list
- watch
- apiGroups:
- kubevirt.io
resources:
- virtualmachines
verbs:
- get
- list
- watch
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- create
- update
- apiGroups:
- kubevirt.io
resources:
- virtualmachineinstances
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- k8s.cni.cncf.io
resources:
- '*'
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: cluster-network-addons-operator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-network-addons-operator
subjects:
- kind: ServiceAccount
name: cluster-network-addons-operator
namespace: cluster-network-addons
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
name: cluster-network-addons-operator
name: cluster-network-addons-operator
namespace: cluster-network-addons
rules:
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
- create
- update
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
- update
- apiGroups:
- apps
resources:
- deployments
verbs:
- delete
- apiGroups:
- ""
resources:
- namespaces
verbs:
- update
- get
- patch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- get
- create
- update
- delete
- apiGroups:
- monitoring.coreos.com
resources:
- prometheusrules
- servicemonitors
verbs:
- get
- create
- update
- delete
- apiGroups:
- rbac.authorization.k8s.io
resources:
- roles
- rolebindings
verbs:
- get
- create
- update
- delete
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: cluster-network-addons-operator
namespace: cluster-network-addons
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cluster-network-addons-operator
subjects:
- kind: ServiceAccount
name: cluster-network-addons-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
networkaddonsoperator.network.kubevirt.io/version: 0.91.0
labels:
prometheus.cnao.io: "true"
name: cluster-network-addons-operator
namespace: cluster-network-addons
spec:
replicas: 1
selector:
matchLabels:
name: cluster-network-addons-operator
strategy:
type: Recreate
template:
metadata:
annotations:
description: cluster-network-addons-operator manages the lifecycle of different
Kubernetes network components on top of Kubernetes cluster
labels:
name: cluster-network-addons-operator
prometheus.cnao.io: "true"
spec:
containers:
- env:
- name: MULTUS_IMAGE
value: ghcr.io/k8snetworkplumbingwg/multus-cni@sha256:3fbcc32bd4e4d15bd93c96def784a229cd84cca27942bf4858b581f31c97ee02
- name: MULTUS_DYNAMIC_NETWORKS_CONTROLLER_IMAGE
value: ghcr.io/k8snetworkplumbingwg/multus-dynamic-networks-controller@sha256:57573a24923e5588bca6bc337a8b2b08406c5b77583974365d2cf063c0dd5d06
- name: LINUX_BRIDGE_IMAGE
value: quay.io/kubevirt/cni-default-plugins@sha256:c884d6d08f8c0db98964f1eb3877b44ade41fa106083802a9914775df17d5291
- name: LINUX_BRIDGE_MARKER_IMAGE
value: quay.io/kubevirt/bridge-marker@sha256:bba066e3b5ff3fb8c5e20861fe8abe51e3c9b50ad6ce3b2616af9cb5479a06d0
- name: OVS_CNI_IMAGE
value: quay.io/kubevirt/ovs-cni-plugin@sha256:e16ac74343da21abb8fb668ce71e728053d00503a992dae2164b9e94a280113e
- name: KUBEMACPOOL_IMAGE
value: quay.io/kubevirt/kubemacpool@sha256:cf8daa57ae6603b776d3af512331b143fa03bc2f4b72f28420fddcf5e4156d0a
- name: MACVTAP_CNI_IMAGE
value: quay.io/kubevirt/macvtap-cni@sha256:850b89343ace7c7ea6b18dd8e11964613974e9d1f7377af03854d407fb15230a
- name: KUBE_RBAC_PROXY_IMAGE
value: quay.io/openshift/origin-kube-rbac-proxy@sha256:e2def4213ec0657e72eb790ae8a115511d5b8f164a62d3568d2f1bff189917e8
- name: KUBE_SECONDARY_DNS_IMAGE
value: ghcr.io/kubevirt/kubesecondarydns@sha256:e87e829380a1e576384145f78ccaa885ba1d5690d5de7d0b73d40cfb804ea24d
- name: CORE_DNS_IMAGE
value: registry.k8s.io/coredns/coredns@sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e
- name: OPERATOR_IMAGE
value: quay.io/kubevirt/cluster-network-addons-operator:v0.91.0
- name: OPERATOR_NAME
value: cluster-network-addons-operator
- name: OPERATOR_VERSION
value: 0.91.0
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: OPERAND_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: WATCH_NAMESPACE
- name: MONITORING_NAMESPACE
value: openshift-monitoring
- name: MONITORING_SERVICE_ACCOUNT
value: prometheus-k8s
- name: RUNBOOK_URL_TEMPLATE
value: https://kubevirt.io/monitoring/runbooks/
image: quay.io/kubevirt/cluster-network-addons-operator:v0.91.0
imagePullPolicy: Always
name: cluster-network-addons-operator
resources:
requests:
cpu: 50m
memory: 30Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
- args:
- --logtostderr
- --secure-listen-address=:8443
- --upstream=http://127.0.0.1:8080
image: quay.io/openshift/origin-kube-rbac-proxy@sha256:e2def4213ec0657e72eb790ae8a115511d5b8f164a62d3568d2f1bff189917e8
imagePullPolicy: Always
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: metrics
protocol: TCP
resources:
requests:
cpu: 10m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
serviceAccountName: cluster-network-addons-operator

View File

@@ -1,34 +0,0 @@
---
apiVersion: kubevirt.io/v1
kind: KubeVirt
metadata:
name: kubevirt
namespace: kubevirt
spec:
workloads:
nodePlacement:
nodeSelector:
node-type: worker
certificateRotateStrategy: {}
configuration:
network:
# Bridge network interface on Pod network
# is not compatible with istio & live migration
# so we should disable it
permitBridgeInterfaceOnPodNetwork: false
migrations:
parallelMigrationsPerCluster: 5
parallelOutboundMigrationsPerNode: 2
bandwidthPerMigration: 220Mi # 220MiB/s
completionTimeoutPerGiB: 800
progressTimeout: 150
disableTLS: false
nodeDrainTaintKey: "kubevirt.io/drain"
allowAutoConverge: false
allowPostCopy: false
unsafeMigrationOverride: false
developerConfiguration:
featureGates: []
customizeComponents: {}
imagePullPolicy: IfNotPresent
workloadUpdateStrategy: {}

View File

@@ -1,7 +0,0 @@
apiVersion: networkaddonsoperator.network.kubevirt.io/v1
kind: NetworkAddonsConfig
metadata:
name: cluster
spec:
multus: {} # multi-network plugin
ovs: {} # openvswitch cni plugin

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Download the upstream YAML manifests consumed by the Pulumi KubeVirt stack.
#
# Fail fast on any error, unset variable or failed pipeline stage, and make
# curl fail on HTTP errors (-f) so an error page is never saved as a manifest.
set -euo pipefail

# ========================================
# Install KubeVirt
# https://github.com/kubevirt/kubevirt/tree/main
# ========================================
export RELEASE=$(curl -fsSL https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt)
echo "The latest kubevirt's version is ${RELEASE}"
curl -fLo "kubevirt-operator-${RELEASE}.yaml" "https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-operator.yaml"
curl -fLo "kubevirt-cr-${RELEASE}.yaml" "https://github.com/kubevirt/kubevirt/releases/download/${RELEASE}/kubevirt-cr.yaml"

# ========================================
# Install CDI(Containerized Data Importer)
# https://github.com/kubevirt/containerized-data-importer
# ========================================
export CDI_VERSION=$(curl -fsSL https://api.github.com/repos/kubevirt/containerized-data-importer/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
echo "The latest CDI(Containerized Data Importer)'s version is ${CDI_VERSION}"
curl -fLo "cdi-operator-${CDI_VERSION}.yaml" "https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml"
curl -fLo "cdi-cr-${CDI_VERSION}.yaml" "https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-cr.yaml"

# ========================================
# Install Cluster Network Addons Operator
# https://github.com/kubevirt/cluster-network-addons-operator/tree/main?tab=readme-ov-file#deployment
# ========================================
export CNAO_VERSION=$(curl -fsSL https://api.github.com/repos/kubevirt/cluster-network-addons-operator/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')
echo "The latest Cluster Network Addons Operator's version is ${CNAO_VERSION}"
curl -fLo "cluster-network-addons-namespace-${CNAO_VERSION}.yaml" "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/namespace.yaml"
curl -fLo "cluster-network-addons-config.crd-${CNAO_VERSION}.yaml" "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/network-addons-config.crd.yaml"
curl -fLo "cluster-network-addons-operator-${CNAO_VERSION}.yaml" "https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${CNAO_VERSION}/operator.yaml"

View File

@@ -1,22 +0,0 @@
from pulumi import ResourceOptions
from pulumi_kubernetes.yaml import ConfigGroup
from pathlib import Path

# BUGFIX: was `from ..kubevirt import NewKubeVirt`. kubevirt.py lives in this
# same `visualization` package (see visualization/__init__.py), so `..` walks
# beyond the top-level package and raises
# "ImportError: attempted relative import beyond top-level package" at import.
from .kubevirt import NewKubeVirt


class NewVirtualMachines:
    """Applies every VirtualMachine manifest under ./yaml, after KubeVirt is ready."""

    # Directory containing this module; manifest globs are resolved relative to it.
    CURRENT_DIR = Path(__file__).parent
    CURRENT_DIR_STR = CURRENT_DIR.as_posix()

    def __init__(self, provider, kubevirt: NewKubeVirt):
        """Create the VM ConfigGroup, depending on all KubeVirt resources.

        :param provider: a pulumi_kubernetes.Provider targeting the cluster.
        :param kubevirt: the NewKubeVirt deployment the VMs depend on.
        """
        self.provider = provider
        self.kubevirt = ConfigGroup(
            "virtual-machines",
            files=[self.CURRENT_DIR_STR + "/yaml/*.yaml"],
            opts=ResourceOptions(
                provider=self.provider,
                depends_on=kubevirt.resouces(),
            ),
        )

View File

@@ -1,59 +0,0 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: ovs-net
annotations:
k8s.v1.cni.cncf.io/resourceName: ovs-cni.network.kubevirt.io/ovsbr1
spec:
config: '{
"cniVersion": "0.4.0",
"type": "ovs",
"bridge": "ovsbr1"
}'
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: testvm-nocloud
spec:
runStrategy: Always
template:
metadata:
labels:
kubevirt.io/vm: testvm-nocloud
spec:
terminationGracePeriodSeconds: 30
domain:
resources:
requests:
memory: 1024M
devices:
disks:
- name: containerdisk
disk:
bus: virtio
- name: emptydisk
disk:
bus: virtio
- disk:
bus: virtio
name: cloudinitdisk
networks:
- name: ovs-net
multus: # Multus network as default
default: true
networkName: ovsbr1
volumes:
- name: containerdisk
containerDisk:
image: kubevirt/fedora-cloud-container-disk-demo:latest
- name: emptydisk
emptyDisk:
capacity: "2Gi"
- name: cloudinitdisk
cloudInitNoCloud:
userData: |-
#cloud-config
password: fedora
chpasswd: { expire: False }