Add token requestor flow

This commit is contained in:
Martin Šalata
2022-06-07 12:57:34 +02:00
parent 3f865bb2fc
commit 99f210a8af
8 changed files with 403 additions and 50 deletions

View File

@@ -14,7 +14,7 @@
EXTENSION_PREFIX := gardener-extension
NAME := shoot-fleet-agent
REGISTRY := javamachr
REGISTRY := ysoftglobal.azurecr.io
IMAGE_PREFIX := $(REGISTRY)/gardener-extension
REPO_ROOT := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
HACK_DIR := $(REPO_ROOT)/hack
@@ -100,7 +100,7 @@ revendor:
@GO111MODULE=on go mod vendor
@GO111MODULE=on go mod tidy
@chmod +x $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/*
@chmod +x $(REPO_ROOT)/vendor/github. com/gardener/gardener/hack/.ci/*
@chmod +x $(REPO_ROOT)/vendor/github.com/gardener/gardener/hack/.ci/*
@$(REPO_ROOT)/hack/update-github-templates.sh
.PHONY: clean

View File

@@ -0,0 +1,4 @@
# Chart metadata for the resources the extension deploys into the shoot
# (RBAC for the fleet-agent shoot-access service account).
apiVersion: v1
description: A Helm chart for shoot-fleet-agent-shoot
name: shoot-fleet-agent-shoot
version: 0.1.0

View File

@@ -0,0 +1,35 @@
---
# ClusterRole for the fleet-agent's shoot-access service account.
# NOTE(review): '*' on all apiGroups, resources, verbs and nonResourceURLs is
# equivalent to cluster-admin — confirm the fleet agent really needs this
# rather than a narrower rule set.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: extensions.gardener.cloud:extension-shoot-fleet-agent:shoot
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'
---
# Binds the ClusterRole above to the token-requestor service account; the
# subject's name/namespace are injected by the extension via chart values.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: extensions.gardener.cloud:extension-shoot-fleet-agent:shoot
labels:
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: extensions.gardener.cloud:extension-shoot-fleet-agent:shoot
subjects:
- kind: ServiceAccount
name: {{ .Values.shootAccessServiceAccountName }}
namespace: {{ .Values.shootAccessServiceAccountNamespace }}

View File

@@ -0,0 +1,2 @@
# Name of the shoot-access service account the ClusterRoleBinding targets.
# Empty by default; the extension overrides it at render time.
shootAccessServiceAccountName: ""
# Namespace of that service account.
shootAccessServiceAccountNamespace: kube-system

2
go.mod
View File

@@ -18,6 +18,7 @@ require (
k8s.io/code-generator v0.24.0
k8s.io/component-base v0.24.0
sigs.k8s.io/controller-runtime v0.11.2
sigs.k8s.io/yaml v1.3.0
)
require (
@@ -130,7 +131,6 @@ require (
sigs.k8s.io/controller-tools v0.8.0 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace (

View File

@@ -17,6 +17,10 @@ package controller
import (
"context"
"fmt"
managed_resource_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/managed-resource-handler"
token_requestor_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/token-requestor-handler"
"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/utils"
"k8s.io/apimachinery/pkg/api/errors"
"reflect"
"strings"
@@ -89,6 +93,21 @@ func initializeFleetManagers(config config.Config, logger logr.Logger) map[strin
return fleetManagers
}
// ensureDependencies prepares everything the fleet agent needs in the shoot:
// the token-requestor kubeconfig secret and the shoot RBAC ManagedResource.
// Both steps are handed to utils.RunParallelFunctions (presumably executed
// concurrently — confirm in pkg/utils) and its combined error is returned.
func (a *actuator) ensureDependencies(ctx context.Context, cluster *extensions.Cluster) error {
// Initialize token requestor handler
tokenRequestor := token_requestor_handler.NewTokenRequestorHandler(ctx, a.client, cluster)
// Initialize the managed resource handler
managedResourceHandler := managed_resource_handler.NewManagedResourceHandler(ctx, a.client, cluster)
// The methods are referenced (not called) here; the runner invokes them.
dependencyFunctions := []func() error{
tokenRequestor.EnsureKubeconfig,
managedResourceHandler.EnsureManagedResoruces,
}
return utils.RunParallelFunctions(dependencyFunctions)
}
// Reconcile the Extension resource.
func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
namespace := ex.GetNamespace()
@@ -106,7 +125,17 @@ func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extensi
return fmt.Errorf("failed to decode provider config: %+v", err)
}
}
a.ReconcileClusterInFleetManager(ctx, namespace, cluster, shootsConfigOverride)
if err := a.ensureDependencies(ctx, cluster); err != nil {
a.logger.Error(err, "Could not ensure dependencies")
return err
}
if err = a.ReconcileClusterInFleetManager(ctx, namespace, cluster, shootsConfigOverride); err != nil {
a.logger.Error(err, "Could not reconcile cluster in fleet")
return err
}
return a.updateStatus(ctx, ex)
}
@@ -163,71 +192,87 @@ func (a *actuator) InjectScheme(scheme *runtime.Scheme) error {
}
// ReconcileClusterInFleetManager reconciles cluster registration in remote fleet manager
func (a *actuator) ReconcileClusterInFleetManager(ctx context.Context, namespace string, cluster *extensions.Cluster, override *config.Config) {
func (a *actuator) ReconcileClusterInFleetManager(ctx context.Context, namespace string, cluster *extensions.Cluster, override *config.Config) error {
a.logger.Info("Starting with already registered check")
labels := prepareLabels(cluster, getProjectConfig(cluster, &a.serviceConfig), getProjectConfig(cluster, override))
registered, err := a.getFleetManager(cluster).GetCluster(ctx, cluster.Shoot.Name)
if err != nil {
// We cannot find the cluster because of an unknown error
if err != nil && !errors.IsNotFound(err) {
a.logger.Error(err, "Failed to get cluster registration for Shoot", "shoot", cluster.Shoot.Name)
return err
}
if err == nil && registered != nil {
if reflect.DeepEqual(registered.Labels, labels) {
a.logger.Info("Cluster already registered - skipping registration", "clientId", registered.Spec.ClientID)
} else {
a.logger.Info("Updating labels of already registered cluster.", "clientId", registered.Spec.ClientID)
a.updateClusterLabelsInFleet(ctx, registered, cluster, labels)
}
return
// We cannot find the cluster because we haven't registered it yet
if err != nil && errors.IsNotFound(err) {
a.logger.Info("Creating fleet cluster", "shoot", cluster.Shoot.Name)
return a.registerNewClusterInFleet(ctx, namespace, cluster, labels)
}
a.registerNewClusterInFleet(ctx, namespace, cluster, labels)
// The cluster we have in fleet is already in the correct state
if reflect.DeepEqual(registered.Labels, labels) {
a.logger.Info("Cluster already registered - skipping registration", "clientId", registered.Spec.ClientID)
return nil
}
a.logger.Info("Updating labels of already registered cluster.", "clientId", registered.Spec.ClientID)
return a.updateClusterLabelsInFleet(ctx, registered, cluster, labels)
}
func (a *actuator) updateClusterLabelsInFleet(ctx context.Context, clusterRegistration *fleetv1alpha1.Cluster, cluster *extensions.Cluster, labels map[string]string) {
func (a *actuator) updateClusterLabelsInFleet(ctx context.Context, clusterRegistration *fleetv1alpha1.Cluster, cluster *extensions.Cluster, labels map[string]string) error {
clusterRegistration.Labels = labels
_, err := a.getFleetManager(cluster).UpdateCluster(ctx, clusterRegistration)
if err != nil {
a.logger.Error(err, "Failed to update cluster labels in Fleet registration.", "clusterName", clusterRegistration.Name)
}
return err
}
func (a *actuator) registerNewClusterInFleet(ctx context.Context, namespace string, cluster *extensions.Cluster, labels map[string]string) {
func (a *actuator) registerNewClusterInFleet(ctx context.Context, namespace string, cluster *extensions.Cluster, labels map[string]string) error {
a.logger.Info("Looking up Secret with KubeConfig for given Shoot.", "namespace", namespace, "secretName", KubeconfigSecretName)
secret := &corev1.Secret{}
if err := a.client.Get(ctx, kutil.Key(namespace, KubeconfigSecretName), secret); err == nil {
secretData := make(map[string][]byte)
secretData["value"] = secret.Data[KubeconfigKey]
a.logger.Info("Loaded kubeconfig from secret", "kubeconfig", secret, "namespace", namespace)
const fleetRegisterNamespace = "clusters"
kubeconfigSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "kubecfg-" + buildCrdName(cluster),
Namespace: fleetRegisterNamespace,
},
Data: secretData,
}
clusterRegistration := fleetv1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: buildCrdName(cluster),
Namespace: fleetRegisterNamespace,
Labels: labels,
},
Spec: fleetv1alpha1.ClusterSpec{
KubeConfigSecret: "kubecfg-" + buildCrdName(cluster),
},
}
if _, err = a.getFleetManager(cluster).CreateKubeconfigSecret(ctx, &kubeconfigSecret); err != nil {
a.logger.Error(err, "Failed to create secret with kubeconfig for Fleet registration")
}
if _, err = a.getFleetManager(cluster).CreateCluster(ctx, &clusterRegistration); err != nil {
a.logger.Error(err, "Failed to create Cluster for Fleet registration")
}
a.logger.Info("Registered shoot cluster in Fleet Manager ", "registration", clusterRegistration)
} else {
if err := a.client.Get(ctx, kutil.Key(namespace, KubeconfigSecretName), secret); err != nil {
a.logger.Error(err, "Failed to find Secret with kubeconfig for Fleet registration.")
return err
}
secretData := make(map[string][]byte)
secretData["value"] = secret.Data[KubeconfigKey]
a.logger.Info("Loaded kubeconfig from secret", "kubeconfig", secret, "namespace", namespace)
const fleetRegisterNamespace = "clusters"
kubeconfigSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "kubecfg-" + buildCrdName(cluster),
Namespace: fleetRegisterNamespace,
},
Data: secretData,
}
clusterRegistration := fleetv1alpha1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: buildCrdName(cluster),
Namespace: fleetRegisterNamespace,
Labels: labels,
},
Spec: fleetv1alpha1.ClusterSpec{
KubeConfigSecret: "kubecfg-" + buildCrdName(cluster),
},
}
if _, err := a.getFleetManager(cluster).CreateKubeconfigSecret(ctx, &kubeconfigSecret); err != nil {
a.logger.Error(err, "Failed to create secret with kubeconfig for Fleet registration")
return err
}
if _, err := a.getFleetManager(cluster).CreateCluster(ctx, &clusterRegistration); err != nil {
a.logger.Error(err, "Failed to create Cluster for Fleet registration")
return err
}
a.logger.Info("Registered shoot cluster in Fleet Manager ", "registration", clusterRegistration)
return nil
}
func prepareLabels(cluster *extensions.Cluster, serviceConfig projConfig.ProjectConfig, override projConfig.ProjectConfig) map[string]string {

View File

@@ -0,0 +1,117 @@
package managed_resource_handler
import (
"context"
"fmt"
"github.com/gardener/gardener/extensions/pkg/util"
"github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/extensions"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/kubernetes/health"
"github.com/gardener/gardener/pkg/utils/managedresources"
"github.com/gardener/gardener/pkg/utils/retry"
"github.com/go-logr/logr"
token_requestor_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/token-requestor-handler"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"path/filepath"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"time"
)
var (
// ChartsPath is the on-disk root of the helm charts bundled with the extension.
ChartsPath = filepath.Join("charts", "internal")
// ManagedResourcesChartValues are the helm values for the shoot chart; they
// point the chart's RBAC binding at the token-requestor service account.
ManagedResourcesChartValues = map[string]interface{}{
"shootAccessServiceAccountName": token_requestor_handler.TargetServiceAccountName,
"shootAccessServiceAccountNamespace": token_requestor_handler.TargetServiceAccountNamespace,
}
)
const (
// ManagedResourceChartName is the chart directory under ChartsPath and the
// data key under which the rendered manifest is stored.
ManagedResourceChartName = "shoot-fleet-agent-shoot"
// ManagedResourceName is the name of the ManagedResource object created in
// the shoot's control-plane namespace.
ManagedResourceName = "extension-shoot-fleet-agent-shoot"
)
// ManagedResourceHandler renders the bundled shoot chart and applies it as a
// gardener ManagedResource for one cluster.
type ManagedResourceHandler struct {
// NOTE(review): storing a context in a struct is discouraged in Go;
// consider passing ctx into each method instead.
ctx context.Context
// client is used for all Kubernetes API calls (create/get ManagedResource).
client client.Client
// cluster identifies the shoot; its ObjectMeta.Name is the target namespace.
cluster *extensions.Cluster
logger logr.Logger
}
// NewManagedResourceHandler builds a handler scoped to the given shoot cluster,
// with a logger tagged by handler name and cluster name.
func NewManagedResourceHandler(ctx context.Context, client client.Client, cluster *extensions.Cluster) *ManagedResourceHandler {
	handlerLogger := log.Log.WithValues("logger", "managed-resource-handler", "cluster", cluster.ObjectMeta.Name)
	handler := ManagedResourceHandler{}
	handler.ctx = ctx
	handler.client = client
	handler.cluster = cluster
	handler.logger = handlerLogger
	return &handler
}
// renderChart takes the helm chart at charts/internal/<ManagedResourceChartName> and renders it with the values
// from ManagedResourcesChartValues, returning the rendered manifest bytes.
// The renderer is built for the shoot's Kubernetes version so capabilities
// match the target cluster.
func (h *ManagedResourceHandler) renderChart() ([]byte, error) {
chartPath := filepath.Join(ChartsPath, ManagedResourceChartName)
renderer, err := util.NewChartRendererForShoot(h.cluster.Shoot.Spec.Kubernetes.Version)
if err != nil {
return nil, err
}
// Release name = chart name, release namespace = cluster namespace.
chart, err := renderer.Render(chartPath, ManagedResourceChartName, h.cluster.ObjectMeta.Name, ManagedResourcesChartValues)
if err != nil {
return nil, err
}
return chart.Manifest(), nil
}
// EnsureManagedResoruces renders the shoot chart, creates/updates the
// ManagedResource in the cluster's control-plane namespace, and blocks until
// gardener reports it healthy.
// NOTE(review): the exported name misspells "Resources"; renaming would break
// the caller in the actuator, so it is kept as-is here.
func (h *ManagedResourceHandler) EnsureManagedResoruces() error {
h.logger.Info("Rendering chart with the managed resources")
renderedChart, err := h.renderChart()
if err != nil {
return err
}
data := map[string][]byte{ManagedResourceChartName: renderedChart}
// Objects are deleted with the ManagedResource, and existing annotations
// are not force-overwritten.
keepObjects := false
forceOverwriteAnnotations := false
h.logger.Info("Creating the ManagedResource")
if err := managedresources.Create(h.ctx, h.client, h.cluster.ObjectMeta.Name, ManagedResourceName, false, "", data, &keepObjects, map[string]string{}, &forceOverwriteAnnotations); err != nil {
h.logger.Error(err, "ManagedResource could not be created")
return err
}
h.logger.Info("Waiting until the ManagedResource is healthy")
if err := h.waitUntilManagedResourceHealthy(); err != nil {
h.logger.Error(err, "ManagedResource has been created, but hasn't been applied")
return err
}
h.logger.Info("ManagedResource created")
return nil
}
// waitUntilManagedResourceHealthy polls the ManagedResource every 5 seconds,
// for up to 60 seconds, until its status passes the gardener health check or
// the timeout/context expires.
func (h *ManagedResourceHandler) waitUntilManagedResourceHealthy() error {
name := ManagedResourceName
namespace := h.cluster.ObjectMeta.Name
obj := &v1alpha1.ManagedResource{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
}
return retry.UntilTimeout(h.ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (done bool, err error) {
// Re-fetch the object on every attempt; transient errors just retry.
if err := h.client.Get(ctx, kutil.Key(namespace, name), obj); err != nil {
h.logger.Info(fmt.Sprintf("Could not wait for the managed resource to be ready: %+v", err))
return retry.MinorError(err)
}
// Check whether ManagedResource has proper status
if err := health.CheckManagedResource(obj); err != nil {
h.logger.Info(fmt.Sprintf("Managed resource %s not ready (yet): %+v", name, err))
return retry.MinorError(err)
}
return retry.Ok()
})
}

View File

@@ -0,0 +1,150 @@
package token_requestor_handler
import (
"context"
"fmt"
extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
constsv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
"github.com/gardener/gardener/pkg/extensions"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/retry"
"github.com/go-logr/logr"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/yaml"
"time"
)
const (
// TokenRequestorSecretName is the secret (in the cluster namespace) that
// gardener's token-requestor fills with a tokenized kubeconfig.
TokenRequestorSecretName = "shoot-access-extension-shoot-fleet-agent"
// TokenRequestorSecretKey is the data key holding the kubeconfig.
TokenRequestorSecretKey = constsv1alpha1.DataKeyKubeconfig
// TargetServiceAccountName/-Namespace identify the shoot service account
// the requested token is issued for (also wired into the shoot RBAC chart).
TargetServiceAccountName = "extension-shoot-fleet-agent"
TargetServiceAccountNamespace = "kube-system"
)
// TokenRequestorHandler manages the token-requestor kubeconfig secret for one
// shoot cluster.
type TokenRequestorHandler struct {
// NOTE(review): storing a context in a struct is discouraged in Go;
// consider passing ctx into each method instead.
ctx context.Context
// client is used for all secret reads/writes in the cluster namespace.
client client.Client
// cluster identifies the shoot; its ObjectMeta.Name is the secret namespace.
cluster *extensions.Cluster
logger logr.Logger
}
// NewTokenRequestorHandler constructs a handler bound to the given shoot
// cluster, with a logger tagged by handler name and cluster name.
func NewTokenRequestorHandler(ctx context.Context, client client.Client, cluster *extensions.Cluster) *TokenRequestorHandler {
	handler := TokenRequestorHandler{
		ctx:     ctx,
		client:  client,
		cluster: cluster,
	}
	handler.logger = log.Log.WithValues("logger", "token-requestor-handler", "cluster", cluster.ObjectMeta.Name)
	return &handler
}
// getGenericKubeconfigName returns the name of the generic token kubeconfig
// secret gardener maintains for this cluster.
func (t *TokenRequestorHandler) getGenericKubeconfigName() string {
	secretName := extensionscontroller.GenericTokenKubeconfigSecretNameFromCluster(t.cluster)
	return secretName
}
// getGenericKubeconfig reads gardener's generic token kubeconfig secret from
// the cluster's namespace and parses its kubeconfig payload.
func (t *TokenRequestorHandler) getGenericKubeconfig() (*clientapiv1.Config, error) {
	secretName := t.getGenericKubeconfigName()
	var secret v1.Secret
	if err := t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, secretName), &secret); err != nil {
		return nil, err
	}
	raw, found := secret.Data[TokenRequestorSecretKey]
	if !found {
		return nil, fmt.Errorf("secret %s doesn't have data key %s", secretName, TokenRequestorSecretKey)
	}
	var cfg clientapiv1.Config
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
// isTokenInserted reports whether the token-requestor secret for this cluster
// already holds a kubeconfig whose first user entry carries a bearer token,
// i.e. whether gardener has filled the token in yet. Every failure mode is
// logged and reported as false so the caller can keep polling.
func (t *TokenRequestorHandler) isTokenInserted() bool {
	kubeconfigSecret := &v1.Secret{}
	if err := t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, TokenRequestorSecretName), kubeconfigSecret); err != nil {
		// Fixed log-message typo: "fet" -> "fetch".
		t.logger.Info(fmt.Sprintf("Couldn't fetch the kubeconfig secret %s, %+v", TokenRequestorSecretName, err))
		return false
	}
	kubeconfigData, ok := kubeconfigSecret.Data[TokenRequestorSecretKey]
	if !ok {
		t.logger.Info(fmt.Sprintf("Kubeconfig secret %s doesn't contain data item %s", TokenRequestorSecretName, TokenRequestorSecretKey))
		return false
	}
	kubeconfig := &clientapiv1.Config{}
	if err := yaml.Unmarshal(kubeconfigData, kubeconfig); err != nil {
		t.logger.Info(fmt.Sprintf("Data item %s in kubeconfig secret %s couldn't be parsed: %+v", TokenRequestorSecretKey, TokenRequestorSecretName, err))
		return false
	}
	// Only the first auth-info is checked; that is the entry the generic
	// kubeconfig template provides.
	if len(kubeconfig.AuthInfos) == 0 || kubeconfig.AuthInfos[0].AuthInfo.Token == "" {
		t.logger.Info(fmt.Sprintf("Kubeconfig in secret %s doesn't contain token (yet?)", TokenRequestorSecretName))
		return false
	}
	t.logger.Info(fmt.Sprintf("Kubeconfig in secret %s contains a proper token", TokenRequestorSecretName))
	return true
}
// createKubeconfigSecretFromGeneric copies the generic token kubeconfig into a
// new secret named TokenRequestorSecretName, labelled and annotated so that
// gardener's token-requestor injects a service-account token into it.
// If the secret already exists the function is a no-op and returns nil.
func (t *TokenRequestorHandler) createKubeconfigSecretFromGeneric() error {
kubeconfig, err := t.getGenericKubeconfig()
if err != nil {
return err
}
kubeconfigYaml, err := yaml.Marshal(kubeconfig)
if err != nil {
return err
}
existingSecret := &v1.Secret{}
err = t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, TokenRequestorSecretName), existingSecret)
// err == nil: secret already exists -> return nil (err) without recreating.
// Any error other than NotFound is unexpected and is returned as-is.
// Only a NotFound error falls through to the create below.
if err == nil || !apierrors.IsNotFound(err) {
return err
}
secretObject := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: t.cluster.ObjectMeta.Name,
Name: TokenRequestorSecretName,
Labels: map[string]string{
// Marks the secret for gardener's token-requestor controller.
constsv1alpha1.ResourceManagerPurpose: constsv1alpha1.LabelPurposeTokenRequest,
},
Annotations: map[string]string{
// Identifies the shoot service account the token is issued for.
constsv1alpha1.ServiceAccountName: TargetServiceAccountName,
constsv1alpha1.ServiceAccountNamespace: TargetServiceAccountNamespace,
},
},
StringData: map[string]string{
TokenRequestorSecretKey: string(kubeconfigYaml),
},
}
return t.client.Create(t.ctx, secretObject)
}
// EnsureKubeconfig creates the secret with a token-requestor label and waits until gardener fills in
// the token into the kubeconfig. It polls every 5 seconds for up to 60 seconds
// and returns an error if the token never appears or creation fails.
func (t *TokenRequestorHandler) EnsureKubeconfig() error {
	// Fix: the message is constant, so fmt.Sprintf with no arguments was a
	// no-op (staticcheck S1039); log the plain string instead.
	t.logger.Info("Trying to create the token requestor secret")
	if err := t.createKubeconfigSecretFromGeneric(); err != nil {
		t.logger.Error(err, "Generic kubeconfig could not be copied")
		return err
	}
	t.logger.Info("Waiting until the token is propagated into the token requestor secret")
	if err := retry.UntilTimeout(t.ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
		// (false, nil) keeps retrying until isTokenInserted reports success.
		return t.isTokenInserted(), nil
	}); err != nil {
		t.logger.Error(err, "Kubeconfig could not be created")
		return err
	}
	return nil
}