mirror of
https://github.com/ysoftdevs/gardener-extension-shoot-fleet-agent.git
synced 2026-03-29 13:22:17 +02:00
Add token requestor flow
This commit is contained in:
@@ -17,6 +17,10 @@ package controller
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
managed_resource_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/managed-resource-handler"
|
||||
token_requestor_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/token-requestor-handler"
|
||||
"github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/utils"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
@@ -89,6 +93,21 @@ func initializeFleetManagers(config config.Config, logger logr.Logger) map[strin
|
||||
return fleetManagers
|
||||
}
|
||||
|
||||
// ensureDependencies sets up the prerequisites the fleet agent needs for a shoot:
// a shoot-access kubeconfig produced via the token-requestor flow, and the
// ManagedResources deployed for the shoot. Both handler functions are executed
// through utils.RunParallelFunctions (presumably concurrently — confirm in
// pkg/utils); the resulting error, if any, is returned to the caller.
func (a *actuator) ensureDependencies(ctx context.Context, cluster *extensions.Cluster) error {
	// Initialize token requestor handler
	tokenRequestor := token_requestor_handler.NewTokenRequestorHandler(ctx, a.client, cluster)

	// Initialize the managed resource handler
	managedResourceHandler := managed_resource_handler.NewManagedResourceHandler(ctx, a.client, cluster)

	dependencyFunctions := []func() error{
		tokenRequestor.EnsureKubeconfig,
		managedResourceHandler.EnsureManagedResoruces,
	}

	return utils.RunParallelFunctions(dependencyFunctions)
}
|
||||
|
||||
// Reconcile the Extension resource.
|
||||
func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extension) error {
|
||||
namespace := ex.GetNamespace()
|
||||
@@ -106,7 +125,17 @@ func (a *actuator) Reconcile(ctx context.Context, ex *extensionsv1alpha1.Extensi
|
||||
return fmt.Errorf("failed to decode provider config: %+v", err)
|
||||
}
|
||||
}
|
||||
a.ReconcileClusterInFleetManager(ctx, namespace, cluster, shootsConfigOverride)
|
||||
|
||||
if err := a.ensureDependencies(ctx, cluster); err != nil {
|
||||
a.logger.Error(err, "Could not ensure dependencies")
|
||||
return err
|
||||
}
|
||||
|
||||
if err = a.ReconcileClusterInFleetManager(ctx, namespace, cluster, shootsConfigOverride); err != nil {
|
||||
a.logger.Error(err, "Could not reconcile cluster in fleet")
|
||||
return err
|
||||
}
|
||||
|
||||
return a.updateStatus(ctx, ex)
|
||||
}
|
||||
|
||||
@@ -163,71 +192,87 @@ func (a *actuator) InjectScheme(scheme *runtime.Scheme) error {
|
||||
}
|
||||
|
||||
// ReconcileClusterInFleetManager reconciles cluster registration in remote fleet manager.
// It registers the shoot if fleet does not know it yet, syncs labels on an
// existing registration when they drift, and is a no-op when everything matches.
func (a *actuator) ReconcileClusterInFleetManager(ctx context.Context, namespace string, cluster *extensions.Cluster, override *config.Config) error {
	a.logger.Info("Starting with already registered check")
	// Effective labels combine the static service config with the per-shoot override.
	labels := prepareLabels(cluster, getProjectConfig(cluster, &a.serviceConfig), getProjectConfig(cluster, override))
	registered, err := a.getFleetManager(cluster).GetCluster(ctx, cluster.Shoot.Name)

	// We cannot find the cluster because of an unknown error
	if err != nil && !errors.IsNotFound(err) {
		a.logger.Error(err, "Failed to get cluster registration for Shoot", "shoot", cluster.Shoot.Name)
		return err
	}

	// We cannot find the cluster because we haven't registered it yet
	if err != nil && errors.IsNotFound(err) {
		a.logger.Info("Creating fleet cluster", "shoot", cluster.Shoot.Name)
		return a.registerNewClusterInFleet(ctx, namespace, cluster, labels)
	}

	// The cluster we have in fleet is already in the correct state
	if reflect.DeepEqual(registered.Labels, labels) {
		a.logger.Info("Cluster already registered - skipping registration", "clientId", registered.Spec.ClientID)
		return nil
	}

	a.logger.Info("Updating labels of already registered cluster.", "clientId", registered.Spec.ClientID)
	return a.updateClusterLabelsInFleet(ctx, registered, cluster, labels)
}
|
||||
|
||||
// updateClusterLabelsInFleet overwrites the labels of an existing fleet Cluster
// registration and pushes the update to the fleet manager responsible for the
// shoot. A failure is logged here and the error is also returned to the caller.
func (a *actuator) updateClusterLabelsInFleet(ctx context.Context, clusterRegistration *fleetv1alpha1.Cluster, cluster *extensions.Cluster, labels map[string]string) error {
	clusterRegistration.Labels = labels
	_, err := a.getFleetManager(cluster).UpdateCluster(ctx, clusterRegistration)
	if err != nil {
		a.logger.Error(err, "Failed to update cluster labels in Fleet registration.", "clusterName", clusterRegistration.Name)
	}
	return err
}
|
||||
|
||||
func (a *actuator) registerNewClusterInFleet(ctx context.Context, namespace string, cluster *extensions.Cluster, labels map[string]string) {
|
||||
func (a *actuator) registerNewClusterInFleet(ctx context.Context, namespace string, cluster *extensions.Cluster, labels map[string]string) error {
|
||||
a.logger.Info("Looking up Secret with KubeConfig for given Shoot.", "namespace", namespace, "secretName", KubeconfigSecretName)
|
||||
secret := &corev1.Secret{}
|
||||
if err := a.client.Get(ctx, kutil.Key(namespace, KubeconfigSecretName), secret); err == nil {
|
||||
secretData := make(map[string][]byte)
|
||||
secretData["value"] = secret.Data[KubeconfigKey]
|
||||
a.logger.Info("Loaded kubeconfig from secret", "kubeconfig", secret, "namespace", namespace)
|
||||
|
||||
const fleetRegisterNamespace = "clusters"
|
||||
kubeconfigSecret := corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "kubecfg-" + buildCrdName(cluster),
|
||||
Namespace: fleetRegisterNamespace,
|
||||
},
|
||||
Data: secretData,
|
||||
}
|
||||
|
||||
clusterRegistration := fleetv1alpha1.Cluster{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: buildCrdName(cluster),
|
||||
Namespace: fleetRegisterNamespace,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: fleetv1alpha1.ClusterSpec{
|
||||
KubeConfigSecret: "kubecfg-" + buildCrdName(cluster),
|
||||
},
|
||||
}
|
||||
if _, err = a.getFleetManager(cluster).CreateKubeconfigSecret(ctx, &kubeconfigSecret); err != nil {
|
||||
a.logger.Error(err, "Failed to create secret with kubeconfig for Fleet registration")
|
||||
}
|
||||
if _, err = a.getFleetManager(cluster).CreateCluster(ctx, &clusterRegistration); err != nil {
|
||||
a.logger.Error(err, "Failed to create Cluster for Fleet registration")
|
||||
}
|
||||
a.logger.Info("Registered shoot cluster in Fleet Manager ", "registration", clusterRegistration)
|
||||
} else {
|
||||
if err := a.client.Get(ctx, kutil.Key(namespace, KubeconfigSecretName), secret); err != nil {
|
||||
a.logger.Error(err, "Failed to find Secret with kubeconfig for Fleet registration.")
|
||||
return err
|
||||
}
|
||||
|
||||
secretData := make(map[string][]byte)
|
||||
secretData["value"] = secret.Data[KubeconfigKey]
|
||||
a.logger.Info("Loaded kubeconfig from secret", "kubeconfig", secret, "namespace", namespace)
|
||||
|
||||
const fleetRegisterNamespace = "clusters"
|
||||
kubeconfigSecret := corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "kubecfg-" + buildCrdName(cluster),
|
||||
Namespace: fleetRegisterNamespace,
|
||||
},
|
||||
Data: secretData,
|
||||
}
|
||||
|
||||
clusterRegistration := fleetv1alpha1.Cluster{
|
||||
TypeMeta: metav1.TypeMeta{},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: buildCrdName(cluster),
|
||||
Namespace: fleetRegisterNamespace,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: fleetv1alpha1.ClusterSpec{
|
||||
KubeConfigSecret: "kubecfg-" + buildCrdName(cluster),
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := a.getFleetManager(cluster).CreateKubeconfigSecret(ctx, &kubeconfigSecret); err != nil {
|
||||
a.logger.Error(err, "Failed to create secret with kubeconfig for Fleet registration")
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := a.getFleetManager(cluster).CreateCluster(ctx, &clusterRegistration); err != nil {
|
||||
a.logger.Error(err, "Failed to create Cluster for Fleet registration")
|
||||
return err
|
||||
}
|
||||
|
||||
a.logger.Info("Registered shoot cluster in Fleet Manager ", "registration", clusterRegistration)
|
||||
return nil
|
||||
}
|
||||
|
||||
func prepareLabels(cluster *extensions.Cluster, serviceConfig projConfig.ProjectConfig, override projConfig.ProjectConfig) map[string]string {
|
||||
|
||||
117
pkg/controller/managed-resource-handler/handler.go
Normal file
117
pkg/controller/managed-resource-handler/handler.go
Normal file
@@ -0,0 +1,117 @@
|
||||
package managed_resource_handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/gardener/gardener/extensions/pkg/util"
|
||||
"github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
|
||||
"github.com/gardener/gardener/pkg/extensions"
|
||||
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
|
||||
"github.com/gardener/gardener/pkg/utils/kubernetes/health"
|
||||
"github.com/gardener/gardener/pkg/utils/managedresources"
|
||||
"github.com/gardener/gardener/pkg/utils/retry"
|
||||
"github.com/go-logr/logr"
|
||||
token_requestor_handler "github.com/javamachr/gardener-extension-shoot-fleet-agent/pkg/controller/token-requestor-handler"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"path/filepath"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
	// ChartsPath is the base directory of the bundled helm charts.
	ChartsPath = filepath.Join("charts", "internal")
	// ManagedResourcesChartValues are the values the shoot chart is rendered
	// with; they point the chart at the service account targeted by the
	// token-requestor handler.
	ManagedResourcesChartValues = map[string]interface{}{
		"shootAccessServiceAccountName":      token_requestor_handler.TargetServiceAccountName,
		"shootAccessServiceAccountNamespace": token_requestor_handler.TargetServiceAccountNamespace,
	}
)

const (
	// ManagedResourceChartName is the chart directory name under ChartsPath.
	ManagedResourceChartName = "shoot-fleet-agent-shoot"
	// ManagedResourceName is the name of the ManagedResource object created for the shoot.
	ManagedResourceName = "extension-shoot-fleet-agent-shoot"
)
|
||||
|
||||
// ManagedResourceHandler renders the fleet-agent shoot chart and deploys it as
// a gardener ManagedResource, waiting for the resource to become healthy.
type ManagedResourceHandler struct {
	// NOTE(review): storing a context in a struct is generally discouraged;
	// acceptable here only because the handler is short-lived per reconcile.
	ctx     context.Context
	client  client.Client
	cluster *extensions.Cluster
	logger  logr.Logger
}

// NewManagedResourceHandler creates a handler bound to the given cluster, with
// a logger preconfigured with the handler and cluster names.
func NewManagedResourceHandler(ctx context.Context, client client.Client, cluster *extensions.Cluster) *ManagedResourceHandler {
	return &ManagedResourceHandler{
		ctx:     ctx,
		client:  client,
		cluster: cluster,
		logger:  log.Log.WithValues("logger", "managed-resource-handler", "cluster", cluster.ObjectMeta.Name),
	}
}
|
||||
|
||||
// renderChart takes the helm chart at charts/internal/<ManagedResourceChartName> and renders it with the values
// from ManagedResourcesChartValues, returning the combined rendered manifest
// bytes, or an error if the renderer cannot be built or rendering fails.
func (h *ManagedResourceHandler) renderChart() ([]byte, error) {
	chartPath := filepath.Join(ChartsPath, ManagedResourceChartName)
	// The renderer is built for the shoot's Kubernetes version so templates
	// can branch on cluster capabilities.
	renderer, err := util.NewChartRendererForShoot(h.cluster.Shoot.Spec.Kubernetes.Version)
	if err != nil {
		return nil, err
	}
	chart, err := renderer.Render(chartPath, ManagedResourceChartName, h.cluster.ObjectMeta.Name, ManagedResourcesChartValues)
	if err != nil {
		return nil, err
	}

	return chart.Manifest(), nil
}
|
||||
|
||||
// EnsureManagedResoruces renders the shoot chart, creates (or updates) the
// corresponding ManagedResource, and blocks until the resource reports healthy.
//
// NOTE(review): the method name is misspelled ("Resoruces"); it is exported and
// referenced from the actuator, so renaming requires a coordinated change there.
func (h *ManagedResourceHandler) EnsureManagedResoruces() error {
	h.logger.Info("Rendering chart with the managed resources")
	renderedChart, err := h.renderChart()
	if err != nil {
		return err
	}
	data := map[string][]byte{ManagedResourceChartName: renderedChart}
	// Addressable locals because managedresources.Create takes pointers.
	keepObjects := false
	forceOverwriteAnnotations := false

	h.logger.Info("Creating the ManagedResource")
	if err := managedresources.Create(h.ctx, h.client, h.cluster.ObjectMeta.Name, ManagedResourceName, false, "", data, &keepObjects, map[string]string{}, &forceOverwriteAnnotations); err != nil {
		h.logger.Error(err, "ManagedResource could not be created")
		return err
	}

	h.logger.Info("Waiting until the ManagedResource is healthy")
	if err := h.waitUntilManagedResourceHealthy(); err != nil {
		h.logger.Error(err, "ManagedResource has been created, but hasn't been applied")
		return err
	}

	h.logger.Info("ManagedResource created")
	return nil
}
|
||||
|
||||
// waitUntilManagedResourceHealthy polls the ManagedResource (in the namespace
// named after the cluster) every 5 seconds for up to 60 seconds until its
// status passes health.CheckManagedResource, returning the last error on timeout.
func (h *ManagedResourceHandler) waitUntilManagedResourceHealthy() error {
	name := ManagedResourceName
	namespace := h.cluster.ObjectMeta.Name

	obj := &v1alpha1.ManagedResource{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}

	return retry.UntilTimeout(h.ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (done bool, err error) {
		if err := h.client.Get(ctx, kutil.Key(namespace, name), obj); err != nil {
			h.logger.Info(fmt.Sprintf("Could not wait for the managed resource to be ready: %+v", err))
			// MinorError keeps retrying until the overall timeout elapses.
			return retry.MinorError(err)
		}

		// Check whether ManagedResource has proper status
		if err := health.CheckManagedResource(obj); err != nil {
			h.logger.Info(fmt.Sprintf("Managed resource %s not ready (yet): %+v", name, err))
			return retry.MinorError(err)
		}

		return retry.Ok()
	})
}
|
||||
150
pkg/controller/token-requestor-handler/handler.go
Normal file
150
pkg/controller/token-requestor-handler/handler.go
Normal file
@@ -0,0 +1,150 @@
|
||||
package token_requestor_handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
|
||||
constsv1alpha1 "github.com/gardener/gardener/pkg/apis/resources/v1alpha1"
|
||||
"github.com/gardener/gardener/pkg/extensions"
|
||||
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
|
||||
"github.com/gardener/gardener/pkg/utils/retry"
|
||||
"github.com/go-logr/logr"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientapiv1 "k8s.io/client-go/tools/clientcmd/api/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/yaml"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// TokenRequestorSecretName is the secret (labeled for the gardener
	// token-requestor) into which gardener injects a shoot access token.
	TokenRequestorSecretName = "shoot-access-extension-shoot-fleet-agent"
	// TokenRequestorSecretKey is the data key holding the kubeconfig.
	TokenRequestorSecretKey = constsv1alpha1.DataKeyKubeconfig
	// TargetServiceAccountName and TargetServiceAccountNamespace identify the
	// shoot service account the requested token is issued for.
	TargetServiceAccountName      = "extension-shoot-fleet-agent"
	TargetServiceAccountNamespace = "kube-system"
)

// TokenRequestorHandler creates the token-requestor kubeconfig secret for a
// shoot and waits until gardener has injected a token into it.
type TokenRequestorHandler struct {
	ctx     context.Context
	client  client.Client
	cluster *extensions.Cluster
	logger  logr.Logger
}
|
||||
|
||||
// NewTokenRequestorHandler creates a handler bound to the given cluster, with
// a logger preconfigured with the handler and cluster names.
func NewTokenRequestorHandler(ctx context.Context, client client.Client, cluster *extensions.Cluster) *TokenRequestorHandler {
	return &TokenRequestorHandler{
		ctx:     ctx,
		client:  client,
		cluster: cluster,
		logger:  log.Log.WithValues("logger", "token-requestor-handler", "cluster", cluster.ObjectMeta.Name),
	}
}

// getGenericKubeconfigName returns the name of the generic token kubeconfig
// secret gardener maintains for this cluster.
func (t *TokenRequestorHandler) getGenericKubeconfigName() string {
	return extensionscontroller.GenericTokenKubeconfigSecretNameFromCluster(t.cluster)
}
|
||||
|
||||
// getGenericKubeconfig loads the generic token kubeconfig secret from the
// namespace named after the cluster and parses its kubeconfig data.
// It fails if the secret is missing, lacks the expected data key, or the data
// does not unmarshal as a client-go kubeconfig.
func (t *TokenRequestorHandler) getGenericKubeconfig() (*clientapiv1.Config, error) {
	kubeconfigSecret := &v1.Secret{}
	kubeconfigSecretName := t.getGenericKubeconfigName()
	if err := t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, kubeconfigSecretName), kubeconfigSecret); err != nil {
		return nil, err
	}

	kubeconfigData, ok := kubeconfigSecret.Data[TokenRequestorSecretKey]
	if !ok {
		return nil, fmt.Errorf("secret %s doesn't have data key %s", kubeconfigSecretName, TokenRequestorSecretKey)
	}

	kubeconfig := &clientapiv1.Config{}
	if err := yaml.Unmarshal(kubeconfigData, kubeconfig); err != nil {
		return nil, err
	}
	return kubeconfig, nil
}
|
||||
|
||||
func (t *TokenRequestorHandler) isTokenInserted() bool {
|
||||
kubeconfigSecret := &v1.Secret{}
|
||||
if err := t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, TokenRequestorSecretName), kubeconfigSecret); err != nil {
|
||||
t.logger.Info(fmt.Sprintf("Couldn't fet the kubeconfig secret %s, %+v", TokenRequestorSecretName, err))
|
||||
return false
|
||||
}
|
||||
|
||||
kubeconfigData, ok := kubeconfigSecret.Data[TokenRequestorSecretKey]
|
||||
if !ok {
|
||||
t.logger.Info(fmt.Sprintf("Kubeconfig secret %s doesn't contain data item %s", TokenRequestorSecretName, TokenRequestorSecretKey))
|
||||
return false
|
||||
}
|
||||
|
||||
kubeconfig := &clientapiv1.Config{}
|
||||
if err := yaml.Unmarshal(kubeconfigData, kubeconfig); err != nil {
|
||||
t.logger.Info(fmt.Sprintf("Data item %s in kubeconfig secret %s couldn't be parsed: %+v", TokenRequestorSecretKey, TokenRequestorSecretName, err))
|
||||
return false
|
||||
}
|
||||
|
||||
if len(kubeconfig.AuthInfos) == 0 || kubeconfig.AuthInfos[0].AuthInfo.Token == "" {
|
||||
t.logger.Info(fmt.Sprintf("Kubeconfig in secret %s doesn't contain token (yet?)", TokenRequestorSecretName))
|
||||
return false
|
||||
}
|
||||
|
||||
t.logger.Info(fmt.Sprintf("Kubeconfig in secret %s contains a proper token", TokenRequestorSecretName))
|
||||
return true
|
||||
}
|
||||
|
||||
// createKubeconfigSecretFromGeneric copies the generic token kubeconfig into a
// new secret labeled for the gardener token-requestor (annotated with the
// target service account), so that gardener injects a shoot access token into it.
// If the secret already exists the function returns nil without changes.
func (t *TokenRequestorHandler) createKubeconfigSecretFromGeneric() error {
	kubeconfig, err := t.getGenericKubeconfig()
	if err != nil {
		return err
	}

	kubeconfigYaml, err := yaml.Marshal(kubeconfig)
	if err != nil {
		return err
	}

	existingSecret := &v1.Secret{}
	err = t.client.Get(t.ctx, kutil.Key(t.cluster.ObjectMeta.Name, TokenRequestorSecretName), existingSecret)
	// Subtle: err == nil means the secret already exists, so we return nil
	// (nothing to do). Any error other than NotFound is propagated. Only
	// NotFound falls through to the creation below.
	if err == nil || !apierrors.IsNotFound(err) {
		return err
	}

	secretObject := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: t.cluster.ObjectMeta.Name,
			Name:      TokenRequestorSecretName,
			// The purpose label makes gardener's token-requestor pick this secret up.
			Labels: map[string]string{
				constsv1alpha1.ResourceManagerPurpose: constsv1alpha1.LabelPurposeTokenRequest,
			},
			// Annotations select which shoot service account the token is bound to.
			Annotations: map[string]string{
				constsv1alpha1.ServiceAccountName:      TargetServiceAccountName,
				constsv1alpha1.ServiceAccountNamespace: TargetServiceAccountNamespace,
			},
		},
		StringData: map[string]string{
			TokenRequestorSecretKey: string(kubeconfigYaml),
		},
	}
	return t.client.Create(t.ctx, secretObject)
}
|
||||
|
||||
// EnsureKubeconfig creates the secret with a token-requestor label and waits until gardener fills in
|
||||
// the token into the kubeconfig
|
||||
func (t *TokenRequestorHandler) EnsureKubeconfig() error {
|
||||
t.logger.Info(fmt.Sprintf("Trying to create the token requestor secret"))
|
||||
if err := t.createKubeconfigSecretFromGeneric(); err != nil {
|
||||
t.logger.Error(err, "Generic kubeconfig could not be copied")
|
||||
return err
|
||||
}
|
||||
|
||||
t.logger.Info("Waiting until the token is propagated into the token requestor secret")
|
||||
if err := retry.UntilTimeout(t.ctx, 5*time.Second, 60*time.Second, func(ctx context.Context) (bool, error) {
|
||||
return t.isTokenInserted(), nil
|
||||
}); err != nil {
|
||||
t.logger.Error(err, "Kubeconfig could not be created")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Reference in New Issue
Block a user