From c1b37cf196882c0fbbd47508e0f617da11f88ddd Mon Sep 17 00:00:00 2001
From: RainbowMango
Date: Wed, 28 Apr 2021 17:08:06 +0800
Subject: [PATCH] Adopt Reconcile() method signature change

Adopt MapObject removal change
Adopt event handling object changes
Adopt ToRequestsFunc to MapFunc change
Adopt EnqueueRequestsFromMapFunc change
Adopt Runnable interface change
Adopt admission review version v1

Signed-off-by: RainbowMango
---
 artifacts/deploy/webhook-configuration.yaml | 14 ++++----
 cmd/agent/app/agent.go | 17 +++++-----
 cmd/agent/main.go | 4 +--
 .../app/controllermanager.go | 11 ++++---
 cmd/controller-manager/controller-manager.go | 4 +--
 cmd/webhook/app/webhook.go | 9 +++---
 cmd/webhook/main.go | 4 +--
 pkg/controllers/binding/binding_controller.go | 32 +++++++++----------
 .../cluster_resource_binding_controller.go | 32 +++++++++----------
 pkg/controllers/cluster/cluster_controller.go | 2 +-
 .../execution/execution_controller.go | 2 +-
 pkg/controllers/hpa/hpa_controller.go | 2 +-
 .../namespace/namespace_sync_controller.go | 13 +++-----
 .../propagationpolicy_controller.go | 2 +-
 .../status/cluster_status_controller.go | 2 +-
 .../status/workstatus_controller.go | 2 +-
 pkg/util/detector/detector.go | 12 +++----
 .../clusterpropagationpolicy/validating.go | 4 +--
 pkg/webhook/propagationpolicy/validating.go | 4 +--
 19 files changed, 85 insertions(+), 87 deletions(-)

diff --git a/artifacts/deploy/webhook-configuration.yaml b/artifacts/deploy/webhook-configuration.yaml
index 61f92a510e29..25c50477390b 100644
--- a/artifacts/deploy/webhook-configuration.yaml
+++ b/artifacts/deploy/webhook-configuration.yaml
@@ -17,7 +17,7 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
   - name: clusterpropagationpolicy.karmada.io
     rules:
@@ -31,7 +31,7 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
   - name: overridepolicy.karmada.io
     rules:
@@ -45,7 +45,7 @@ webhooks:
      caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
   - name: work.karmada.io
     rules:
@@ -59,7 +59,7 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
 ---
 apiVersion: admissionregistration.k8s.io/v1
@@ -81,7 +81,7 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
   - name: propagationpolicy.karmada.io
     rules:
@@ -95,7 +95,7 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
   - name: clusterpropagationpolicy.karmada.io
     rules:
@@ -109,5 +109,5 @@ webhooks:
       caBundle: {{caBundle}}
     failurePolicy: Fail
     sideEffects: None
-    admissionReviewVersions: ["v1beta1"]
+    admissionReviewVersions: ["v1"]
     timeoutSeconds: 3
diff --git a/cmd/agent/app/agent.go b/cmd/agent/app/agent.go
index 8cc8573e5e1a..86e30c52e6f3 100644
--- a/cmd/agent/app/agent.go
+++ b/cmd/agent/app/agent.go
@@ -1,6 +1,7 @@
 package app
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"os"
@@ -27,14 +28,14 @@ import (
 )
 
 // NewAgentCommand creates a *cobra.Command object with default parameters
-func NewAgentCommand(stopChan <-chan struct{}) *cobra.Command {
+func NewAgentCommand(ctx context.Context) *cobra.Command {
 	opts := options.NewOptions()
 
 	cmd := &cobra.Command{
 		Use:  "karmada-agent",
 		Long: `The karmada agent runs the cluster registration agent`,
 		Run: func(cmd *cobra.Command, args []string) {
-			if err := run(opts, stopChan); err != nil {
+			if err := run(ctx, opts); err != nil {
 				fmt.Fprintf(os.Stderr, "%v\n", err)
 				os.Exit(1)
 			}
@@ -46,7 +47,7 @@ func NewAgentCommand(stopChan <-chan struct{}) *cobra.Command {
 	return cmd
 }
 
-func run(opts *options.Options, stopChan <-chan struct{}) error {
+func run(ctx context.Context, opts *options.Options) error {
 	controlPlaneRestConfig, err := clientcmd.BuildConfigFromFlags("", opts.KarmadaKubeConfig)
 	if err != nil {
 		return fmt.Errorf("error building kubeconfig of karmada control plane: %s", err.Error())
@@ -74,10 +75,10 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
 		return err
 	}
 
-	setupControllers(controllerManager, opts, stopChan)
+	setupControllers(controllerManager, opts, ctx.Done())
 
 	// blocks until the stop channel is closed.
-	if err := controllerManager.Start(stopChan); err != nil {
+	if err := controllerManager.Start(ctx); err != nil {
 		klog.Errorf("controller manager exits unexpectedly: %v", err)
 		return err
 	}
@@ -88,13 +89,13 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
 func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
 	predicateFun := predicate.Funcs{
 		CreateFunc: func(createEvent event.CreateEvent) bool {
-			return createEvent.Meta.GetName() == opts.ClusterName
+			return createEvent.Object.GetName() == opts.ClusterName
 		},
 		UpdateFunc: func(updateEvent event.UpdateEvent) bool {
-			return updateEvent.MetaOld.GetName() == opts.ClusterName
+			return updateEvent.ObjectOld.GetName() == opts.ClusterName
 		},
 		DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
-			return deleteEvent.Meta.GetName() == opts.ClusterName
+			return deleteEvent.Object.GetName() == opts.ClusterName
 		},
 		GenericFunc: func(genericEvent event.GenericEvent) bool {
 			return false
diff --git a/cmd/agent/main.go b/cmd/agent/main.go
index 9f09168bec6b..f5e1526a9e80 100644
--- a/cmd/agent/main.go
+++ b/cmd/agent/main.go
@@ -12,9 +12,9 @@ func main() {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
-	stopChan := apiserver.SetupSignalHandler()
+	ctx := apiserver.SetupSignalContext()
 
-	if err := app.NewAgentCommand(stopChan).Execute(); err != nil {
+	if err := app.NewAgentCommand(ctx).Execute(); err != nil {
 		klog.Fatal(err.Error())
 	}
 }
diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go
index 036850a0d654..4fa8a1acd074 100644
--- a/cmd/controller-manager/app/controllermanager.go
+++ b/cmd/controller-manager/app/controllermanager.go
@@ -1,6 +1,7 @@
 package app
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"os"
@@ -36,7 +37,7 @@ import (
 )
 
 // NewControllerManagerCommand creates a *cobra.Command object with default parameters
-func NewControllerManagerCommand(stopChan <-chan struct{}) *cobra.Command {
+func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
 	opts := options.NewOptions()
 
 	cmd := &cobra.Command{
@@ -44,7 +45,7 @@ func NewControllerManagerCommand(stopChan <-chan struct{}) *cobra.Command {
 		Long: `The controller manager runs a bunch of controllers`,
 		Run: func(cmd *cobra.Command, args []string) {
 			opts.Complete()
-			if err := Run(opts, stopChan); err != nil {
+			if err := Run(ctx, opts); err != nil {
 				fmt.Fprintf(os.Stderr, "%v\n", err)
 				os.Exit(1)
 			}
@@ -57,7 +58,7 @@ func NewControllerManagerCommand(stopChan <-chan struct{}) *cobra.Command {
 }
 
 // Run runs the controller-manager with options. This should never exit.
-func Run(opts *options.Options, stopChan <-chan struct{}) error {
+func Run(ctx context.Context, opts *options.Options) error {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
@@ -82,10 +83,10 @@ func Run(opts *options.Options, stopChan <-chan struct{}) error {
 		return err
 	}
 
-	setupControllers(controllerManager, opts, stopChan)
+	setupControllers(controllerManager, opts, ctx.Done())
 
 	// blocks until the stop channel is closed.
-	if err := controllerManager.Start(stopChan); err != nil {
+	if err := controllerManager.Start(ctx); err != nil {
 		klog.Errorf("controller manager exits unexpectedly: %v", err)
 		return err
 	}
diff --git a/cmd/controller-manager/controller-manager.go b/cmd/controller-manager/controller-manager.go
index 3faf0c2d8664..b4b0f8baa833 100644
--- a/cmd/controller-manager/controller-manager.go
+++ b/cmd/controller-manager/controller-manager.go
@@ -15,9 +15,9 @@ func main() {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
-	stopChan := apiserver.SetupSignalHandler()
+	ctx := apiserver.SetupSignalContext()
 
-	if err := app.NewControllerManagerCommand(stopChan).Execute(); err != nil {
+	if err := app.NewControllerManagerCommand(ctx).Execute(); err != nil {
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 		os.Exit(1)
 	}
diff --git a/cmd/webhook/app/webhook.go b/cmd/webhook/app/webhook.go
index a620d347797c..811804d12eb3 100644
--- a/cmd/webhook/app/webhook.go
+++ b/cmd/webhook/app/webhook.go
@@ -1,6 +1,7 @@
 package app
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"net/http"
@@ -23,7 +24,7 @@ import (
 )
 
 // NewWebhookCommand creates a *cobra.Command object with default parameters
-func NewWebhookCommand(stopChan <-chan struct{}) *cobra.Command {
+func NewWebhookCommand(ctx context.Context) *cobra.Command {
 	opts := options.NewOptions()
 
 	cmd := &cobra.Command{
@@ -31,7 +32,7 @@ func NewWebhookCommand(stopChan <-chan struct{}) *cobra.Command {
 		Long: `Start a webhook server`,
 		Run: func(cmd *cobra.Command, args []string) {
 			opts.Complete()
-			if err := Run(opts, stopChan); err != nil {
+			if err := Run(ctx, opts); err != nil {
 				fmt.Fprintf(os.Stderr, "%v\n", err)
 				os.Exit(1)
 			}
@@ -45,7 +46,7 @@ func NewWebhookCommand(stopChan <-chan struct{}) *cobra.Command {
 }
 
 // Run runs the webhook server with options. This should never exit.
-func Run(opts *options.Options, stopChan <-chan struct{}) error {
+func Run(ctx context.Context, opts *options.Options) error {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
@@ -78,7 +79,7 @@ func Run(opts *options.Options, stopChan <-chan struct{}) error {
 	hookServer.WebhookMux.Handle("/readyz/", http.StripPrefix("/readyz/", &healthz.Handler{}))
 
 	// blocks until the stop channel is closed.
-	if err := hookManager.Start(stopChan); err != nil {
+	if err := hookManager.Start(ctx); err != nil {
 		klog.Errorf("webhook server exits unexpectedly: %v", err)
 		return err
 	}
diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index e33caf8f1b3b..9888650d64e9 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -14,9 +14,9 @@ func main() {
 	logs.InitLogs()
 	defer logs.FlushLogs()
 
-	stopChan := apiserver.SetupSignalHandler()
+	ctx := apiserver.SetupSignalContext()
 
-	if err := app.NewWebhookCommand(stopChan).Execute(); err != nil {
+	if err := app.NewWebhookCommand(ctx).Execute(); err != nil {
 		fmt.Fprintf(os.Stderr, "%v\n", err)
 		os.Exit(1)
 	}
diff --git a/pkg/controllers/binding/binding_controller.go b/pkg/controllers/binding/binding_controller.go
index e11986c0ba40..3d8b0b886151 100644
--- a/pkg/controllers/binding/binding_controller.go
+++ b/pkg/controllers/binding/binding_controller.go
@@ -38,7 +38,7 @@ type ResourceBindingController struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *ResourceBindingController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *ResourceBindingController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling ResourceBinding %s.", req.NamespacedName.String())
 
 	binding := &workv1alpha1.ResourceBinding{}
@@ -109,11 +109,11 @@ func (c *ResourceBindingController) syncBinding(binding *workv1alpha1.ResourceBi
 
 // SetupWithManager creates a controller and register to controller manager.
 func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manager) error {
-	workFn := handler.ToRequestsFunc(
-		func(a handler.MapObject) []reconcile.Request {
+	workFn := handler.MapFunc(
+		func(a client.Object) []reconcile.Request {
 			var requests []reconcile.Request
 
-			labels := a.Meta.GetLabels()
+			labels := a.GetLabels()
 			resourcebindingNamespace, namespaceExist := labels[util.ResourceBindingNamespaceLabel]
 			resourcebindingName, nameExist := labels[util.ResourceBindingNameLabel]
 			if !namespaceExist || !nameExist {
@@ -129,17 +129,17 @@ func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manag
 		})
 
 	return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.ResourceBinding{}).
-		Watches(&source.Kind{Type: &workv1alpha1.Work{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: workFn}, workPredicateFn).
-		Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newOverridePolicyFunc()}).
-		Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newOverridePolicyFunc()}).
-		Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newReplicaSchedulingPolicyFunc()}).
+		Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
+		Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
+		Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
+		Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newReplicaSchedulingPolicyFunc())).
 		Complete(c)
 }
 
-func (c *ResourceBindingController) newOverridePolicyFunc() handler.ToRequestsFunc {
-	return func(a handler.MapObject) []reconcile.Request {
+func (c *ResourceBindingController) newOverridePolicyFunc() handler.MapFunc {
+	return func(a client.Object) []reconcile.Request {
 		var overrideRS []policyv1alpha1.ResourceSelector
-		switch t := a.Object.(type) {
+		switch t := a.(type) {
 		case *policyv1alpha1.ClusterOverridePolicy:
 			overrideRS = t.Spec.ResourceSelectors
 		case *policyv1alpha1.OverridePolicy:
@@ -164,7 +164,7 @@ func (c *ResourceBindingController) newOverridePolicyFunc() handler.ToRequestsFu
 
 			for _, rs := range overrideRS {
 				if util.ResourceMatches(workload, rs) {
-					klog.V(2).Infof("Enqueue ResourceBinding(%s/%s) as override policy(%s/%s) changes.", binding.Namespace, binding.Name, a.Meta.GetNamespace(), a.Meta.GetName())
+					klog.V(2).Infof("Enqueue ResourceBinding(%s/%s) as override policy(%s/%s) changes.", binding.Namespace, binding.Name, a.GetNamespace(), a.GetName())
 					requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: binding.Namespace, Name: binding.Name}})
 					break
 				}
@@ -174,9 +174,9 @@ func (c *ResourceBindingController) newOverridePolicyFunc() handler.ToRequestsFu
 	}
 }
 
-func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.ToRequestsFunc {
-	return func(a handler.MapObject) []reconcile.Request {
-		rspResourceSelectors := a.Object.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
+func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
+	return func(a client.Object) []reconcile.Request {
+		rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
 		bindingList := &workv1alpha1.ResourceBindingList{}
 		if err := c.Client.List(context.TODO(), bindingList); err != nil {
 			klog.Errorf("Failed to list resourceBindings, error: %v", err)
@@ -193,7 +193,7 @@ func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.ToR
 
 			for _, rs := range rspResourceSelectors {
 				if util.ResourceMatches(workload, rs) {
-					klog.V(2).Infof("Enqueue ResourceBinding(%s/%s) as replica scheduling policy(%s/%s) changes.", binding.Namespace, binding.Name, a.Meta.GetNamespace(), a.Meta.GetName())
+					klog.V(2).Infof("Enqueue ResourceBinding(%s/%s) as replica scheduling policy(%s/%s) changes.", binding.Namespace, binding.Name, a.GetNamespace(), a.GetName())
 					requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: binding.Namespace, Name: binding.Name}})
 					break
 				}
diff --git a/pkg/controllers/binding/cluster_resource_binding_controller.go b/pkg/controllers/binding/cluster_resource_binding_controller.go
index 7ccc41af28fe..a4ae7e9afd66 100644
--- a/pkg/controllers/binding/cluster_resource_binding_controller.go
+++ b/pkg/controllers/binding/cluster_resource_binding_controller.go
@@ -38,7 +38,7 @@ type ClusterResourceBindingController struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *ClusterResourceBindingController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *ClusterResourceBindingController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling ClusterResourceBinding %s.", req.NamespacedName.String())
 
 	clusterResourceBinding := &workv1alpha1.ClusterResourceBinding{}
@@ -104,11 +104,11 @@ func (c *ClusterResourceBindingController) syncBinding(binding *workv1alpha1.Clu
 
 // SetupWithManager creates a controller and register to controller manager.
 func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntime.Manager) error {
-	workFn := handler.ToRequestsFunc(
-		func(a handler.MapObject) []reconcile.Request {
+	workFn := handler.MapFunc(
+		func(a client.Object) []reconcile.Request {
 			var requests []reconcile.Request
-			labels := a.Meta.GetLabels()
+			labels := a.GetLabels()
 			clusterResourcebindingName, nameExist := labels[util.ClusterResourceBindingLabel]
 			if !nameExist {
 				return nil
 			}
@@ -122,17 +122,17 @@ func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntim
 		})
 
 	return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.ClusterResourceBinding{}).
-		Watches(&source.Kind{Type: &workv1alpha1.Work{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: workFn}, workPredicateFn).
-		Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newOverridePolicyFunc()}).
-		Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newOverridePolicyFunc()}).
-		Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: c.newReplicaSchedulingPolicyFunc()}).
+		Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
+		Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
+		Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
+		Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newReplicaSchedulingPolicyFunc())).
 		Complete(c)
 }
 
-func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.ToRequestsFunc {
-	return func(a handler.MapObject) []reconcile.Request {
+func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.MapFunc {
+	return func(a client.Object) []reconcile.Request {
 		var overrideRS []policyv1alpha1.ResourceSelector
-		switch t := a.Object.(type) {
+		switch t := a.(type) {
 		case *policyv1alpha1.ClusterOverridePolicy:
 			overrideRS = t.Spec.ResourceSelectors
 		case *policyv1alpha1.OverridePolicy:
@@ -157,7 +157,7 @@ func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.ToReq
 
 			for _, rs := range overrideRS {
 				if util.ResourceMatches(workload, rs) {
-					klog.V(2).Infof("Enqueue ClusterResourceBinding(%s) as override policy(%s/%s) changes.", binding.Name, a.Meta.GetNamespace(), a.Meta.GetName())
+					klog.V(2).Infof("Enqueue ClusterResourceBinding(%s) as override policy(%s/%s) changes.", binding.Name, a.GetNamespace(), a.GetName())
 					requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: binding.Name}})
 					break
 				}
@@ -167,9 +167,9 @@ func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.ToReq
 	}
 }
 
-func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() handler.ToRequestsFunc {
-	return func(a handler.MapObject) []reconcile.Request {
-		rspResourceSelectors := a.Object.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
+func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
+	return func(a client.Object) []reconcile.Request {
+		rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
 		bindingList := &workv1alpha1.ClusterResourceBindingList{}
 		if err := c.Client.List(context.TODO(), bindingList); err != nil {
 			klog.Errorf("Failed to list clusterResourceBindings, error: %v", err)
@@ -186,7 +186,7 @@ func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() hand
 
 			for _, rs := range rspResourceSelectors {
 				if util.ResourceMatches(workload, rs) {
-					klog.V(2).Infof("Enqueue ClusterResourceBinding(%s) as replica scheduling policy(%s/%s) changes.", binding.Name, a.Meta.GetNamespace(), a.Meta.GetName())
+					klog.V(2).Infof("Enqueue ClusterResourceBinding(%s) as replica scheduling policy(%s/%s) changes.", binding.Name, a.GetNamespace(), a.GetName())
 					requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: binding.Name}})
 					break
 				}
diff --git a/pkg/controllers/cluster/cluster_controller.go b/pkg/controllers/cluster/cluster_controller.go
index 2af310e3a0be..1fb9c002bc99 100644
--- a/pkg/controllers/cluster/cluster_controller.go
+++ b/pkg/controllers/cluster/cluster_controller.go
@@ -34,7 +34,7 @@ type Controller struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling cluster %s", req.NamespacedName.Name)
 
 	cluster := &v1alpha1.Cluster{}
diff --git a/pkg/controllers/execution/execution_controller.go b/pkg/controllers/execution/execution_controller.go
index aaa021be8b7d..a8df85dbf4c0 100644
--- a/pkg/controllers/execution/execution_controller.go
+++ b/pkg/controllers/execution/execution_controller.go
@@ -41,7 +41,7 @@ type Controller struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String())
 
 	work := &workv1alpha1.Work{}
diff --git a/pkg/controllers/hpa/hpa_controller.go b/pkg/controllers/hpa/hpa_controller.go
index 62a7137b2e32..be25ce4013e5 100644
--- a/pkg/controllers/hpa/hpa_controller.go
+++ b/pkg/controllers/hpa/hpa_controller.go
@@ -37,7 +37,7 @@ type HorizontalPodAutoscalerController struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *HorizontalPodAutoscalerController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *HorizontalPodAutoscalerController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling HorizontalPodAutoscaler %s.", req.NamespacedName.String())
 
 	hpa := &autoscalingv1.HorizontalPodAutoscaler{}
diff --git a/pkg/controllers/namespace/namespace_sync_controller.go b/pkg/controllers/namespace/namespace_sync_controller.go
index 7a48739a6d15..86627be28f77 100644
--- a/pkg/controllers/namespace/namespace_sync_controller.go
+++ b/pkg/controllers/namespace/namespace_sync_controller.go
@@ -45,7 +45,7 @@ type Controller struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Namespaces sync controller reconciling %s", req.NamespacedName.String())
 	if !c.namespaceShouldBeSynced(req.Name) {
 		return controllerruntime.Result{}, nil
 	}
@@ -134,8 +134,8 @@ func (c *Controller) buildWorks(namespace *v1.Namespace, clusters []v1alpha1.Clu
 
 // SetupWithManager creates a controller and register to controller manager.
 func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
-	namespaceFn := handler.ToRequestsFunc(
-		func(a handler.MapObject) []reconcile.Request {
+	namespaceFn := handler.MapFunc(
+		func(a client.Object) []reconcile.Request {
 			var requests []reconcile.Request
 			namespaceList := &v1.NamespaceList{}
 			if err := c.Client.List(context.TODO(), namespaceList); err != nil {
@@ -153,10 +153,6 @@ func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
 
 	predicate := builder.WithPredicates(predicate.Funcs{
 		CreateFunc: func(e event.CreateEvent) bool {
-			if e.Meta == nil {
-				klog.Errorf("CreateEvent received with no metadata, event: %v", e)
-				return false
-			}
 			return true
 		},
 		UpdateFunc: func(e event.UpdateEvent) bool {
@@ -171,7 +167,6 @@ func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
 	})
 
 	return controllerruntime.NewControllerManagedBy(mgr).
-		For(&v1.Namespace{}).Watches(&source.Kind{Type: &v1alpha1.Cluster{}}, &handler.EnqueueRequestsFromMapFunc{
-			ToRequests: namespaceFn},
+		For(&v1.Namespace{}).Watches(&source.Kind{Type: &v1alpha1.Cluster{}}, handler.EnqueueRequestsFromMapFunc(namespaceFn),
 			predicate).Complete(c)
 }
diff --git a/pkg/controllers/propagationpolicy/propagationpolicy_controller.go b/pkg/controllers/propagationpolicy/propagationpolicy_controller.go
index a791875fc109..d92d23399779 100644
--- a/pkg/controllers/propagationpolicy/propagationpolicy_controller.go
+++ b/pkg/controllers/propagationpolicy/propagationpolicy_controller.go
@@ -21,7 +21,7 @@ type Controller struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *Controller) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling PropagationPolicy %s.", req.NamespacedName.String())
 
 	policy := &v1alpha1.PropagationPolicy{}
diff --git a/pkg/controllers/status/cluster_status_controller.go b/pkg/controllers/status/cluster_status_controller.go
index cce587e76c85..97401ad0c6b2 100644
--- a/pkg/controllers/status/cluster_status_controller.go
+++ b/pkg/controllers/status/cluster_status_controller.go
@@ -54,7 +54,7 @@ type ClusterStatusController struct {
 // Reconcile syncs status of the given member cluster.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will requeue the reconcile key after the duration.
-func (c *ClusterStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *ClusterStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Syncing cluster status: %s", req.NamespacedName.String())
 
 	cluster := &v1alpha1.Cluster{}
diff --git a/pkg/controllers/status/workstatus_controller.go b/pkg/controllers/status/workstatus_controller.go
index 33220295c594..f31a9a5975cd 100644
--- a/pkg/controllers/status/workstatus_controller.go
+++ b/pkg/controllers/status/workstatus_controller.go
@@ -51,7 +51,7 @@ type WorkStatusController struct {
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
-func (c *WorkStatusController) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
+func (c *WorkStatusController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
 	klog.V(4).Infof("Reconciling status of Work %s.", req.NamespacedName.String())
 
 	work := &workv1alpha1.Work{}
diff --git a/pkg/util/detector/detector.go b/pkg/util/detector/detector.go
index 42267865d9de..72531f807411 100644
--- a/pkg/util/detector/detector.go
+++ b/pkg/util/detector/detector.go
@@ -75,10 +75,10 @@ type ResourceDetector struct {
 }
 
 // Start runs the detector, never stop until stopCh closed.
-func (d *ResourceDetector) Start(stopCh <-chan struct{}) error {
+func (d *ResourceDetector) Start(ctx context.Context) error {
 	klog.Infof("Starting resource detector.")
 	d.waitingObjects = make(map[keys.ClusterWideKey]struct{})
-	d.stopCh = stopCh
+	d.stopCh = ctx.Done()
 
 	// setup policy reconcile worker
 	d.policyReconcileWorker = util.NewAsyncWorker("propagationpolicy detector", 1*time.Millisecond, ClusterWideKeyFunc, d.ReconcilePropagationPolicy)
@@ -104,10 +104,10 @@ func (d *ResourceDetector) Start(stopCh <-chan struct{}) error {
 	clusterBindingHandler := informermanager.NewHandlerOnEvents(d.OnClusterResourceBindingAdd, d.OnClusterResourceBindingUpdate, d.OnClusterResourceBindingDelete)
 	d.InformerManager.ForResource(workv1alpha1.SchemeGroupVersion.WithResource("clusterresourcebindings"), clusterBindingHandler)
 
-	d.Processor.Run(1, stopCh)
+	d.Processor.Run(1, d.stopCh)
 	go d.discoverResources(30 * time.Second)
 
-	<-stopCh
+	<-d.stopCh
 	klog.Infof("Stopped as stopCh closed.")
 	return nil
 }
@@ -425,7 +425,7 @@ func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructure
 	util.MergeLabel(object, util.PropagationPolicyNamespaceLabel, policyNamespace)
 	util.MergeLabel(object, util.PropagationPolicyNameLabel, policyName)
 
-	return d.Client.Update(context.TODO(), object.DeepCopyObject())
+	return d.Client.Update(context.TODO(), object)
 }
 
 // ClaimClusterPolicyForObject set cluster identifier which the object associated with.
@@ -438,7 +438,7 @@ func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unst
 	}
 
 	util.MergeLabel(object, util.ClusterPropagationPolicyLabel, policyName)
-	return d.Client.Update(context.TODO(), object.DeepCopyObject())
+	return d.Client.Update(context.TODO(), object)
 }
 
 // BuildResourceBinding builds a desired ResourceBinding for object.
diff --git a/pkg/webhook/clusterpropagationpolicy/validating.go b/pkg/webhook/clusterpropagationpolicy/validating.go
index 521c79280f58..4a4442acfd59 100644
--- a/pkg/webhook/clusterpropagationpolicy/validating.go
+++ b/pkg/webhook/clusterpropagationpolicy/validating.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"net/http"
 
-	"k8s.io/api/admission/v1beta1"
+	admissionv1 "k8s.io/api/admission/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -33,7 +33,7 @@ func (v *ValidatingAdmission) Handle(ctx context.Context, req admission.Request)
 	}
 	klog.V(2).Infof("Validating ClusterPropagationPolicy(%s) for request: %s", policy.Name, req.Operation)
 
-	if req.Operation == v1beta1.Update {
+	if req.Operation == admissionv1.Update {
 		oldPolicy := &policyv1alpha1.ClusterPropagationPolicy{}
 		err := v.decoder.DecodeRaw(req.OldObject, oldPolicy)
 		if err != nil {
diff --git a/pkg/webhook/propagationpolicy/validating.go b/pkg/webhook/propagationpolicy/validating.go
index b27662223906..0a4e172ebc7c 100644
--- a/pkg/webhook/propagationpolicy/validating.go
+++ b/pkg/webhook/propagationpolicy/validating.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"net/http"
 
-	"k8s.io/api/admission/v1beta1"
+	admissionv1 "k8s.io/api/admission/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -33,7 +33,7 @@ func (v *ValidatingAdmission) Handle(ctx context.Context, req admission.Request)
 	}
 	klog.V(2).Infof("Validating PropagationPolicy(%s/%s) for request: %s", policy.Namespace, policy.Name, req.Operation)
 
-	if req.Operation == v1beta1.Update {
+	if req.Operation == admissionv1.Update {
 		oldPolicy := &policyv1alpha1.PropagationPolicy{}
 		err := v.decoder.DecodeRaw(req.OldObject, oldPolicy)
 		if err != nil {
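
Illustrative sketch (not part of the patch): a minimal reconciler showing the controller-runtime API shapes this change migrates to. The ExampleReconciler type and the core/v1 ConfigMap/Secret kinds are hypothetical stand-ins; only the signatures mirror the migration above (context-aware Reconcile, handler.MapFunc over client.Object, and EnqueueRequestsFromMapFunc as a function call).

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// ExampleReconciler is a hypothetical reconciler used only to demonstrate the signatures.
type ExampleReconciler struct {
	client.Client
}

// Reconcile now receives a context.Context as its first argument.
func (r *ExampleReconciler) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
	// Fetch and reconcile the object referenced by req here.
	return controllerruntime.Result{}, nil
}

// SetupWithManager wires a watch with the new handler.MapFunc, which receives a
// client.Object directly instead of the removed handler.MapObject wrapper.
func (r *ExampleReconciler) SetupWithManager(mgr controllerruntime.Manager) error {
	mapFn := handler.MapFunc(func(obj client.Object) []reconcile.Request {
		// Enqueue a request for the object that triggered the event.
		return []reconcile.Request{{NamespacedName: client.ObjectKeyFromObject(obj)}}
	})
	return controllerruntime.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}).
		Watches(&source.Kind{Type: &corev1.Secret{}}, handler.EnqueueRequestsFromMapFunc(mapFn)).
		Complete(r)
}

// The manager itself is started with a context rather than a stop channel,
// e.g. mgr.Start(ctx) where ctx comes from apiserver.SetupSignalContext().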