From 416fa6ad23c69f70f211e9d0a7c2e3a42f4744ab Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Mon, 8 Aug 2022 15:05:58 +0200 Subject: [PATCH 1/6] Enforce AllowPrivilegeEscalation Be compliant with PSA restricted. This can be achieved for virt-api, virt-operator, virt-controller. Signed-off-by: L. Pivarc (cherry picked from commit 6021d9eda5ce20e2d0c12be6524ec7c5f8a45565) Signed-off-by: L. Pivarc --- manifests/generated/operator-csv.yaml.in | 2 ++ .../resource/generate/components/BUILD.bazel | 1 + .../resource/generate/components/deployments.go | 14 ++++++++++++++ 3 files changed, 17 insertions(+) diff --git a/manifests/generated/operator-csv.yaml.in b/manifests/generated/operator-csv.yaml.in index f78c120e7c6d..3ae93eca81dc 100644 --- a/manifests/generated/operator-csv.yaml.in +++ b/manifests/generated/operator-csv.yaml.in @@ -1073,6 +1073,8 @@ spec: requests: cpu: 10m memory: 150Mi + securityContext: + allowPrivilegeEscalation: false volumeMounts: - mountPath: /etc/virt-operator/certificates name: kubevirt-operator-certs diff --git a/pkg/virt-operator/resource/generate/components/BUILD.bazel b/pkg/virt-operator/resource/generate/components/BUILD.bazel index 37d487212098..0dabf5b20656 100644 --- a/pkg/virt-operator/resource/generate/components/BUILD.bazel +++ b/pkg/virt-operator/resource/generate/components/BUILD.bazel @@ -44,6 +44,7 @@ go_library( "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library", "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library", + "//vendor/k8s.io/utils/pointer:go_default_library", ], ) diff --git a/pkg/virt-operator/resource/generate/components/deployments.go b/pkg/virt-operator/resource/generate/components/deployments.go index 85da56f393b2..87865779c1e9 100644 --- a/pkg/virt-operator/resource/generate/components/deployments.go +++ b/pkg/virt-operator/resource/generate/components/deployments.go @@ -23,6 +23,7 @@ import ( "strings" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/utils/pointer" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -332,6 +333,11 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str }, } + if container.SecurityContext == nil { + container.SecurityContext = &corev1.SecurityContext{} + } + container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) + return deployment, nil } @@ -410,6 +416,11 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st }, } + if container.SecurityContext == nil { + container.SecurityContext = &corev1.SecurityContext{} + } + container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) + return deployment, nil } @@ -516,6 +527,9 @@ func NewOperatorDeployment(namespace string, repository string, imagePrefix stri corev1.ResourceMemory: resource.MustParse("150Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + }, }, }, SecurityContext: &corev1.PodSecurityContext{ From 96625b62d77ef0fc2d09d0c68e85e8bb30927a0f Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Mon, 8 Aug 2022 15:32:20 +0200 Subject: [PATCH 2/6] Drop ALL capabilities Be compliant with PSA restricted. This can be applied to virt-api, virt-operator, virt-controller. Signed-off-by: L. Pivarc (cherry picked from commit 46c395a9f752c3bce565d6d2350c7b9da1ff5a39) Signed-off-by: L.
Pivarc --- manifests/generated/operator-csv.yaml.in | 3 +++ .../resource/generate/components/deployments.go | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/manifests/generated/operator-csv.yaml.in b/manifests/generated/operator-csv.yaml.in index 3ae93eca81dc..3ec542f37de5 100644 --- a/manifests/generated/operator-csv.yaml.in +++ b/manifests/generated/operator-csv.yaml.in @@ -1075,6 +1075,9 @@ spec: memory: 150Mi securityContext: allowPrivilegeEscalation: false + capabilities: + drop: + - ALL volumeMounts: - mountPath: /etc/virt-operator/certificates name: kubevirt-operator-certs diff --git a/pkg/virt-operator/resource/generate/components/deployments.go b/pkg/virt-operator/resource/generate/components/deployments.go index 87865779c1e9..f7e0f6cd6beb 100644 --- a/pkg/virt-operator/resource/generate/components/deployments.go +++ b/pkg/virt-operator/resource/generate/components/deployments.go @@ -337,6 +337,9 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str container.SecurityContext = &corev1.SecurityContext{} } container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) + container.SecurityContext.Capabilities = &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + } return deployment, nil } @@ -420,6 +423,9 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st container.SecurityContext = &corev1.SecurityContext{} } container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) + container.SecurityContext.Capabilities = &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + } return deployment, nil } @@ -529,6 +535,9 @@ func NewOperatorDeployment(namespace string, repository string, imagePrefix stri }, SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, }, }, }, From dd7f4686ae143052ee5123c72c08f7f9049ecaa2 Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Thu, 11 Aug 2022 09:49:48 +0200 Subject: [PATCH 3/6] Set SeccompProfile Be compliant with PSA restricted. This can be applied to virt-api, virt-operator, virt-controller. Signed-off-by: L. Pivarc (cherry picked from commit 63c6f8e15a4d0f600d01fd86e65dad8eaedec8e3) Signed-off-by: L. 
Pivarc --- manifests/generated/operator-csv.yaml.in | 4 +++ .../generate/components/deployments.go | 36 ++++++++++--------- 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/manifests/generated/operator-csv.yaml.in b/manifests/generated/operator-csv.yaml.in index 3ec542f37de5..880a3eeb59d7 100644 --- a/manifests/generated/operator-csv.yaml.in +++ b/manifests/generated/operator-csv.yaml.in @@ -1078,6 +1078,8 @@ spec: capabilities: drop: - ALL + seccompProfile: + type: RuntimeDefault volumeMounts: - mountPath: /etc/virt-operator/certificates name: kubevirt-operator-certs @@ -1087,6 +1089,8 @@ spec: priorityClassName: kubevirt-cluster-critical securityContext: runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: kubevirt-operator tolerations: - key: CriticalAddonsOnly diff --git a/pkg/virt-operator/resource/generate/components/deployments.go b/pkg/virt-operator/resource/generate/components/deployments.go index f7e0f6cd6beb..cbb5b5e7e194 100644 --- a/pkg/virt-operator/resource/generate/components/deployments.go +++ b/pkg/virt-operator/resource/generate/components/deployments.go @@ -285,7 +285,8 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str pod := &deployment.Spec.Template.Spec pod.ServiceAccountName = rbac.ApiServiceAccountName pod.SecurityContext = &corev1.PodSecurityContext{ - RunAsNonRoot: boolPtr(true), + RunAsNonRoot: boolPtr(true), + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, } container := &deployment.Spec.Template.Spec.Containers[0] @@ -333,14 +334,13 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str }, } - if container.SecurityContext == nil { - container.SecurityContext = &corev1.SecurityContext{} - } - container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) - container.SecurityContext.Capabilities = &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, + container.SecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, } - return deployment, nil } @@ -357,7 +357,8 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st pod := &deployment.Spec.Template.Spec pod.ServiceAccountName = rbac.ControllerServiceAccountName pod.SecurityContext = &corev1.PodSecurityContext{ - RunAsNonRoot: boolPtr(true), + RunAsNonRoot: boolPtr(true), + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, } launcherVersion = AddVersionSeparatorPrefix(launcherVersion) @@ -419,14 +420,13 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st }, } - if container.SecurityContext == nil { - container.SecurityContext = &corev1.SecurityContext{} - } - container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false) - container.SecurityContext.Capabilities = &corev1.Capabilities{ - Drop: []corev1.Capability{"ALL"}, + container.SecurityContext = &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, } - return deployment, nil } @@ -538,11 +538,13 @@ func NewOperatorDeployment(namespace string, repository string, imagePrefix stri Capabilities: &corev1.Capabilities{ 
Drop: []corev1.Capability{"ALL"}, }, + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, }, }, }, SecurityContext: &corev1.PodSecurityContext{ - RunAsNonRoot: boolPtr(true), + RunAsNonRoot: boolPtr(true), + SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault}, }, }, }, From 998225b578d854edee0aa705d7bfae641ab837c4 Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Thu, 8 Sep 2022 13:38:35 +0200 Subject: [PATCH 4/6] Pod security label for Kubevirt The Kubevirt install namespace needs to specify the level to enforce as it contains privileged workloads. Signed-off-by: L. Pivarc (cherry picked from commit 4690f390de540b979fe4c94c1ad1bb98ab987661) Signed-off-by: L. Pivarc --- hack/cluster-deploy.sh | 2 ++ manifests/release/kubevirt-operator.yaml.in | 1 + 2 files changed, 3 insertions(+) diff --git a/hack/cluster-deploy.sh b/hack/cluster-deploy.sh index c33db20c202f..1db598d17769 100755 --- a/hack/cluster-deploy.sh +++ b/hack/cluster-deploy.sh @@ -74,6 +74,8 @@ apiVersion: v1 kind: Namespace metadata: name: ${namespace:?} + labels: + pod-security.kubernetes.io/enforce: "privileged" EOF if [[ "$KUBEVIRT_PROVIDER" =~ kind.* ]]; then diff --git a/manifests/release/kubevirt-operator.yaml.in b/manifests/release/kubevirt-operator.yaml.in index 603be12bc270..6ac36d99bf28 100644 --- a/manifests/release/kubevirt-operator.yaml.in +++ b/manifests/release/kubevirt-operator.yaml.in @@ -4,6 +4,7 @@ kind: Namespace metadata: labels: kubevirt.io: "" + pod-security.kubernetes.io/enforce: "privileged" name: {{.Namespace}} {{index .GeneratedManifests "kv-resource.yaml"}} --- From 8fd9ac1b55bc104d37c3de357f71ff8c7e29454c Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Mon, 26 Sep 2022 17:26:30 +0200 Subject: [PATCH 5/6] Integrate with Pod security VMs are unfortunately still a privileged workload (in Kubevirt). We have to integrate with the new Pod Security Standards in order to allow seamless integration and upgrades. This means we now make sure that the target namespace allows privileged workloads if the PSA feature gate is enabled. This unfortunately means users escalate their privileges, in terms of Pod security, by having the ability to create VMs. (cherry picked from commit 8512fe3f07aeb174d58f8f4b8b36cb1d535045a7) Signed-off-by: L. Pivarc Signed-off-by: L.
Pivarc --- manifests/generated/operator-csv.yaml.in | 9 ++ .../rbac-operator.authorization.k8s.yaml.in | 9 ++ pkg/virt-config/feature-gates.go | 5 + pkg/virt-controller/watch/BUILD.bazel | 2 + pkg/virt-controller/watch/application.go | 6 +- pkg/virt-controller/watch/application_test.go | 2 + pkg/virt-controller/watch/migration.go | 18 ++++ pkg/virt-controller/watch/migration_test.go | 1 + pkg/virt-controller/watch/psa.go | 53 ++++++++++ pkg/virt-controller/watch/psa_test.go | 99 +++++++++++++++++++ pkg/virt-controller/watch/vmi.go | 24 +++++ pkg/virt-controller/watch/vmi_test.go | 5 +- .../resource/generate/rbac/controller.go | 14 +++ 13 files changed, 245 insertions(+), 2 deletions(-) create mode 100644 pkg/virt-controller/watch/psa.go create mode 100644 pkg/virt-controller/watch/psa_test.go diff --git a/manifests/generated/operator-csv.yaml.in b/manifests/generated/operator-csv.yaml.in index 880a3eeb59d7..52369152c100 100644 --- a/manifests/generated/operator-csv.yaml.in +++ b/manifests/generated/operator-csv.yaml.in @@ -458,6 +458,15 @@ spec: - get - list - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - patch - apiGroups: - policy resources: diff --git a/manifests/generated/rbac-operator.authorization.k8s.yaml.in b/manifests/generated/rbac-operator.authorization.k8s.yaml.in index 8450c42be4c9..2351d3711360 100644 --- a/manifests/generated/rbac-operator.authorization.k8s.yaml.in +++ b/manifests/generated/rbac-operator.authorization.k8s.yaml.in @@ -360,6 +360,15 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch + - patch - apiGroups: - policy resources: diff --git a/pkg/virt-config/feature-gates.go b/pkg/virt-config/feature-gates.go index 7a12998dc0e2..8d8f6532dfa0 100644 --- a/pkg/virt-config/feature-gates.go +++ b/pkg/virt-config/feature-gates.go @@ -45,6 +45,7 @@ const ( NonRoot = "NonRootExperimental" ClusterProfiler = "ClusterProfiler" WorkloadEncryptionSEV = "WorkloadEncryptionSEV" + PSA = "PSA" ) func (c *ClusterConfig) isFeatureGateEnabled(featureGate string) bool { @@ -136,3 +137,7 @@ func (config *ClusterConfig) ClusterProfilerEnabled() bool { func (config *ClusterConfig) WorkloadEncryptionSEVEnabled() bool { return config.isFeatureGateEnabled(WorkloadEncryptionSEV) } + +func (config *ClusterConfig) PSAEnabled() bool { + return config.isFeatureGateEnabled(PSA) +} diff --git a/pkg/virt-controller/watch/BUILD.bazel b/pkg/virt-controller/watch/BUILD.bazel index 40e6981b6018..bd383440bbfa 100644 --- a/pkg/virt-controller/watch/BUILD.bazel +++ b/pkg/virt-controller/watch/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "migration.go", "node.go", "pool.go", + "psa.go", "replicaset.go", "util.go", "vm.go", @@ -94,6 +95,7 @@ go_test( "migration_test.go", "node_test.go", "pool_test.go", + "psa_test.go", "replicaset_test.go", "vm_test.go", "vmi_test.go", diff --git a/pkg/virt-controller/watch/application.go b/pkg/virt-controller/watch/application.go index cf791c60e401..484dde06ec83 100644 --- a/pkg/virt-controller/watch/application.go +++ b/pkg/virt-controller/watch/application.go @@ -139,6 +139,8 @@ type VirtControllerApp struct { vmiInformer cache.SharedIndexInformer vmiRecorder record.EventRecorder + namespaceStore cache.Store + kubeVirtInformer cache.SharedIndexInformer clusterConfig *virtconfig.ClusterConfig @@ -316,7 +318,7 @@ func Execute() { app.vmiInformer = app.informerFactory.VMI() app.kvPodInformer = app.informerFactory.KubeVirtPod() app.nodeInformer = 
app.informerFactory.KubeVirtNode() - + app.namespaceStore = app.informerFactory.Namespace().GetStore() app.vmiCache = app.vmiInformer.GetStore() app.vmiRecorder = app.newRecorder(k8sv1.NamespaceAll, "virtualmachine-controller") @@ -512,6 +514,7 @@ func (vca *VirtControllerApp) initCommon() { vca.cdiConfigInformer, vca.clusterConfig, topologyHinter, + vca.namespaceStore, ) recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller") @@ -528,6 +531,7 @@ func (vca *VirtControllerApp) initCommon() { vca.vmiRecorder, vca.clientSet, vca.clusterConfig, + vca.namespaceStore, ) vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer) diff --git a/pkg/virt-controller/watch/application_test.go b/pkg/virt-controller/watch/application_test.go index bae95bf8a68e..cf7cd7d2c59d 100644 --- a/pkg/virt-controller/watch/application_test.go +++ b/pkg/virt-controller/watch/application_test.go @@ -124,6 +124,7 @@ var _ = Describe("Application", func() { cdiConfigInformer, config, topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil), + nil, ) app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10)) app.vmController = NewVMController(vmiInformer, @@ -145,6 +146,7 @@ var _ = Describe("Application", func() { recorder, virtClient, config, + nil, ) app.snapshotController = &snapshot.VMSnapshotController{ Client: virtClient, diff --git a/pkg/virt-controller/watch/migration.go b/pkg/virt-controller/watch/migration.go index 4b8cbdcdbe2b..33c4368d45e4 100644 --- a/pkg/virt-controller/watch/migration.go +++ b/pkg/virt-controller/watch/migration.go @@ -94,6 +94,7 @@ type MigrationController struct { pvcInformer cache.SharedIndexInformer pdbInformer cache.SharedIndexInformer migrationPolicyInformer cache.SharedIndexInformer + namespaceStore cache.Store recorder record.EventRecorder podExpectations *controller.UIDTrackingControllerExpectations migrationStartLock *sync.Mutex @@ -115,6 +116,7 @@ func NewMigrationController(templateService services.TemplateService, recorder record.EventRecorder, clientset kubecli.KubevirtClient, clusterConfig *virtconfig.ClusterConfig, + namespaceStore cache.Store, ) *MigrationController { c := &MigrationController{ @@ -136,6 +138,8 @@ func NewMigrationController(templateService services.TemplateService, unschedulablePendingTimeoutSeconds: defaultUnschedulablePendingTimeoutSeconds, catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds, + + namespaceStore: namespaceStore, } c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -582,6 +586,13 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn } } + if c.clusterConfig.PSAEnabled() { + // Check my impact + if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil { + return err + } + } + key := controller.MigrationKey(migration) c.podExpectations.ExpectCreations(key, 1) pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{}) @@ -840,8 +851,15 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi attachmentPodTemplate.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID) attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = string(migration.Name) + if c.clusterConfig.PSAEnabled() { + // Check my impact + if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil { + 
return err + } + } key := controller.MigrationKey(migration) c.podExpectations.ExpectCreations(key, 1) + attachmentPod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{}) if err != nil { c.podExpectations.CreationObserved(key) diff --git a/pkg/virt-controller/watch/migration_test.go b/pkg/virt-controller/watch/migration_test.go index 91f521bedb6c..b30d44d3299e 100644 --- a/pkg/virt-controller/watch/migration_test.go +++ b/pkg/virt-controller/watch/migration_test.go @@ -270,6 +270,7 @@ var _ = Describe("Migration watcher", func() { recorder, virtClient, config, + nil, ) // Wrap our workqueue to have a way to detect when we are done processing updates mockQueue = testutils.NewMockWorkQueue(controller.Queue) diff --git a/pkg/virt-controller/watch/psa.go b/pkg/virt-controller/watch/psa.go new file mode 100644 index 000000000000..bc44d95f6ec4 --- /dev/null +++ b/pkg/virt-controller/watch/psa.go @@ -0,0 +1,53 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2022 Red Hat, Inc. + * + */ + +package watch + +import ( + "context" + "fmt" + + k8sv1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + + "kubevirt.io/client-go/kubecli" +) + +const PSALabel = "pod-security.kubernetes.io/enforce" + +func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string) error { + obj, exists, err := namespaceStore.GetByKey(namespace) + if err != nil { + return fmt.Errorf("Failed to get namespace, %w", err) + } + if !exists { + return fmt.Errorf("Namespace %s not observed, %w", namespace, err) + } + namespaceObj := obj.(*k8sv1.Namespace) + enforceLevel, labelExist := namespaceObj.Labels[PSALabel] + if !labelExist || enforceLevel != "privileged" { + data := []byte(fmt.Sprintf(`{"metadata": { "labels": {"%s": "privileged"}}}`, PSALabel)) + _, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{}) + if err != nil { + return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)} + } + } + return nil +} diff --git a/pkg/virt-controller/watch/psa_test.go b/pkg/virt-controller/watch/psa_test.go new file mode 100644 index 000000000000..7e1327fad220 --- /dev/null +++ b/pkg/virt-controller/watch/psa_test.go @@ -0,0 +1,99 @@ +package watch + +import ( + "encoding/json" + + "github.com/golang/mock/gomock" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + "kubevirt.io/client-go/kubecli" +) + +var _ = Describe("PSA", func() { + var ( + namespaceStore cache.Store + client *kubecli.MockKubevirtClient + kubeClient *fake.Clientset + ctrl *gomock.Controller + ) + + BeforeEach(func() { + namespaceStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) + ctrl = gomock.NewController(GinkgoT()) + client = kubecli.NewMockKubevirtClient(ctrl) + kubeClient = fake.NewSimpleClientset() + client.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes() + }) + + Context("should patch namespace with enforce level", func() { + BeforeEach(func() { + kubeClient.Fake.PrependReactor("patch", "namespaces", + func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) { + patchAction, ok := action.(testing.PatchAction) + Expect(ok).To(BeTrue()) + patchBytes := patchAction.GetPatch() + namespace := &k8sv1.Namespace{} + Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed()) + + Expect(namespace.Labels).To(HaveKeyWithValue(PSALabel, "privileged")) + return true, nil, nil + }) + }) + + It("when label is missing", func() { + namespace := &k8sv1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + } + Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred()) + + Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) + }) + + It("when enforce label is not privileged", func() { + namespace := &k8sv1.Namespace{ + TypeMeta: metav1.TypeMeta{ + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + PSALabel: "restricted", + }, + }, + } + Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred()) + + Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) + }) + }) + It("should not patch namespace when enforce label is set to privileged", func() { + namespace := &k8sv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{ + PSALabel: "privileged", + }, + }, + } + Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred()) + kubeClient.Fake.PrependReactor("patch", "namespaces", + func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) { + Expect("Patch namespaces is not expected").To(BeEmpty()) + return true, nil, nil + }) + Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) + }) + +}) diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go index bdf2b932e7be..e1828ef37781 100644 --- a/pkg/virt-controller/watch/vmi.go +++ b/pkg/virt-controller/watch/vmi.go @@ -151,6 +151,7 @@ func NewVMIController(templateService services.TemplateService, cdiConfigInformer cache.SharedIndexInformer, clusterConfig *virtconfig.ClusterConfig, topologyHinter topology.Hinter, + namespaceStore cache.Store, ) *VMIController { c := &VMIController{ @@ -169,6 +170,7 @@ func NewVMIController(templateService services.TemplateService, cdiConfigInformer: cdiConfigInformer, clusterConfig: clusterConfig, topologyHinter: topologyHinter, + namespaceStore: namespaceStore, } c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -230,6 +232,7 @@ type VMIController struct { cdiInformer cache.SharedIndexInformer cdiConfigInformer cache.SharedIndexInformer clusterConfig 
*virtconfig.ClusterConfig + namespaceStore cache.Store } func (c *VMIController) Run(threadiness int, stopCh <-chan struct{}) { @@ -1036,6 +1039,13 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, return &syncErrorImpl{fmt.Errorf(failedToRenderLaunchManifestErrFormat, err), FailedCreatePodReason} } + if c.clusterConfig.PSAEnabled() { + namespace := vmi.GetNamespace() + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)} + } + } + vmiKey := controller.VirtualMachineInstanceKey(vmi) c.podExpectations.ExpectCreations(vmiKey, 1) pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{}) @@ -1765,6 +1775,13 @@ func (c *VMIController) createAttachmentPod(vmi *virtv1.VirtualMachineInstance, vmiKey := controller.VirtualMachineInstanceKey(vmi) c.podExpectations.ExpectCreations(vmiKey, 1) + if c.clusterConfig.PSAEnabled() { + namespace := vmi.GetNamespace() + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating attachment pod", namespace)} + } + } + pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{}) if err != nil { c.podExpectations.CreationObserved(vmiKey) @@ -1784,6 +1801,13 @@ func (c *VMIController) triggerHotplugPopulation(volume *virtv1.Volume, vmi *vir vmiKey := controller.VirtualMachineInstanceKey(vmi) c.podExpectations.ExpectCreations(vmiKey, 1) + if c.clusterConfig.PSAEnabled() { + namespace := vmi.GetNamespace() + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating hotplug population trigger pod", namespace)} + } + } + _, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), populateHotplugPodTemplate, v1.CreateOptions{}) if err != nil { c.podExpectations.CreationObserved(vmiKey) diff --git a/pkg/virt-controller/watch/vmi_test.go b/pkg/virt-controller/watch/vmi_test.go index da28034fcf0d..a2f5de735176 100644 --- a/pkg/virt-controller/watch/vmi_test.go +++ b/pkg/virt-controller/watch/vmi_test.go @@ -75,6 +75,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() { var kubeClient *fake.Clientset var networkClient *fakenetworkclient.Clientset var pvcInformer cache.SharedIndexInformer + var namespaceStore cache.Store var dataVolumeSource *framework.FakeControllerSource var dataVolumeInformer cache.SharedIndexInformer @@ -213,6 +214,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() { pvcInformer, _ = testutils.NewFakeInformerFor(&k8sv1.PersistentVolumeClaim{}) cdiInformer, _ = testutils.NewFakeInformerFor(&cdiv1.CDIConfig{}) cdiConfigInformer, _ = testutils.NewFakeInformerFor(&cdiv1.CDIConfig{}) + namespaceStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) controller = NewVMIController( services.NewTemplateService("a", 240, "b", "c", "d", "e", "f", "g", pvcInformer.GetStore(), virtClient, config, qemuGid), vmiInformer, @@ -225,7 +227,8 @@ var _ = Describe("VirtualMachineInstance watcher", func() { cdiInformer, cdiConfigInformer, config, - topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil), + 
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", config), + namespaceStore, ) // Wrap our workqueue to have a way to detect when we are done processing updates mockQueue = testutils.NewMockWorkQueue(controller.Queue) diff --git a/pkg/virt-operator/resource/generate/rbac/controller.go b/pkg/virt-operator/resource/generate/rbac/controller.go index 953ac301a91f..0963946ec024 100644 --- a/pkg/virt-operator/resource/generate/rbac/controller.go +++ b/pkg/virt-operator/resource/generate/rbac/controller.go @@ -67,6 +67,20 @@ func newControllerClusterRole() *rbacv1.ClusterRole { }, }, Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{ + "", + }, + Resources: []string{ + "namespaces", + }, + Verbs: []string{ + "get", + "list", + "watch", + "patch", + }, + }, { APIGroups: []string{ "policy", From 39abb5962282aa0cda8686080cf1a027cc297211 Mon Sep 17 00:00:00 2001 From: "L. Pivarc" Date: Thu, 8 Sep 2022 14:24:23 +0200 Subject: [PATCH 6/6] Pod security on Openshift Make sure we are not racing with the cluster sync mechanism on Openshift. Signed-off-by: L. Pivarc (cherry picked from commit 230676f1228b5ffebc2933d1f69d04cc60b4e8eb) Signed-off-by: L. Pivarc --- pkg/virt-controller/watch/BUILD.bazel | 1 + pkg/virt-controller/watch/application.go | 11 ++++++ pkg/virt-controller/watch/application_test.go | 2 + pkg/virt-controller/watch/migration.go | 10 +++-- pkg/virt-controller/watch/migration_test.go | 1 + pkg/virt-controller/watch/psa.go | 11 +++++- pkg/virt-controller/watch/psa_test.go | 39 +++++++++++++------ pkg/virt-controller/watch/vmi.go | 9 +++-- pkg/virt-controller/watch/vmi_test.go | 1 + 9 files changed, 65 insertions(+), 20 deletions(-) diff --git a/pkg/virt-controller/watch/BUILD.bazel b/pkg/virt-controller/watch/BUILD.bazel index bd383440bbfa..f0ea0582abad 100644 --- a/pkg/virt-controller/watch/BUILD.bazel +++ b/pkg/virt-controller/watch/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//pkg/monitoring/vmistats:go_default_library", "//pkg/service:go_default_library", "//pkg/util:go_default_library", + "//pkg/util/cluster:go_default_library", "//pkg/util/lookup:go_default_library", "//pkg/util/migrations:go_default_library", "//pkg/util/pdbs:go_default_library", diff --git a/pkg/virt-controller/watch/application.go b/pkg/virt-controller/watch/application.go index 484dde06ec83..2c6415db5d34 100644 --- a/pkg/virt-controller/watch/application.go +++ b/pkg/virt-controller/watch/application.go @@ -65,6 +65,7 @@ import ( vmiprom "kubevirt.io/kubevirt/pkg/monitoring/vmistats" // import for prometheus metrics "kubevirt.io/kubevirt/pkg/service" "kubevirt.io/kubevirt/pkg/util" + clusterutil "kubevirt.io/kubevirt/pkg/util/cluster" "kubevirt.io/kubevirt/pkg/util/webhooks" virtconfig "kubevirt.io/kubevirt/pkg/virt-config" "kubevirt.io/kubevirt/pkg/virt-controller/leaderelectionconfig" @@ -229,6 +230,8 @@ type VirtControllerApp struct { nodeTopologyUpdatePeriod time.Duration reloadableRateLimiter *ratelimiter.ReloadableRateLimiter leaderElector *leaderelection.LeaderElector + + onOpenshift bool } var _ service.Service = &VirtControllerApp{} @@ -362,6 +365,12 @@ func Execute() { app.migrationPolicyInformer = app.informerFactory.MigrationPolicy() + onOpenShift, err := clusterutil.IsOnOpenShift(app.clientSet) + if err != nil { + golog.Fatalf("Error determining cluster type: %v", err) + } + app.onOpenshift = onOpenShift + app.initCommon() app.initReplicaSet() app.initPool() @@ -515,6 +524,7 @@ func (vca *VirtControllerApp) initCommon() { vca.clusterConfig, topologyHinter,
vca.namespaceStore, + vca.onOpenshift, ) recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller") @@ -532,6 +542,7 @@ func (vca *VirtControllerApp) initCommon() { vca.clientSet, vca.clusterConfig, vca.namespaceStore, + vca.onOpenshift, ) vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer) diff --git a/pkg/virt-controller/watch/application_test.go b/pkg/virt-controller/watch/application_test.go index cf7cd7d2c59d..b679cfb7eb5b 100644 --- a/pkg/virt-controller/watch/application_test.go +++ b/pkg/virt-controller/watch/application_test.go @@ -125,6 +125,7 @@ var _ = Describe("Application", func() { config, topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil), nil, + false, ) app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10)) app.vmController = NewVMController(vmiInformer, @@ -147,6 +148,7 @@ var _ = Describe("Application", func() { virtClient, config, nil, + false, ) app.snapshotController = &snapshot.VMSnapshotController{ Client: virtClient, diff --git a/pkg/virt-controller/watch/migration.go b/pkg/virt-controller/watch/migration.go index 33c4368d45e4..0c90dbe1d2df 100644 --- a/pkg/virt-controller/watch/migration.go +++ b/pkg/virt-controller/watch/migration.go @@ -103,6 +103,8 @@ type MigrationController struct { unschedulablePendingTimeoutSeconds int64 catchAllPendingTimeoutSeconds int64 + + onOpenshift bool } func NewMigrationController(templateService services.TemplateService, @@ -117,6 +119,7 @@ func NewMigrationController(templateService services.TemplateService, clientset kubecli.KubevirtClient, clusterConfig *virtconfig.ClusterConfig, namespaceStore cache.Store, + onOpenshift bool, ) *MigrationController { c := &MigrationController{ @@ -140,6 +143,7 @@ func NewMigrationController(templateService services.TemplateService, catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds, namespaceStore: namespaceStore, + onOpenshift: onOpenshift, } c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -587,8 +591,7 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn } if c.clusterConfig.PSAEnabled() { - // Check my impact - if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil { + if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil { return err } } @@ -852,8 +855,7 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = string(migration.Name) if c.clusterConfig.PSAEnabled() { - // Check my impact - if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil { + if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil { return err } } diff --git a/pkg/virt-controller/watch/migration_test.go b/pkg/virt-controller/watch/migration_test.go index b30d44d3299e..654f2554a1de 100644 --- a/pkg/virt-controller/watch/migration_test.go +++ b/pkg/virt-controller/watch/migration_test.go @@ -271,6 +271,7 @@ var _ = Describe("Migration watcher", func() { virtClient, config, nil, + false, ) // Wrap our workqueue to have a way to detect when we are done processing updates mockQueue = testutils.NewMockWorkQueue(controller.Queue) diff --git a/pkg/virt-controller/watch/psa.go b/pkg/virt-controller/watch/psa.go index bc44d95f6ec4..b03254c49fa4 
100644 --- a/pkg/virt-controller/watch/psa.go +++ b/pkg/virt-controller/watch/psa.go @@ -31,8 +31,9 @@ import ( ) const PSALabel = "pod-security.kubernetes.io/enforce" +const OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync" -func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string) error { +func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string, onOpenshift bool) error { obj, exists, err := namespaceStore.GetByKey(namespace) if err != nil { return fmt.Errorf("Failed to get namespace, %w", err) @@ -43,7 +44,13 @@ func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient namespaceObj := obj.(*k8sv1.Namespace) enforceLevel, labelExist := namespaceObj.Labels[PSALabel] if !labelExist || enforceLevel != "privileged" { - data := []byte(fmt.Sprintf(`{"metadata": { "labels": {"%s": "privileged"}}}`, PSALabel)) + labels := "" + if !onOpenshift { + labels = fmt.Sprintf(`{"%s": "privileged"}`, PSALabel) + } else { + labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync) + } + data := []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels)) _, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{}) if err != nil { return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)} diff --git a/pkg/virt-controller/watch/psa_test.go b/pkg/virt-controller/watch/psa_test.go index 7e1327fad220..ff7c51b1fb75 100644 --- a/pkg/virt-controller/watch/psa_test.go +++ b/pkg/virt-controller/watch/psa_test.go @@ -5,7 +5,9 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" k8sv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sruntime "k8s.io/apimachinery/pkg/runtime" @@ -22,6 +24,7 @@ var _ = Describe("PSA", func() { client *kubecli.MockKubevirtClient kubeClient *fake.Clientset ctrl *gomock.Controller + notOnOpenshift = false ) BeforeEach(func() { @@ -33,21 +36,28 @@ var _ = Describe("PSA", func() { }) Context("should patch namespace with enforce level", func() { - BeforeEach(func() { + var ( + onOpenshift = true + psaLabels = HaveKeyWithValue(PSALabel, "privileged") + psaLabelsOnOpenshift = And(HaveKeyWithValue(PSALabel, "privileged"), HaveKeyWithValue(OpenshiftPSAsync, "false")) + ) + + expectLabels := func(expectedLabels types.GomegaMatcher) { kubeClient.Fake.PrependReactor("patch", "namespaces", func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) { patchAction, ok := action.(testing.PatchAction) Expect(ok).To(BeTrue()) patchBytes := patchAction.GetPatch() namespace := &k8sv1.Namespace{} - Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed()) + Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed(), string(patchBytes)) - Expect(namespace.Labels).To(HaveKeyWithValue(PSALabel, "privileged")) + Expect(namespace.Labels).To(expectedLabels) return true, nil, nil }) - }) + } - It("when label is missing", func() { + DescribeTable("when label is missing", func(expectedLabels types.GomegaMatcher, onOpenshift bool) { + expectLabels(expectedLabels) namespace := &k8sv1.Namespace{ TypeMeta: metav1.TypeMeta{ Kind: "Namespace", @@ -58,10 +68,14 @@ var _ = Describe("PSA", func() { } Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred()) - Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) - }) + Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed()) + }, + Entry("on plain Kubernetes", psaLabels, notOnOpenshift), + Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift), + ) - It("when enforce label is not privileged", func() { + DescribeTable("when enforce label is not privileged", func(expectedLabels types.GomegaMatcher, onOpenshift bool) { + expectLabels(expectedLabels) namespace := &k8sv1.Namespace{ TypeMeta: metav1.TypeMeta{ Kind: "Namespace", @@ -75,8 +89,11 @@ var _ = Describe("PSA", func() { } Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred()) - Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) - }) + Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed()) + }, + Entry("on plain Kubernetes", psaLabels, notOnOpenshift), + Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift), + ) }) It("should not patch namespace when enforce label is set to privileged", func() { namespace := &k8sv1.Namespace{ @@ -93,7 +110,7 @@ var _ = Describe("PSA", func() { Expect("Patch namespaces is not expected").To(BeEmpty()) return true, nil, nil }) - Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed()) + Expect(escalateNamespace(namespaceStore, client, "test", notOnOpenshift)).To(Succeed()) }) }) diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go index e1828ef37781..dd0a1b278691 100644 --- a/pkg/virt-controller/watch/vmi.go +++ b/pkg/virt-controller/watch/vmi.go @@ -152,6 +152,7 @@ func NewVMIController(templateService services.TemplateService, clusterConfig *virtconfig.ClusterConfig, topologyHinter topology.Hinter, namespaceStore cache.Store, + onOpenshift bool, ) *VMIController { c := &VMIController{ @@ 
-171,6 +172,7 @@ func NewVMIController(templateService services.TemplateService, clusterConfig: clusterConfig, topologyHinter: topologyHinter, namespaceStore: namespaceStore, + onOpenshift: onOpenshift, } c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -233,6 +235,7 @@ type VMIController struct { cdiConfigInformer cache.SharedIndexInformer clusterConfig *virtconfig.ClusterConfig namespaceStore cache.Store + onOpenshift bool } func (c *VMIController) Run(threadiness int, stopCh <-chan struct{}) { @@ -1041,7 +1044,7 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod, if c.clusterConfig.PSAEnabled() { namespace := vmi.GetNamespace() - if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil { return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)} } } @@ -1777,7 +1780,7 @@ func (c *VMIController) createAttachmentPod(vmi *virtv1.VirtualMachineInstance, if c.clusterConfig.PSAEnabled() { namespace := vmi.GetNamespace() - if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil { return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating attachment pod", namespace)} } } @@ -1803,7 +1806,7 @@ func (c *VMIController) triggerHotplugPopulation(volume *virtv1.Volume, vmi *vir if c.clusterConfig.PSAEnabled() { namespace := vmi.GetNamespace() - if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil { + if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil { return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating hotplug population trigger pod", namespace)} } } diff --git a/pkg/virt-controller/watch/vmi_test.go b/pkg/virt-controller/watch/vmi_test.go index a2f5de735176..98224e324616 100644 --- a/pkg/virt-controller/watch/vmi_test.go +++ b/pkg/virt-controller/watch/vmi_test.go @@ -229,6 +229,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() { config, topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", config), namespaceStore, + false, ) // Wrap our workqueue to have a way to detect when we are done processing updates mockQueue = testutils.NewMockWorkQueue(controller.Queue)
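
Taken together, patches 1-3 bring the virt-api, virt-controller and virt-operator pods in line with the "restricted" Pod Security Standard: no privilege escalation, all capabilities dropped, RuntimeDefault seccomp, and a non-root pod. A minimal consolidated sketch of those settings follows; the helper names are illustrative, the field values are the ones the diffs above add (the deployments themselves use an equivalent boolPtr helper for RunAsNonRoot):

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/utils/pointer"
    )

    // restrictedContainerSecurityContext mirrors the container-level fields added by
    // patches 1-3: no privilege escalation, all capabilities dropped, RuntimeDefault seccomp.
    func restrictedContainerSecurityContext() *corev1.SecurityContext {
        return &corev1.SecurityContext{
            AllowPrivilegeEscalation: pointer.Bool(false),
            Capabilities: &corev1.Capabilities{
                Drop: []corev1.Capability{"ALL"},
            },
            SeccompProfile: &corev1.SeccompProfile{
                Type: corev1.SeccompProfileTypeRuntimeDefault,
            },
        }
    }

    // restrictedPodSecurityContext mirrors the pod-level fields: run as non-root with
    // the RuntimeDefault seccomp profile.
    func restrictedPodSecurityContext() *corev1.PodSecurityContext {
        return &corev1.PodSecurityContext{
            RunAsNonRoot: pointer.Bool(true),
            SeccompProfile: &corev1.SeccompProfile{
                Type: corev1.SeccompProfileTypeRuntimeDefault,
            },
        }
    }

    func main() {
        fmt.Printf("container: %+v\n", restrictedContainerSecurityContext())
        fmt.Printf("pod: %+v\n", restrictedPodSecurityContext())
    }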
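
Patches 5 and 6 gate namespace escalation behind the PSA feature gate: before creating a virt-launcher, attachment or hotplug pod, the controllers look the target namespace up in an informer store and, if its pod-security.kubernetes.io/enforce label is not "privileged", patch it; on Openshift they additionally set security.openshift.io/scc.podSecurityLabelSync to "false" so the cluster's own label sync does not race with and revert the change. A standalone sketch of that flow against a fake clientset; escalate re-states the logic of escalateNamespace from psa.go outside the watch package, and the namespace name is made up:

    package main

    import (
        "context"
        "fmt"

        k8sv1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/client-go/kubernetes/fake"
        "k8s.io/client-go/tools/cache"
    )

    const enforceLabel = "pod-security.kubernetes.io/enforce"
    const openshiftSyncLabel = "security.openshift.io/scc.podSecurityLabelSync"

    // escalate skips namespaces that already enforce "privileged"; otherwise it patches
    // the enforce label and, on Openshift, opts out of the automatic label sync.
    func escalate(store cache.Store, client *fake.Clientset, namespace string, onOpenshift bool) error {
        obj, exists, err := store.GetByKey(namespace)
        if err != nil || !exists {
            return fmt.Errorf("namespace %s not observed: %v", namespace, err)
        }
        if obj.(*k8sv1.Namespace).Labels[enforceLabel] == "privileged" {
            return nil
        }
        labels := fmt.Sprintf(`{"%s": "privileged"}`, enforceLabel)
        if onOpenshift {
            labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, enforceLabel, openshiftSyncLabel)
        }
        patch := []byte(fmt.Sprintf(`{"metadata": {"labels": %s}}`, labels))
        _, err = client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
        return err
    }

    func main() {
        ns := &k8sv1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "vm-namespace"}}
        store := cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
        _ = store.Add(ns)
        client := fake.NewSimpleClientset(ns)

        if err := escalate(store, client, "vm-namespace", true); err != nil {
            panic(err)
        }
        patched, _ := client.CoreV1().Namespaces().Get(context.TODO(), "vm-namespace", metav1.GetOptions{})
        fmt.Println(patched.Labels) // enforce=privileged plus the Openshift sync opt-out
    }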
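
The clusterConfig.PSAEnabled() check introduced in patch 5 is driven by the new "PSA" feature gate, so none of the namespace patching happens unless that gate is switched on in the KubeVirt CR. A sketch of a CR with the gate enabled, assuming the kubevirt.io/api/core/v1 types and their usual developerConfiguration.featureGates field:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        v1 "kubevirt.io/api/core/v1"
    )

    func main() {
        // A KubeVirt CR with the PSA feature gate enabled; with this gate on, the
        // controllers patch target namespaces as shown in patches 5 and 6.
        kv := &v1.KubeVirt{
            ObjectMeta: metav1.ObjectMeta{Name: "kubevirt", Namespace: "kubevirt"},
            Spec: v1.KubeVirtSpec{
                Configuration: v1.KubeVirtConfiguration{
                    DeveloperConfiguration: &v1.DeveloperConfiguration{
                        FeatureGates: []string{"PSA"},
                    },
                },
            },
        }
        fmt.Println(kv.Spec.Configuration.DeveloperConfiguration.FeatureGates)
    }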