diff --git a/hack/cluster-deploy.sh b/hack/cluster-deploy.sh
index c33db20c202f..1db598d17769 100755
--- a/hack/cluster-deploy.sh
+++ b/hack/cluster-deploy.sh
@@ -74,6 +74,8 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: ${namespace:?}
+  labels:
+    pod-security.kubernetes.io/enforce: "privileged"
 EOF
 
 if [[ "$KUBEVIRT_PROVIDER" =~ kind.* ]]; then
diff --git a/manifests/generated/operator-csv.yaml.in b/manifests/generated/operator-csv.yaml.in
index f78c120e7c6d..52369152c100 100644
--- a/manifests/generated/operator-csv.yaml.in
+++ b/manifests/generated/operator-csv.yaml.in
@@ -458,6 +458,15 @@ spec:
         - get
         - list
         - watch
+      - apiGroups:
+        - ""
+        resources:
+        - namespaces
+        verbs:
+        - get
+        - list
+        - watch
+        - patch
       - apiGroups:
         - policy
         resources:
@@ -1073,6 +1082,13 @@ spec:
           requests:
             cpu: 10m
            memory: 150Mi
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+          seccompProfile:
+            type: RuntimeDefault
         volumeMounts:
         - mountPath: /etc/virt-operator/certificates
           name: kubevirt-operator-certs
@@ -1082,6 +1098,8 @@ spec:
       priorityClassName: kubevirt-cluster-critical
       securityContext:
         runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
       serviceAccountName: kubevirt-operator
       tolerations:
       - key: CriticalAddonsOnly
diff --git a/manifests/generated/rbac-operator.authorization.k8s.yaml.in b/manifests/generated/rbac-operator.authorization.k8s.yaml.in
index 8450c42be4c9..2351d3711360 100644
--- a/manifests/generated/rbac-operator.authorization.k8s.yaml.in
+++ b/manifests/generated/rbac-operator.authorization.k8s.yaml.in
@@ -360,6 +360,15 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - namespaces
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
 - apiGroups:
   - policy
   resources:
diff --git a/manifests/release/kubevirt-operator.yaml.in b/manifests/release/kubevirt-operator.yaml.in
index 603be12bc270..6ac36d99bf28 100644
--- a/manifests/release/kubevirt-operator.yaml.in
+++ b/manifests/release/kubevirt-operator.yaml.in
@@ -4,6 +4,7 @@ kind: Namespace
 metadata:
   labels:
     kubevirt.io: ""
+    pod-security.kubernetes.io/enforce: "privileged"
   name: {{.Namespace}}
 {{index .GeneratedManifests "kv-resource.yaml"}}
 ---
diff --git a/pkg/virt-config/feature-gates.go b/pkg/virt-config/feature-gates.go
index 7a12998dc0e2..8d8f6532dfa0 100644
--- a/pkg/virt-config/feature-gates.go
+++ b/pkg/virt-config/feature-gates.go
@@ -45,6 +45,7 @@ const (
 	NonRoot               = "NonRootExperimental"
 	ClusterProfiler       = "ClusterProfiler"
 	WorkloadEncryptionSEV = "WorkloadEncryptionSEV"
+	PSA                   = "PSA"
 )
 
 func (c *ClusterConfig) isFeatureGateEnabled(featureGate string) bool {
@@ -136,3 +137,7 @@ func (config *ClusterConfig) ClusterProfilerEnabled() bool {
 func (config *ClusterConfig) WorkloadEncryptionSEVEnabled() bool {
 	return config.isFeatureGateEnabled(WorkloadEncryptionSEV)
 }
+
+func (config *ClusterConfig) PSAEnabled() bool {
+	return config.isFeatureGateEnabled(PSA)
+}
diff --git a/pkg/virt-controller/watch/BUILD.bazel b/pkg/virt-controller/watch/BUILD.bazel
index 40e6981b6018..f0ea0582abad 100644
--- a/pkg/virt-controller/watch/BUILD.bazel
+++ b/pkg/virt-controller/watch/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
         "migration.go",
         "node.go",
         "pool.go",
+        "psa.go",
        "replicaset.go",
         "util.go",
         "vm.go",
@@ -26,6 +27,7 @@ go_library(
         "//pkg/monitoring/vmistats:go_default_library",
         "//pkg/service:go_default_library",
         "//pkg/util:go_default_library",
+        "//pkg/util/cluster:go_default_library",
         "//pkg/util/lookup:go_default_library",
         "//pkg/util/migrations:go_default_library",
        "//pkg/util/pdbs:go_default_library",
@@ -94,6 +96,7 @@ go_test(
         "migration_test.go",
         "node_test.go",
         "pool_test.go",
+        "psa_test.go",
         "replicaset_test.go",
         "vm_test.go",
         "vmi_test.go",
diff --git a/pkg/virt-controller/watch/application.go b/pkg/virt-controller/watch/application.go
index cf791c60e401..2c6415db5d34 100644
--- a/pkg/virt-controller/watch/application.go
+++ b/pkg/virt-controller/watch/application.go
@@ -65,6 +65,7 @@ import (
 	vmiprom "kubevirt.io/kubevirt/pkg/monitoring/vmistats" // import for prometheus metrics
 	"kubevirt.io/kubevirt/pkg/service"
 	"kubevirt.io/kubevirt/pkg/util"
+	clusterutil "kubevirt.io/kubevirt/pkg/util/cluster"
 	"kubevirt.io/kubevirt/pkg/util/webhooks"
 	virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
 	"kubevirt.io/kubevirt/pkg/virt-controller/leaderelectionconfig"
@@ -139,6 +140,8 @@ type VirtControllerApp struct {
 	vmiInformer cache.SharedIndexInformer
 	vmiRecorder record.EventRecorder
 
+	namespaceStore cache.Store
+
 	kubeVirtInformer cache.SharedIndexInformer
 
 	clusterConfig *virtconfig.ClusterConfig
@@ -227,6 +230,8 @@ type VirtControllerApp struct {
 	nodeTopologyUpdatePeriod time.Duration
 	reloadableRateLimiter    *ratelimiter.ReloadableRateLimiter
 	leaderElector            *leaderelection.LeaderElector
+
+	onOpenshift bool
 }
 
 var _ service.Service = &VirtControllerApp{}
@@ -316,7 +321,7 @@ func Execute() {
 	app.vmiInformer = app.informerFactory.VMI()
 	app.kvPodInformer = app.informerFactory.KubeVirtPod()
 	app.nodeInformer = app.informerFactory.KubeVirtNode()
-
+	app.namespaceStore = app.informerFactory.Namespace().GetStore()
 	app.vmiCache = app.vmiInformer.GetStore()
 	app.vmiRecorder = app.newRecorder(k8sv1.NamespaceAll, "virtualmachine-controller")
@@ -360,6 +365,12 @@ func Execute() {
 	app.migrationPolicyInformer = app.informerFactory.MigrationPolicy()
 
+	onOpenShift, err := clusterutil.IsOnOpenShift(app.clientSet)
+	if err != nil {
+		golog.Fatalf("Error determining cluster type: %v", err)
+	}
+	app.onOpenshift = onOpenShift
+
 	app.initCommon()
 	app.initReplicaSet()
 	app.initPool()
@@ -512,6 +523,8 @@ func (vca *VirtControllerApp) initCommon() {
 		vca.cdiConfigInformer,
 		vca.clusterConfig,
 		topologyHinter,
+		vca.namespaceStore,
+		vca.onOpenshift,
 	)
 
 	recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller")
@@ -528,6 +541,8 @@ func (vca *VirtControllerApp) initCommon() {
 		vca.vmiRecorder,
 		vca.clientSet,
 		vca.clusterConfig,
+		vca.namespaceStore,
+		vca.onOpenshift,
 	)
 
 	vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer)
diff --git a/pkg/virt-controller/watch/application_test.go b/pkg/virt-controller/watch/application_test.go
index bae95bf8a68e..b679cfb7eb5b 100644
--- a/pkg/virt-controller/watch/application_test.go
+++ b/pkg/virt-controller/watch/application_test.go
@@ -124,6 +124,8 @@ var _ = Describe("Application", func() {
 			cdiConfigInformer,
 			config,
 			topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil),
+			nil,
+			false,
 		)
 		app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10))
 		app.vmController = NewVMController(vmiInformer,
@@ -145,6 +147,8 @@ var _ = Describe("Application", func() {
 			recorder,
 			virtClient,
 			config,
+			nil,
+			false,
 		)
 		app.snapshotController = &snapshot.VMSnapshotController{
 			Client: virtClient,
diff --git a/pkg/virt-controller/watch/migration.go b/pkg/virt-controller/watch/migration.go
index 4b8cbdcdbe2b..0c90dbe1d2df 100644
--- a/pkg/virt-controller/watch/migration.go
+++ b/pkg/virt-controller/watch/migration.go
@@ -94,6 +94,7 @@ type MigrationController struct {
 	pvcInformer             cache.SharedIndexInformer
 	pdbInformer             cache.SharedIndexInformer
 	migrationPolicyInformer cache.SharedIndexInformer
+	namespaceStore          cache.Store
 	recorder                record.EventRecorder
 	podExpectations         *controller.UIDTrackingControllerExpectations
 	migrationStartLock      *sync.Mutex
@@ -102,6 +103,8 @@ type MigrationController struct {
 
 	unschedulablePendingTimeoutSeconds int64
 	catchAllPendingTimeoutSeconds      int64
+
+	onOpenshift bool
 }
 
 func NewMigrationController(templateService services.TemplateService,
@@ -115,6 +118,8 @@ func NewMigrationController(templateService services.TemplateService,
 	recorder record.EventRecorder,
 	clientset kubecli.KubevirtClient,
 	clusterConfig *virtconfig.ClusterConfig,
+	namespaceStore cache.Store,
+	onOpenshift bool,
 ) *MigrationController {
 
 	c := &MigrationController{
@@ -136,6 +141,9 @@ func NewMigrationController(templateService services.TemplateService,
 
 		unschedulablePendingTimeoutSeconds: defaultUnschedulablePendingTimeoutSeconds,
 		catchAllPendingTimeoutSeconds:      defaultCatchAllPendingTimeoutSeconds,
+
+		namespaceStore: namespaceStore,
+		onOpenshift:    onOpenshift,
 	}
 
 	c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -582,6 +590,12 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn
 		}
 	}
 
+	if c.clusterConfig.PSAEnabled() {
+		if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
+			return err
+		}
+	}
+
 	key := controller.MigrationKey(migration)
 	c.podExpectations.ExpectCreations(key, 1)
 	pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{})
@@ -840,8 +854,14 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi
 	attachmentPodTemplate.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
 	attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = string(migration.Name)
 
+	if c.clusterConfig.PSAEnabled() {
+		if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
+			return err
+		}
+	}
 	key := controller.MigrationKey(migration)
 	c.podExpectations.ExpectCreations(key, 1)
+
 	attachmentPod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{})
 	if err != nil {
 		c.podExpectations.CreationObserved(key)
diff --git a/pkg/virt-controller/watch/migration_test.go b/pkg/virt-controller/watch/migration_test.go
index 91f521bedb6c..654f2554a1de 100644
--- a/pkg/virt-controller/watch/migration_test.go
+++ b/pkg/virt-controller/watch/migration_test.go
@@ -270,6 +270,8 @@ var _ = Describe("Migration watcher", func() {
 			recorder,
 			virtClient,
 			config,
+			nil,
+			false,
 		)
 		// Wrap our workqueue to have a way to detect when we are done processing updates
 		mockQueue = testutils.NewMockWorkQueue(controller.Queue)
diff --git a/pkg/virt-controller/watch/psa.go b/pkg/virt-controller/watch/psa.go
new file mode 100644
index 000000000000..b03254c49fa4
--- /dev/null
+++ b/pkg/virt-controller/watch/psa.go
@@ -0,0 +1,60 @@
+/*
+ * This file is part of the KubeVirt project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2022 Red Hat, Inc.
+ *
+ */
+
+package watch
+
+import (
+	"context"
+	"fmt"
+
+	k8sv1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/tools/cache"
+
+	"kubevirt.io/client-go/kubecli"
+)
+
+const PSALabel = "pod-security.kubernetes.io/enforce"
+const OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"
+
+func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string, onOpenshift bool) error {
+	obj, exists, err := namespaceStore.GetByKey(namespace)
+	if err != nil {
+		return fmt.Errorf("failed to get namespace: %w", err)
+	}
+	if !exists {
+		// Note: err is nil on this path, so it must not be wrapped with %w.
+		return fmt.Errorf("namespace %s not observed", namespace)
+	}
+	namespaceObj := obj.(*k8sv1.Namespace)
+	enforceLevel, labelExist := namespaceObj.Labels[PSALabel]
+	if !labelExist || enforceLevel != "privileged" {
+		labels := ""
+		if !onOpenshift {
+			labels = fmt.Sprintf(`{"%s": "privileged"}`, PSALabel)
+		} else {
+			labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync)
+		}
+		data := []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels))
+		_, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{})
+		if err != nil {
+			return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
+		}
+	}
+	return nil
+}
diff --git a/pkg/virt-controller/watch/psa_test.go b/pkg/virt-controller/watch/psa_test.go
new file mode 100644
index 000000000000..ff7c51b1fb75
--- /dev/null
+++ b/pkg/virt-controller/watch/psa_test.go
@@ -0,0 +1,116 @@
+package watch
+
+import (
+	"encoding/json"
+
+	"github.com/golang/mock/gomock"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/extensions/table"
+	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/types"
+	k8sv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sruntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
+
+	"kubevirt.io/client-go/kubecli"
+)
+
+var _ = Describe("PSA", func() {
+	var (
+		namespaceStore cache.Store
+		client         *kubecli.MockKubevirtClient
+		kubeClient     *fake.Clientset
+		ctrl           *gomock.Controller
+		notOnOpenshift = false
+	)
+
+	BeforeEach(func() {
+		namespaceStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
+		ctrl = gomock.NewController(GinkgoT())
+		client = kubecli.NewMockKubevirtClient(ctrl)
+		kubeClient = fake.NewSimpleClientset()
+		client.EXPECT().CoreV1().Return(kubeClient.CoreV1()).AnyTimes()
+	})
+
+	Context("should patch namespace with enforce level", func() {
+		var (
+			onOpenshift          = true
+			psaLabels            = HaveKeyWithValue(PSALabel, "privileged")
+			psaLabelsOnOpenshift = And(HaveKeyWithValue(PSALabel, "privileged"), HaveKeyWithValue(OpenshiftPSAsync, "false"))
+		)
+
+		expectLabels := func(expectedLabels types.GomegaMatcher) {
+			kubeClient.Fake.PrependReactor("patch", "namespaces",
+				func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) {
+					patchAction, ok := action.(testing.PatchAction)
+					Expect(ok).To(BeTrue())
+					patchBytes := patchAction.GetPatch()
+					namespace := &k8sv1.Namespace{}
+					Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed(), string(patchBytes))
+
+					Expect(namespace.Labels).To(expectedLabels)
+					return true, nil, nil
+				})
+		}
+
+		DescribeTable("when label is missing", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
+			expectLabels(expectedLabels)
+			namespace := &k8sv1.Namespace{
+				TypeMeta: metav1.TypeMeta{
+					Kind: "Namespace",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+				},
+			}
+			Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())
+
+			Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
+		},
+			Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
+			Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
+		)
+
+		DescribeTable("when enforce label is not privileged", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
+			expectLabels(expectedLabels)
+			namespace := &k8sv1.Namespace{
+				TypeMeta: metav1.TypeMeta{
+					Kind: "Namespace",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test",
+					Labels: map[string]string{
+						PSALabel: "restricted",
+					},
+				},
+			}
+			Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())
+
+			Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
+		},
+			Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
+			Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
+		)
+	})
+
+	It("should not patch namespace when enforce label is set to privileged", func() {
+		namespace := &k8sv1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test",
+				Labels: map[string]string{
+					PSALabel: "privileged",
+				},
+			},
+		}
+		Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())
+		kubeClient.Fake.PrependReactor("patch", "namespaces",
+			func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) {
+				Expect("Patch namespaces is not expected").To(BeEmpty())
+				return true, nil, nil
+			})
+		Expect(escalateNamespace(namespaceStore, client, "test", notOnOpenshift)).To(Succeed())
+	})
+
+})
diff --git a/pkg/virt-controller/watch/vmi.go b/pkg/virt-controller/watch/vmi.go
index bdf2b932e7be..dd0a1b278691 100644
--- a/pkg/virt-controller/watch/vmi.go
+++ b/pkg/virt-controller/watch/vmi.go
@@ -151,6 +151,8 @@ func NewVMIController(templateService services.TemplateService,
 	cdiConfigInformer cache.SharedIndexInformer,
 	clusterConfig *virtconfig.ClusterConfig,
 	topologyHinter topology.Hinter,
+	namespaceStore cache.Store,
+	onOpenshift bool,
 ) *VMIController {
 
 	c := &VMIController{
@@ -169,6 +171,8 @@ func NewVMIController(templateService services.TemplateService,
 		cdiConfigInformer: cdiConfigInformer,
 		clusterConfig:     clusterConfig,
 		topologyHinter:    topologyHinter,
+		namespaceStore:    namespaceStore,
+		onOpenshift:       onOpenshift,
 	}
 
 	c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -230,6 +234,8 @@ type VMIController struct {
 	cdiInformer       cache.SharedIndexInformer
 	cdiConfigInformer cache.SharedIndexInformer
 	clusterConfig     *virtconfig.ClusterConfig
+	namespaceStore    cache.Store
+	onOpenshift       bool
 }
 
 func (c *VMIController) Run(threadiness int, stopCh <-chan struct{}) {
@@ -1036,6 +1042,13 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod,
 		return &syncErrorImpl{fmt.Errorf(failedToRenderLaunchManifestErrFormat, err), FailedCreatePodReason}
 	}
 
+	if c.clusterConfig.PSAEnabled() {
+		namespace := vmi.GetNamespace()
+		if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
+			return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
+		}
+	}
+
 	vmiKey := controller.VirtualMachineInstanceKey(vmi)
 	c.podExpectations.ExpectCreations(vmiKey, 1)
 	pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{})
@@ -1765,6 +1778,13 @@ func (c *VMIController) createAttachmentPod(vmi *virtv1.VirtualMachineInstance,
 	vmiKey := controller.VirtualMachineInstanceKey(vmi)
 	c.podExpectations.ExpectCreations(vmiKey, 1)
 
+	if c.clusterConfig.PSAEnabled() {
+		namespace := vmi.GetNamespace()
+		if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
+			return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating attachment pod", namespace)}
+		}
+	}
+
 	pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{})
 	if err != nil {
 		c.podExpectations.CreationObserved(vmiKey)
@@ -1784,6 +1804,13 @@ func (c *VMIController) triggerHotplugPopulation(volume *virtv1.Volume, vmi *vir
 	vmiKey := controller.VirtualMachineInstanceKey(vmi)
 	c.podExpectations.ExpectCreations(vmiKey, 1)
 
+	if c.clusterConfig.PSAEnabled() {
+		namespace := vmi.GetNamespace()
+		if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
+			return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating hotplug population trigger pod", namespace)}
+		}
+	}
+
 	_, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), populateHotplugPodTemplate, v1.CreateOptions{})
 	if err != nil {
 		c.podExpectations.CreationObserved(vmiKey)
diff --git a/pkg/virt-controller/watch/vmi_test.go b/pkg/virt-controller/watch/vmi_test.go
index da28034fcf0d..98224e324616 100644
--- a/pkg/virt-controller/watch/vmi_test.go
+++ b/pkg/virt-controller/watch/vmi_test.go
@@ -75,6 +75,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
 	var kubeClient *fake.Clientset
 	var networkClient *fakenetworkclient.Clientset
 	var pvcInformer cache.SharedIndexInformer
+	var namespaceStore cache.Store
 	var dataVolumeSource *framework.FakeControllerSource
 	var dataVolumeInformer cache.SharedIndexInformer
@@ -213,6 +214,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
 		pvcInformer, _ = testutils.NewFakeInformerFor(&k8sv1.PersistentVolumeClaim{})
 		cdiInformer, _ = testutils.NewFakeInformerFor(&cdiv1.CDIConfig{})
 		cdiConfigInformer, _ = testutils.NewFakeInformerFor(&cdiv1.CDIConfig{})
+		namespaceStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc)
 		controller = NewVMIController(
 			services.NewTemplateService("a", 240, "b", "c", "d", "e", "f", "g", pvcInformer.GetStore(), virtClient, config, qemuGid),
 			vmiInformer,
@@ -225,7 +227,9 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
 			cdiInformer,
 			cdiConfigInformer,
 			config,
-			topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil),
+			topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", config),
+			namespaceStore,
+			false,
 		)
 		// Wrap our workqueue to have a way to detect when we are done processing updates
 		mockQueue = testutils.NewMockWorkQueue(controller.Queue)
diff --git a/pkg/virt-operator/resource/generate/components/BUILD.bazel b/pkg/virt-operator/resource/generate/components/BUILD.bazel
index 37d487212098..0dabf5b20656 100644
--- a/pkg/virt-operator/resource/generate/components/BUILD.bazel
+++ b/pkg/virt-operator/resource/generate/components/BUILD.bazel
@@ -44,6 +44,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
         "//vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
 
diff --git a/pkg/virt-operator/resource/generate/components/deployments.go b/pkg/virt-operator/resource/generate/components/deployments.go
index 85da56f393b2..cbb5b5e7e194 100644
--- a/pkg/virt-operator/resource/generate/components/deployments.go
+++ b/pkg/virt-operator/resource/generate/components/deployments.go
@@ -23,6 +23,7 @@ import (
 	"strings"
 
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/utils/pointer"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -284,7 +285,8 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str
 	pod := &deployment.Spec.Template.Spec
 	pod.ServiceAccountName = rbac.ApiServiceAccountName
 	pod.SecurityContext = &corev1.PodSecurityContext{
-		RunAsNonRoot: boolPtr(true),
+		RunAsNonRoot:   boolPtr(true),
+		SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
 	}
 
 	container := &deployment.Spec.Template.Spec.Containers[0]
@@ -332,6 +334,13 @@ func NewApiServerDeployment(namespace string, repository string, imagePrefix str
 		},
 	}
 
+	container.SecurityContext = &corev1.SecurityContext{
+		AllowPrivilegeEscalation: pointer.Bool(false),
+		Capabilities: &corev1.Capabilities{
+			Drop: []corev1.Capability{"ALL"},
+		},
+		SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+	}
 	return deployment, nil
 }
 
@@ -348,7 +357,8 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st
 	pod := &deployment.Spec.Template.Spec
 	pod.ServiceAccountName = rbac.ControllerServiceAccountName
 	pod.SecurityContext = &corev1.PodSecurityContext{
-		RunAsNonRoot: boolPtr(true),
+		RunAsNonRoot:   boolPtr(true),
+		SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
 	}
 
 	launcherVersion = AddVersionSeparatorPrefix(launcherVersion)
@@ -410,6 +420,13 @@ func NewControllerDeployment(namespace string, repository string, imagePrefix st
 		},
 	}
 
+	container.SecurityContext = &corev1.SecurityContext{
+		AllowPrivilegeEscalation: pointer.Bool(false),
+		Capabilities: &corev1.Capabilities{
+			Drop: []corev1.Capability{"ALL"},
+		},
+		SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+	}
 	return deployment, nil
 }
 
@@ -516,10 +533,18 @@ func NewOperatorDeployment(namespace string, repository string, imagePrefix stri
 							corev1.ResourceMemory: resource.MustParse("150Mi"),
 						},
 					},
+					SecurityContext: &corev1.SecurityContext{
+						AllowPrivilegeEscalation: pointer.Bool(false),
+						Capabilities: &corev1.Capabilities{
+							Drop: []corev1.Capability{"ALL"},
+						},
+						SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
+					},
 				},
 			},
 			SecurityContext: &corev1.PodSecurityContext{
-				RunAsNonRoot: boolPtr(true),
+				RunAsNonRoot:   boolPtr(true),
+				SeccompProfile: &corev1.SeccompProfile{Type: corev1.SeccompProfileTypeRuntimeDefault},
 			},
 		},
 	},
diff --git a/pkg/virt-operator/resource/generate/rbac/controller.go b/pkg/virt-operator/resource/generate/rbac/controller.go
index 953ac301a91f..0963946ec024 100644
--- a/pkg/virt-operator/resource/generate/rbac/controller.go
+++ b/pkg/virt-operator/resource/generate/rbac/controller.go
@@ -67,6 +67,20 @@ func newControllerClusterRole() *rbacv1.ClusterRole {
 			},
 		},
 		Rules: []rbacv1.PolicyRule{
+			{
+				APIGroups: []string{
+					"",
+				},
+				Resources: []string{
+					"namespaces",
+				},
+				Verbs: []string{
+					"get",
+					"list",
+					"watch",
+					"patch",
+				},
+			},
 			{
 				APIGroups: []string{
 					"policy",
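
Editor's note, illustration only (not part of the patch above): escalateNamespace in psa.go assembles its strategic merge patch by nesting fmt.Sprintf calls, which works but leaves quoting and escaping to the author. The sketch below builds an equivalent payload with encoding/json instead; buildEnforcePatch is a hypothetical helper name introduced here for illustration, not a function in this PR.

package main

import (
	"encoding/json"
	"fmt"
)

const (
	// Same label keys as psa.go in the diff above.
	PSALabel         = "pod-security.kubernetes.io/enforce"
	OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"
)

// buildEnforcePatch returns a strategic-merge-patch body equivalent to the
// one escalateNamespace formats by hand, letting json.Marshal handle quoting.
func buildEnforcePatch(onOpenshift bool) ([]byte, error) {
	labels := map[string]string{PSALabel: "privileged"}
	if onOpenshift {
		// Keep OpenShift's label sync from overwriting the enforce level.
		labels[OpenshiftPSAsync] = "false"
	}
	return json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{"labels": labels},
	})
}

func main() {
	data, err := buildEnforcePatch(true)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"metadata":{"labels":{...}}}
}

Applying the bytes is unchanged from the PR: pass them to client.CoreV1().Namespaces().Patch with types.StrategicMergePatchType. The PSA gate itself is switched on like any other KubeVirt feature gate, via the featureGates list under the KubeVirt CR's developerConfiguration.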