Merge pull request #8529 from xpivarc/release-0.49-psa
[release-0.49] Integrate with Pod security
kubevirt-bot committed Oct 4, 2022
2 parents 401cafd + 39abb59 commit f7bd5bd
Showing 17 changed files with 331 additions and 5 deletions.
2 changes: 2 additions & 0 deletions hack/cluster-deploy.sh
@@ -74,6 +74,8 @@ apiVersion: v1
kind: Namespace
metadata:
name: ${namespace:?}
labels:
pod-security.kubernetes.io/enforce: "privileged"
EOF

if [[ "$KUBEVIRT_PROVIDER" =~ kind.* ]]; then
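The enforce label above drops the whole test namespace to the "privileged" Pod Security level, so the privileged virt-handler and virt-launcher pods can still be admitted. A quick check that the label landed after a deploy, assuming kubectl access to the dev cluster (illustrative only, not part of this change):

kubectl get namespace "${namespace}" \
  -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce}'
# expected output: privileged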
18 changes: 18 additions & 0 deletions manifests/generated/operator-csv.yaml.in
@@ -458,6 +458,15 @@ spec:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- patch
- apiGroups:
- policy
resources:
@@ -1073,6 +1082,13 @@ spec:
requests:
cpu: 10m
memory: 150Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
seccompProfile:
type: RuntimeDefault
volumeMounts:
- mountPath: /etc/virt-operator/certificates
name: kubevirt-operator-certs
@@ -1082,6 +1098,8 @@
priorityClassName: kubevirt-cluster-critical
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
serviceAccountName: kubevirt-operator
tolerations:
- key: CriticalAddonsOnly
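The container securityContext (no privilege escalation, all capabilities dropped) and the pod-level RuntimeDefault seccomp profile are exactly what the "restricted" Pod Security profile requires, so virt-operator itself stays admissible without the escalation applied to workload namespaces. One way to audit a running deployment against that profile, assuming the operator lives in the kubevirt namespace (a server-side dry run only prints admission warnings and changes nothing):

kubectl label --dry-run=server --overwrite namespace kubevirt \
  pod-security.kubernetes.io/enforce=restricted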
9 changes: 9 additions & 0 deletions manifests/generated/rbac-operator.authorization.k8s.yaml.in
@@ -360,6 +360,15 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- list
- watch
- patch
- apiGroups:
- policy
resources:
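The new rule grants patch on namespaces alongside the read verbs; without it the enforce-label patch in psa.go below would be rejected by RBAC. A quick way to confirm the permission after an upgrade, assuming the default kubevirt namespace and service account name:

kubectl auth can-i patch namespaces \
  --as=system:serviceaccount:kubevirt:kubevirt-operator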
1 change: 1 addition & 0 deletions manifests/release/kubevirt-operator.yaml.in
@@ -4,6 +4,7 @@ kind: Namespace
metadata:
labels:
kubevirt.io: ""
pod-security.kubernetes.io/enforce: "privileged"
name: {{.Namespace}}
{{index .GeneratedManifests "kv-resource.yaml"}}
---
5 changes: 5 additions & 0 deletions pkg/virt-config/feature-gates.go
@@ -45,6 +45,7 @@ const (
NonRoot = "NonRootExperimental"
ClusterProfiler = "ClusterProfiler"
WorkloadEncryptionSEV = "WorkloadEncryptionSEV"
PSA = "PSA"
)

func (c *ClusterConfig) isFeatureGateEnabled(featureGate string) bool {
@@ -136,3 +137,7 @@ func (config *ClusterConfig) ClusterProfilerEnabled() bool {
func (config *ClusterConfig) WorkloadEncryptionSEVEnabled() bool {
return config.isFeatureGateEnabled(WorkloadEncryptionSEV)
}

func (config *ClusterConfig) PSAEnabled() bool {
return config.isFeatureGateEnabled(PSA)
}
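PSAEnabled gates the namespace escalation wired into the controllers below, so clusters without Pod Security admission are left untouched. The gate is enabled like any other KubeVirt feature gate, via the KubeVirt CR; a sketch, assuming the CR is named kubevirt in the kubevirt namespace and featureGates already exists as a list:

kubectl -n kubevirt patch kubevirt kubevirt --type=json \
  -p '[{"op": "add", "path": "/spec/configuration/developerConfiguration/featureGates/-", "value": "PSA"}]'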
3 changes: 3 additions & 0 deletions pkg/virt-controller/watch/BUILD.bazel
@@ -8,6 +8,7 @@ go_library(
"migration.go",
"node.go",
"pool.go",
"psa.go",
"replicaset.go",
"util.go",
"vm.go",
@@ -26,6 +27,7 @@ go_library(
"//pkg/monitoring/vmistats:go_default_library",
"//pkg/service:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/cluster:go_default_library",
"//pkg/util/lookup:go_default_library",
"//pkg/util/migrations:go_default_library",
"//pkg/util/pdbs:go_default_library",
@@ -94,6 +96,7 @@ go_test(
"migration_test.go",
"node_test.go",
"pool_test.go",
"psa_test.go",
"replicaset_test.go",
"vm_test.go",
"vmi_test.go",
17 changes: 16 additions & 1 deletion pkg/virt-controller/watch/application.go
@@ -65,6 +65,7 @@ import (
vmiprom "kubevirt.io/kubevirt/pkg/monitoring/vmistats" // import for prometheus metrics
"kubevirt.io/kubevirt/pkg/service"
"kubevirt.io/kubevirt/pkg/util"
clusterutil "kubevirt.io/kubevirt/pkg/util/cluster"
"kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/leaderelectionconfig"
@@ -139,6 +140,8 @@ type VirtControllerApp struct {
vmiInformer cache.SharedIndexInformer
vmiRecorder record.EventRecorder

namespaceStore cache.Store

kubeVirtInformer cache.SharedIndexInformer

clusterConfig *virtconfig.ClusterConfig
@@ -227,6 +230,8 @@ type VirtControllerApp struct {
nodeTopologyUpdatePeriod time.Duration
reloadableRateLimiter *ratelimiter.ReloadableRateLimiter
leaderElector *leaderelection.LeaderElector

onOpenshift bool
}

var _ service.Service = &VirtControllerApp{}
@@ -316,7 +321,7 @@ func Execute() {
app.vmiInformer = app.informerFactory.VMI()
app.kvPodInformer = app.informerFactory.KubeVirtPod()
app.nodeInformer = app.informerFactory.KubeVirtNode()

app.namespaceStore = app.informerFactory.Namespace().GetStore()
app.vmiCache = app.vmiInformer.GetStore()
app.vmiRecorder = app.newRecorder(k8sv1.NamespaceAll, "virtualmachine-controller")

@@ -360,6 +365,12 @@ func Execute() {

app.migrationPolicyInformer = app.informerFactory.MigrationPolicy()

onOpenShift, err := clusterutil.IsOnOpenShift(app.clientSet)
if err != nil {
golog.Fatalf("Error determining cluster type: %v", err)
}
app.onOpenshift = onOpenShift

app.initCommon()
app.initReplicaSet()
app.initPool()
@@ -512,6 +523,8 @@ func (vca *VirtControllerApp) initCommon() {
vca.cdiConfigInformer,
vca.clusterConfig,
topologyHinter,
vca.namespaceStore,
vca.onOpenshift,
)

recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller")
@@ -528,6 +541,8 @@
vca.vmiRecorder,
vca.clientSet,
vca.clusterConfig,
vca.namespaceStore,
vca.onOpenshift,
)

vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer)
4 changes: 4 additions & 0 deletions pkg/virt-controller/watch/application_test.go
@@ -124,6 +124,8 @@ var _ = Describe("Application", func() {
cdiConfigInformer,
config,
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil),
nil,
false,
)
app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10))
app.vmController = NewVMController(vmiInformer,
@@ -145,6 +147,8 @@
recorder,
virtClient,
config,
nil,
false,
)
app.snapshotController = &snapshot.VMSnapshotController{
Client: virtClient,
20 changes: 20 additions & 0 deletions pkg/virt-controller/watch/migration.go
@@ -94,6 +94,7 @@ type MigrationController struct {
pvcInformer cache.SharedIndexInformer
pdbInformer cache.SharedIndexInformer
migrationPolicyInformer cache.SharedIndexInformer
namespaceStore cache.Store
recorder record.EventRecorder
podExpectations *controller.UIDTrackingControllerExpectations
migrationStartLock *sync.Mutex
@@ -102,6 +103,8 @@ type MigrationController struct {

unschedulablePendingTimeoutSeconds int64
catchAllPendingTimeoutSeconds int64

onOpenshift bool
}

func NewMigrationController(templateService services.TemplateService,
@@ -115,6 +118,8 @@ func NewMigrationController(templateService services.TemplateService,
recorder record.EventRecorder,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
namespaceStore cache.Store,
onOpenshift bool,
) *MigrationController {

c := &MigrationController{
@@ -136,6 +141,9 @@

unschedulablePendingTimeoutSeconds: defaultUnschedulablePendingTimeoutSeconds,
catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds,

namespaceStore: namespaceStore,
onOpenshift: onOpenshift,
}

c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -582,6 +590,12 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn
}
}

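// The migration target pod needs the same privileges as the source
// virt-launcher pod, so make sure the namespace allows privileged
// workloads before creating it.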
if c.clusterConfig.PSAEnabled() {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}

key := controller.MigrationKey(migration)
c.podExpectations.ExpectCreations(key, 1)
pod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), templatePod, v1.CreateOptions{})
@@ -840,8 +854,14 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi
attachmentPodTemplate.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID)
attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = string(migration.Name)

if c.clusterConfig.PSAEnabled() {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}
key := controller.MigrationKey(migration)
c.podExpectations.ExpectCreations(key, 1)

attachmentPod, err := c.clientset.CoreV1().Pods(vmi.GetNamespace()).Create(context.Background(), attachmentPodTemplate, v1.CreateOptions{})
if err != nil {
c.podExpectations.CreationObserved(key)
2 changes: 2 additions & 0 deletions pkg/virt-controller/watch/migration_test.go
@@ -270,6 +270,8 @@ var _ = Describe("Migration watcher", func() {
recorder,
virtClient,
config,
nil,
false,
)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
60 changes: 60 additions & 0 deletions pkg/virt-controller/watch/psa.go
@@ -0,0 +1,60 @@
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2022 Red Hat, Inc.
*
*/

package watch

import (
"context"
"fmt"

k8sv1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"

"kubevirt.io/client-go/kubecli"
)

const PSALabel = "pod-security.kubernetes.io/enforce"
const OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"

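// escalateNamespace relabels the given namespace to the "privileged"
// Pod Security level; on OpenShift it also opts the namespace out of
// automatic pod-security label syncing.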
func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string, onOpenshift bool) error {
obj, exists, err := namespaceStore.GetByKey(namespace)
if err != nil {
return fmt.Errorf("Failed to get namespace, %w", err)
}
if !exists {
return fmt.Errorf("Namespace %s not observed, %w", namespace, err)
}
namespaceObj := obj.(*k8sv1.Namespace)
enforceLevel, labelExist := namespaceObj.Labels[PSALabel]
if !labelExist || enforceLevel != "privileged" {
labels := ""
if !onOpenshift {
labels = fmt.Sprintf(`{"%s": "privileged"}`, PSALabel)
} else {
labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync)
}
data := []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels))
_, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{})
if err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
}
}
return nil
}
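For reference, the strategic-merge patch the helper sends is equivalent to relabeling the namespace by hand; the OpenShift-only label keeps the cluster from syncing the PSA labels back. A manual sketch of the same escalation, with <target-namespace> as a placeholder:

kubectl patch namespace <target-namespace> --type=strategic \
  -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged","security.openshift.io/scc.podSecurityLabelSync":"false"}}}'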
