Skip to content

Commit

Permalink
Pod security on OpenShift
Browse files Browse the repository at this point in the history
Make sure we are not racing with the cluster
sync mechanism on OpenShift.

Signed-off-by: L. Pivarc <lpivarc@redhat.com>
(cherry picked from commit 230676f)
Signed-off-by: L. Pivarc <lpivarc@redhat.com>
  • Loading branch information
xpivarc committed Sep 27, 2022
1 parent 8fd9ac1 commit 39abb59
Show file tree
Hide file tree
Showing 9 changed files with 65 additions and 20 deletions.
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ go_library(
"//pkg/monitoring/vmistats:go_default_library",
"//pkg/service:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/cluster:go_default_library",
"//pkg/util/lookup:go_default_library",
"//pkg/util/migrations:go_default_library",
"//pkg/util/pdbs:go_default_library",
Expand Down
11 changes: 11 additions & 0 deletions pkg/virt-controller/watch/application.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ import (
vmiprom "kubevirt.io/kubevirt/pkg/monitoring/vmistats" // import for prometheus metrics
"kubevirt.io/kubevirt/pkg/service"
"kubevirt.io/kubevirt/pkg/util"
clusterutil "kubevirt.io/kubevirt/pkg/util/cluster"
"kubevirt.io/kubevirt/pkg/util/webhooks"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/pkg/virt-controller/leaderelectionconfig"
Expand Down Expand Up @@ -229,6 +230,8 @@ type VirtControllerApp struct {
nodeTopologyUpdatePeriod time.Duration
reloadableRateLimiter *ratelimiter.ReloadableRateLimiter
leaderElector *leaderelection.LeaderElector

onOpenshift bool
}

var _ service.Service = &VirtControllerApp{}
Expand Down Expand Up @@ -362,6 +365,12 @@ func Execute() {

app.migrationPolicyInformer = app.informerFactory.MigrationPolicy()

onOpenShift, err := clusterutil.IsOnOpenShift(app.clientSet)
if err != nil {
golog.Fatalf("Error determining cluster type: %v", err)
}
app.onOpenshift = onOpenShift

app.initCommon()
app.initReplicaSet()
app.initPool()
Expand Down Expand Up @@ -515,6 +524,7 @@ func (vca *VirtControllerApp) initCommon() {
vca.clusterConfig,
topologyHinter,
vca.namespaceStore,
vca.onOpenshift,
)

recorder := vca.newRecorder(k8sv1.NamespaceAll, "node-controller")
Expand All @@ -532,6 +542,7 @@ func (vca *VirtControllerApp) initCommon() {
vca.clientSet,
vca.clusterConfig,
vca.namespaceStore,
vca.onOpenshift,
)

vca.nodeTopologyUpdater = topology.NewNodeTopologyUpdater(vca.clientSet, topologyHinter, vca.nodeInformer)
Expand Down
2 changes: 2 additions & 0 deletions pkg/virt-controller/watch/application_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ var _ = Describe("Application", func() {
config,
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", nil),
nil,
false,
)
app.rsController = NewVMIReplicaSet(vmiInformer, rsInformer, recorder, virtClient, uint(10))
app.vmController = NewVMController(vmiInformer,
Expand All @@ -147,6 +148,7 @@ var _ = Describe("Application", func() {
virtClient,
config,
nil,
false,
)
app.snapshotController = &snapshot.VMSnapshotController{
Client: virtClient,
Expand Down
10 changes: 6 additions & 4 deletions pkg/virt-controller/watch/migration.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,8 @@ type MigrationController struct {

unschedulablePendingTimeoutSeconds int64
catchAllPendingTimeoutSeconds int64

onOpenshift bool
}

func NewMigrationController(templateService services.TemplateService,
Expand All @@ -117,6 +119,7 @@ func NewMigrationController(templateService services.TemplateService,
clientset kubecli.KubevirtClient,
clusterConfig *virtconfig.ClusterConfig,
namespaceStore cache.Store,
onOpenshift bool,
) *MigrationController {

c := &MigrationController{
Expand All @@ -140,6 +143,7 @@ func NewMigrationController(templateService services.TemplateService,
catchAllPendingTimeoutSeconds: defaultCatchAllPendingTimeoutSeconds,

namespaceStore: namespaceStore,
onOpenshift: onOpenshift,
}

c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
Expand Down Expand Up @@ -587,8 +591,7 @@ func (c *MigrationController) createTargetPod(migration *virtv1.VirtualMachineIn
}

if c.clusterConfig.PSAEnabled() {
// Check my impact
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}
Expand Down Expand Up @@ -852,8 +855,7 @@ func (c *MigrationController) createAttachmentPod(migration *virtv1.VirtualMachi
attachmentPodTemplate.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = string(migration.Name)

if c.clusterConfig.PSAEnabled() {
// Check my impact
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace()); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, vmi.GetNamespace(), c.onOpenshift); err != nil {
return err
}
}
Expand Down
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/migration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -271,6 +271,7 @@ var _ = Describe("Migration watcher", func() {
virtClient,
config,
nil,
false,
)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
Expand Down
11 changes: 9 additions & 2 deletions pkg/virt-controller/watch/psa.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,9 @@ import (
)

const PSALabel = "pod-security.kubernetes.io/enforce"
const OpenshiftPSAsync = "security.openshift.io/scc.podSecurityLabelSync"

func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string) error {
func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient, namespace string, onOpenshift bool) error {
obj, exists, err := namespaceStore.GetByKey(namespace)
if err != nil {
return fmt.Errorf("Failed to get namespace, %w", err)
Expand All @@ -43,7 +44,13 @@ func escalateNamespace(namespaceStore cache.Store, client kubecli.KubevirtClient
namespaceObj := obj.(*k8sv1.Namespace)
enforceLevel, labelExist := namespaceObj.Labels[PSALabel]
if !labelExist || enforceLevel != "privileged" {
data := []byte(fmt.Sprintf(`{"metadata": { "labels": {"%s": "privileged"}}}`, PSALabel))
labels := ""
if !onOpenshift {
labels = fmt.Sprintf(`{"%s": "privileged"}`, PSALabel)
} else {
labels = fmt.Sprintf(`{"%s": "privileged", "%s": "false"}`, PSALabel, OpenshiftPSAsync)
}
data := []byte(fmt.Sprintf(`{"metadata": { "labels": %s}}`, labels))
_, err := client.CoreV1().Namespaces().Patch(context.TODO(), namespace, types.StrategicMergePatchType, data, v1.PatchOptions{})
if err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
Expand Down
39 changes: 28 additions & 11 deletions pkg/virt-controller/watch/psa_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,9 @@ import (

"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
k8sv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
Expand All @@ -22,6 +24,7 @@ var _ = Describe("PSA", func() {
client *kubecli.MockKubevirtClient
kubeClient *fake.Clientset
ctrl *gomock.Controller
notOnOpenshift = false
)

BeforeEach(func() {
Expand All @@ -33,21 +36,28 @@ var _ = Describe("PSA", func() {
})

Context("should patch namespace with enforce level", func() {
BeforeEach(func() {
var (
onOpenshift = true
psaLabels = HaveKeyWithValue(PSALabel, "privileged")
psaLabelsOnOpenshift = And(HaveKeyWithValue(PSALabel, "privileged"), HaveKeyWithValue(OpenshiftPSAsync, "false"))
)

expectLabels := func(expectedLabels types.GomegaMatcher) {
kubeClient.Fake.PrependReactor("patch", "namespaces",
func(action testing.Action) (handled bool, obj k8sruntime.Object, err error) {
patchAction, ok := action.(testing.PatchAction)
Expect(ok).To(BeTrue())
patchBytes := patchAction.GetPatch()
namespace := &k8sv1.Namespace{}
Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed())
Expect(json.Unmarshal(patchBytes, namespace)).To(Succeed(), string(patchBytes))

Expect(namespace.Labels).To(HaveKeyWithValue(PSALabel, "privileged"))
Expect(namespace.Labels).To(expectedLabels)
return true, nil, nil
})
})
}

It("when label is missing", func() {
DescribeTable("when label is missing", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
expectLabels(expectedLabels)
namespace := &k8sv1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
Expand All @@ -58,10 +68,14 @@ var _ = Describe("PSA", func() {
}
Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())

Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
})
Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
},
Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
)

It("when enforce label is not privileged", func() {
DescribeTable("when enforce label is not privileged", func(expectedLabels types.GomegaMatcher, onOpenshift bool) {
expectLabels(expectedLabels)
namespace := &k8sv1.Namespace{
TypeMeta: metav1.TypeMeta{
Kind: "Namespace",
Expand All @@ -75,8 +89,11 @@ var _ = Describe("PSA", func() {
}
Expect(namespaceStore.Add(namespace)).NotTo(HaveOccurred())

Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
})
Expect(escalateNamespace(namespaceStore, client, "test", onOpenshift)).To(Succeed())
},
Entry("on plain Kubernetes", psaLabels, notOnOpenshift),
Entry("on Openshift", psaLabelsOnOpenshift, onOpenshift),
)
})
It("should not patch namespace when enforce label is set to privileged", func() {
namespace := &k8sv1.Namespace{
Expand All @@ -93,7 +110,7 @@ var _ = Describe("PSA", func() {
Expect("Patch namespaces is not expected").To(BeEmpty())
return true, nil, nil
})
Expect(escalateNamespace(namespaceStore, client, "test")).To(Succeed())
Expect(escalateNamespace(namespaceStore, client, "test", notOnOpenshift)).To(Succeed())
})

})
9 changes: 6 additions & 3 deletions pkg/virt-controller/watch/vmi.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,7 @@ func NewVMIController(templateService services.TemplateService,
clusterConfig *virtconfig.ClusterConfig,
topologyHinter topology.Hinter,
namespaceStore cache.Store,
onOpenshift bool,
) *VMIController {

c := &VMIController{
Expand All @@ -171,6 +172,7 @@ func NewVMIController(templateService services.TemplateService,
clusterConfig: clusterConfig,
topologyHinter: topologyHinter,
namespaceStore: namespaceStore,
onOpenshift: onOpenshift,
}

c.vmiInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
Expand Down Expand Up @@ -233,6 +235,7 @@ type VMIController struct {
cdiConfigInformer cache.SharedIndexInformer
clusterConfig *virtconfig.ClusterConfig
namespaceStore cache.Store
onOpenshift bool
}

func (c *VMIController) Run(threadiness int, stopCh <-chan struct{}) {
Expand Down Expand Up @@ -1041,7 +1044,7 @@ func (c *VMIController) sync(vmi *virtv1.VirtualMachineInstance, pod *k8sv1.Pod,

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s", namespace)}
}
}
Expand Down Expand Up @@ -1777,7 +1780,7 @@ func (c *VMIController) createAttachmentPod(vmi *virtv1.VirtualMachineInstance,

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating attachment pod", namespace)}
}
}
Expand All @@ -1803,7 +1806,7 @@ func (c *VMIController) triggerHotplugPopulation(volume *virtv1.Volume, vmi *vir

if c.clusterConfig.PSAEnabled() {
namespace := vmi.GetNamespace()
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace); err != nil {
if err := escalateNamespace(c.namespaceStore, c.clientset, namespace, c.onOpenshift); err != nil {
return &syncErrorImpl{err, fmt.Sprintf("Failed to apply enforce label on namespace %s while creating hotplug population trigger pod", namespace)}
}
}
Expand Down
1 change: 1 addition & 0 deletions pkg/virt-controller/watch/vmi_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,7 @@ var _ = Describe("VirtualMachineInstance watcher", func() {
config,
topology.NewTopologyHinter(&cache.FakeCustomStore{}, &cache.FakeCustomStore{}, "amd64", config),
namespaceStore,
false,
)
// Wrap our workqueue to have a way to detect when we are done processing updates
mockQueue = testutils.NewMockWorkQueue(controller.Queue)
Expand Down

0 comments on commit 39abb59

Please sign in to comment.