Skip to content

Commit

Permalink
Merge pull request #99197 from soltysh/cronjob_e2e
Browse files Browse the repository at this point in the history
Switch to batch/v1 in cronjob's e2es
  • Loading branch information
k8s-ci-robot committed Mar 6, 2021
2 parents a54414e + 0603a14 commit 4770211
Showing 1 changed file with 47 additions and 33 deletions.
80 changes: 47 additions & 33 deletions test/e2e/apps/cronjob.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ import (
"github.com/onsi/gomega"

batchv1 "k8s.io/api/batch/v1"
batchv1beta1 "k8s.io/api/batch/v1beta1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -68,7 +67,7 @@ var _ = SIGDescribe("CronJob", func() {
// multiple jobs running at once
ginkgo.It("should schedule multiple jobs concurrently", func() {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
Expand All @@ -91,7 +90,7 @@ var _ = SIGDescribe("CronJob", func() {
// suspended should not schedule jobs
ginkgo.It("should not schedule jobs when suspended [Slow]", func() {
ginkgo.By("Creating a suspended cronjob")
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil)
t := true
cronJob.Spec.Suspend = &t
Expand All @@ -115,7 +114,7 @@ var _ = SIGDescribe("CronJob", func() {
// only single active job is allowed for ForbidConcurrent
ginkgo.It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
Expand Down Expand Up @@ -147,7 +146,7 @@ var _ = SIGDescribe("CronJob", func() {
// only single active job is allowed for ReplaceConcurrent
ginkgo.It("should replace jobs when ReplaceConcurrent", func() {
ginkgo.By("Creating a ReplaceConcurrent cronjob")
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1.ReplaceConcurrent,
sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
Expand Down Expand Up @@ -178,7 +177,7 @@ var _ = SIGDescribe("CronJob", func() {

ginkgo.It("should be able to schedule after more than 100 missed schedule", func() {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
creationTime := time.Now().Add(-99 * 24 * time.Hour)
lastScheduleTime := creationTime.Add(-1 * 24 * time.Hour)
Expand All @@ -205,7 +204,7 @@ var _ = SIGDescribe("CronJob", func() {
// shouldn't give us unexpected warnings
ginkgo.It("should not emit unexpected warnings", func() {
ginkgo.By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1.AllowConcurrent,
nil, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
Expand All @@ -228,7 +227,7 @@ var _ = SIGDescribe("CronJob", func() {
// deleted jobs should be removed from the active list
ginkgo.It("should remove from active list jobs that have been deleted", func() {
ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
Expand Down Expand Up @@ -269,7 +268,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit")
successLimit := int32(1)
failedLimit := int32(0)
cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
cronJob := newTestCronJob("successful-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
successCommand, &successLimit, &failedLimit)

ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
Expand All @@ -280,7 +279,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating an AllowConcurrent cronjob with custom history limit")
successLimit := int32(0)
failedLimit := int32(1)
cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
cronJob := newTestCronJob("failed-jobs-history-limit", "*/1 * * * ?", batchv1.AllowConcurrent,
failureCommand, &successLimit, &failedLimit)

ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
Expand All @@ -290,15 +289,15 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Creating a cronjob")
successLimit := int32(1)
failedLimit := int32(0)
cjTemplate := newTestCronJob("test-api", "* */1 * * ?", batchv1beta1.AllowConcurrent,
cjTemplate := newTestCronJob("test-api", "* */1 * * ?", batchv1.AllowConcurrent,
successCommand, &successLimit, &failedLimit)
cjTemplate.Labels = map[string]string{
"special-label": f.UniqueName,
}

ns := f.Namespace.Name
cjVersion := "v1beta1"
cjClient := f.ClientSet.BatchV1beta1().CronJobs(ns)
cjVersion := "v1"
cjClient := f.ClientSet.BatchV1().CronJobs(ns)

ginkgo.By("creating")
createdCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{})
Expand All @@ -320,7 +319,7 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectNoError(err)

// Test cluster-wide list and watch
clusterCJClient := f.ClientSet.BatchV1beta1().CronJobs("")
clusterCJClient := f.ClientSet.BatchV1().CronJobs("")
ginkgo.By("cluster-wide listing")
clusterCJs, err := clusterCJClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
Expand All @@ -338,7 +337,7 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation")

ginkgo.By("updating")
var cjToUpdate, updatedCronJob *batchv1beta1.CronJob
var cjToUpdate, updatedCronJob *batchv1.CronJob
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
cjToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
if err != nil {
Expand All @@ -357,7 +356,7 @@ var _ = SIGDescribe("CronJob", func() {
case evt, ok := <-cjWatch.ResultChan():
framework.ExpectEqual(ok, true, "watch channel should not close")
framework.ExpectEqual(evt.Type, watch.Modified)
watchedCronJob, isCronJob := evt.Object.(*batchv1beta1.CronJob)
watchedCronJob, isCronJob := evt.Object.(*batchv1.CronJob)
framework.ExpectEqual(isCronJob, true, fmt.Sprintf("expected CronJob, got %T", evt.Object))
if watchedCronJob.Annotations["patched"] == "true" {
framework.Logf("saw patched and updated annotations")
Expand All @@ -375,7 +374,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("patching /status")
// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
now1 := metav1.Now().Rfc3339Copy()
cjStatus := batchv1beta1.CronJobStatus{
cjStatus := batchv1.CronJobStatus{
LastScheduleTime: &now1,
}
cjStatusJSON, err := json.Marshal(cjStatus)
Expand All @@ -390,7 +389,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("updating /status")
// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
now2 := metav1.Now().Rfc3339Copy()
var statusToUpdate, updatedStatus *batchv1beta1.CronJob
var statusToUpdate, updatedStatus *batchv1.CronJob
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
statusToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
if err != nil {
Expand All @@ -412,17 +411,32 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectEqual(string(createdCronJob.UID), statusUID, fmt.Sprintf("createdCronJob.UID: %v expected to match statusUID: %v ", createdCronJob.UID, statusUID))

// CronJob resource delete operations
ginkgo.By("deleting a collection")
expectFinalizer := func(cj *batchv1beta1.CronJob, msg string) {
expectFinalizer := func(cj *batchv1.CronJob, msg string) {
framework.ExpectNotEqual(cj.DeletionTimestamp, nil, fmt.Sprintf("expected deletionTimestamp, got nil on step: %q, cronjob: %+v", msg, cj))
framework.ExpectEqual(len(cj.Finalizers) > 0, true, fmt.Sprintf("expected finalizers on cronjob, got none on step: %q, cronjob: %+v", msg, cj))
}

ginkgo.By("deleting")
cjTemplate.Name = "for-removal"
forRemovalCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = cjClient.Delete(context.TODO(), forRemovalCronJob.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
cj, err := cjClient.Get(context.TODO(), forRemovalCronJob.Name, metav1.GetOptions{})
// If controller does not support finalizers, we expect a 404. Otherwise we validate finalizer behavior.
if err == nil {
expectFinalizer(cj, "deleting cronjob")
} else {
framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected 404, got %v", err))
}

ginkgo.By("deleting a collection")
err = cjClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
cjs, err = cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
framework.ExpectNoError(err)
// Should have <= 1 items since some cronjobs might not have been deleted yet due to finalizers
framework.ExpectEqual(len(cjs.Items) <= 1, true, "filtered list should be <= 1")
// Should have <= 2 items since some cronjobs might not have been deleted yet due to finalizers
framework.ExpectEqual(len(cjs.Items) <= 2, true, "filtered list should be <= 2")
// Validate finalizers
for _, cj := range cjs.Items {
expectFinalizer(&cj, "deleting cronjob collection")
Expand All @@ -431,7 +445,7 @@ var _ = SIGDescribe("CronJob", func() {

})

func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) {
func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1.CronJob) {
cronJob, err := createCronJob(c, ns, cronJob)
framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", ns)

Expand Down Expand Up @@ -473,22 +487,22 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1
}

// newTestCronJob returns a cronjob which does one of several testing behaviors.
func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.ConcurrencyPolicy,
command []string, successfulJobsHistoryLimit *int32, failedJobsHistoryLimit *int32) *batchv1beta1.CronJob {
func newTestCronJob(name, schedule string, concurrencyPolicy batchv1.ConcurrencyPolicy,
command []string, successfulJobsHistoryLimit *int32, failedJobsHistoryLimit *int32) *batchv1.CronJob {
parallelism := int32(1)
completions := int32(1)
backofflimit := int32(1)
sj := &batchv1beta1.CronJob{
sj := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
TypeMeta: metav1.TypeMeta{
Kind: "CronJob",
},
Spec: batchv1beta1.CronJobSpec{
Spec: batchv1.CronJobSpec{
Schedule: schedule,
ConcurrencyPolicy: concurrencyPolicy,
JobTemplate: batchv1beta1.JobTemplateSpec{
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Expand Down Expand Up @@ -530,17 +544,17 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
return sj
}

func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{})
func createCronJob(c clientset.Interface, ns string, cronJob *batchv1.CronJob) (*batchv1.CronJob, error) {
return c.BatchV1().CronJobs(ns).Create(context.TODO(), cronJob, metav1.CreateOptions{})
}

func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) {
return c.BatchV1beta1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
func getCronJob(c clientset.Interface, ns, name string) (*batchv1.CronJob, error) {
return c.BatchV1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
}

// deleteCronJob deletes the named CronJob from the given namespace using the
// batch/v1 client. Background propagation is requested so the Jobs (and their
// Pods) owned by the CronJob are garbage-collected along with it.
func deleteCronJob(c clientset.Interface, ns, name string) error {
	propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob
	return c.BatchV1().CronJobs(ns).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}

// Wait for at least given amount of active jobs.
Expand Down

0 comments on commit 4770211

Please sign in to comment.