
Commit cccbb6f

Add e2e tests
1 parent 5e97ac9 commit cccbb6f

File tree: 3 files changed, +223 -0 lines


test/e2e/apps/job.go

Lines changed: 46 additions & 0 deletions
@@ -42,6 +42,7 @@ import (
 	"k8s.io/client-go/util/retry"
 	"k8s.io/client-go/util/workqueue"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -1326,6 +1327,51 @@ done`}
 			WithPolling(time.Second).WithTimeout(3 * time.Second).
 			Should(gomega.HaveField("Status", gomega.BeEquivalentTo(batchv1.JobStatus{})))
 	})
+
+	framework.It("containers restarted by container restart policy should not trigger PodFailurePolicy", framework.WithFeature("ContainerRestartRules"), framework.WithFeatureGate(features.ContainerRestartRules), func(ctx context.Context) {
+		parallelism := int32(1)
+		completions := int32(1)
+		backoffLimit := int32(1)
+		containerRestartPolicyOnFailure := v1.ContainerRestartPolicyOnFailure
+
+		ginkgo.By("Looking for a node to schedule job pod")
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+		framework.ExpectNoError(err)
+
+		ginkgo.By("Creating a job with container-level RestartPolicy and PodFailurePolicy")
+		job := e2ejob.NewTestJobOnNode("failOnce", "managed-by", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
+		container := job.Spec.Template.Spec.Containers[0]
+		container.RestartPolicy = &containerRestartPolicyOnFailure
+		job.Spec.Template.Spec.Containers[0] = container
+		job.Spec.PodFailurePolicy = &batchv1.PodFailurePolicy{
+			Rules: []batchv1.PodFailurePolicyRule{{
+				Action: batchv1.PodFailurePolicyActionFailJob,
+				OnExitCodes: &batchv1.PodFailurePolicyOnExitCodesRequirement{
+					ContainerName: &container.Name,
+					Operator:      batchv1.PodFailurePolicyOnExitCodesOpIn,
+					Values:        []int32{1},
+				},
+			}},
+		}
+		job, err = e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s/%s", job.Namespace, job.Name)
+
+		ginkgo.By("Waiting for job to complete")
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobReasonCompletionsReached, completions)
+		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensuring job succeeded")
+		job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to get job")
+		for _, cond := range job.Status.Conditions {
+			if cond.Type == batchv1.JobComplete {
+				gomega.Expect(cond.Status).Should(gomega.Equal(v1.ConditionTrue))
+			}
+		}
+		gomega.Expect(job.Status.Active).Should(gomega.Equal(int32(0)))
+		gomega.Expect(job.Status.Ready).Should(gomega.Equal(ptr.To[int32](0)))
+		gomega.Expect(job.Status.Terminating).Should(gomega.Equal(ptr.To[int32](0)))
+	})
 })
 
 func updateJobSuspendWithRetries(ctx context.Context, f *framework.Framework, job *batchv1.Job, suspend *bool) error {

test/e2e/feature/feature.go

Lines changed: 4 additions & 0 deletions
@@ -75,6 +75,10 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	ComprehensiveNamespaceDraining = framework.WithFeature(framework.ValidFeatures.Add("ComprehensiveNamespaceDraining"))
 
+	// Owner: sig-node
+	// Enables configuring per-container restart policy and restart policy rules.
+	ContainerRestartRules = framework.WithFeature(framework.ValidFeatures.Add("ContainerRestartRules"))
+
 	// Owner: sig-node
 	// Enables configuring custom stop signals for containers from container lifecycle
 	ContainerStopSignals = framework.WithFeature(framework.ValidFeatures.Add("ContainerStopSignals"))
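
For context, here is a minimal sketch (not part of this commit) of the container-level fields that this feature label covers. It uses only the types and constants exercised by the new tests in test/e2e/node/pods.go below; the package name, image, and exit-code value are arbitrary examples.

// sketch_container_restart_rules.go: illustrative only, assuming the
// ContainerRestartRules feature gate is enabled on the cluster under test.
package example

import (
	v1 "k8s.io/api/core/v1"
)

func exampleContainer() v1.Container {
	// Container-level default: never restart this container...
	restartNever := v1.ContainerRestartPolicyNever
	return v1.Container{
		Name:          "main-container",
		Image:         "busybox",
		Command:       []string{"/bin/sh", "-c", "exit 42"},
		RestartPolicy: &restartNever,
		// ...unless a restart rule matches: restart when the container
		// exits with code 42.
		RestartPolicyRules: []v1.ContainerRestartRule{{
			Action: v1.ContainerRestartRuleActionRestart,
			ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
				Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
				Values:   []int32{42},
			},
		}},
	}
}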

test/e2e/node/pods.go

Lines changed: 173 additions & 0 deletions
@@ -718,6 +718,179 @@ var _ = SIGDescribe("Pods Extended (pod generation)", feature.PodObservedGenerat
 	})
 })
 
+var _ = SIGDescribe("Pod Extended (container restart policy)", feature.ContainerRestartRules, framework.WithFeatureGate(features.ContainerRestartRules), func() {
+	f := framework.NewDefaultFramework("pods")
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
+
+	ginkgo.Describe("Container Restart Rules", func() {
+		var (
+			containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
+			containerRestartPolicyNever  = v1.ContainerRestartPolicyNever
+		)
+
+		ginkgo.It("should restart container on rule match", func(ctx context.Context) {
+			podName := "restart-rules-exit-code-" + string(uuid.NewUUID())
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: podName,
+				},
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyNever,
+					Containers: []v1.Container{
+						{
+							Name:          "main-container",
+							Image:         imageutils.GetE2EImage(imageutils.BusyBox),
+							Command:       []string{"/bin/sh", "-c", "exit 42"},
+							RestartPolicy: &containerRestartPolicyNever,
+							RestartPolicyRules: []v1.ContainerRestartRule{
+								{
+									Action: v1.ContainerRestartRuleActionRestart,
+									ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
+										Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
+										Values:   []int32{42},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+
+			createAndValidateRestartableContainer(ctx, f, pod, podName, "main-container")
+		})
+
+		ginkgo.It("should not restart container on rule mismatch, container restart policy Never", func(ctx context.Context) {
+			podName := "restart-rules-no-restart-" + string(uuid.NewUUID())
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: podName,
+				},
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyNever,
+					Containers: []v1.Container{
+						{
+							Name:          "main-container",
+							Image:         imageutils.GetE2EImage(imageutils.BusyBox),
+							Command:       []string{"/bin/sh", "-c", "exit 1"},
+							RestartPolicy: &containerRestartPolicyNever,
+							RestartPolicyRules: []v1.ContainerRestartRule{
+								{
+									Action: v1.ContainerRestartRuleActionRestart,
+									ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
+										Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
+										Values:   []int32{42},
+									},
+								},
+							},
+						},
+					},
+				},
+			}
+
+			createAndValidateNonRestartableContainer(ctx, f, pod, podName, "main-container")
+		})
+
ginkgo.It("should restart container on container-level restart policy Never", func(ctx context.Context) {
+			podName := "restart-rules-no-restart-" + string(uuid.NewUUID())
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: podName,
+				},
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyAlways,
+					Containers: []v1.Container{
+						{
+							Name:          "main-container",
+							Image:         imageutils.GetE2EImage(imageutils.BusyBox),
+							Command:       []string{"/bin/sh", "-c", "exit 1"},
+							RestartPolicy: &containerRestartPolicyNever,
+						},
+					},
+				},
+			}
+
+			createAndValidateNonRestartableContainer(ctx, f, pod, podName, "main-container")
+		})
+
+		ginkgo.It("should restart container on container-level restart policy Always", func(ctx context.Context) {
+			podName := "restart-rules-no-restart-" + string(uuid.NewUUID())
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: podName,
+				},
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyNever,
+					Containers: []v1.Container{
+						{
+							Name:          "main-container",
+							Image:         imageutils.GetE2EImage(imageutils.BusyBox),
+							Command:       []string{"/bin/sh", "-c", "exit 1"},
+							RestartPolicy: &containerRestartPolicyAlways,
+						},
+					},
+				},
+			}
+
+			createAndValidateRestartableContainer(ctx, f, pod, podName, "main-container")
+		})
+
+		ginkgo.It("should restart container on pod-level restart policy Always when no container-level restart policy", func(ctx context.Context) {
+			podName := "restart-rules-no-match-" + string(uuid.NewUUID())
+			pod := &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: podName,
+				},
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyAlways,
+					Containers: []v1.Container{
+						{
+							Name:    "main-container",
+							Image:   imageutils.GetE2EImage(imageutils.BusyBox),
+							Command: []string{"/bin/sh", "-c", "exit 1"},
+						},
+					},
+				},
+			}
+
+			createAndValidateRestartableContainer(ctx, f, pod, podName, "main-container")
+		})
+	})
+})
+
+func createAndValidateRestartableContainer(ctx context.Context, f *framework.Framework, pod *v1.Pod, podName, containerName string) {
+	ginkgo.By("Creating the pod")
+	e2epod.NewPodClient(f).Create(ctx, pod)
+
+	ginkgo.By("Waiting for the container to restart")
+	err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, podName, "container restarted", 10*time.Minute, func(pod *v1.Pod) (bool, error) {
+		for _, status := range pod.Status.ContainerStatuses {
+			if status.Name == containerName && status.RestartCount > 0 {
+				return true, nil
+			}
+		}
+		return false, nil
+	})
+	framework.ExpectNoError(err, "failed to see container restart")
+}
+
+func createAndValidateNonRestartableContainer(ctx context.Context, f *framework.Framework, pod *v1.Pod, podName, containerName string) {
+	ginkgo.By("Creating the pod")
+	e2epod.NewPodClient(f).Create(ctx, pod)
+
+	ginkgo.By("Waiting for the pod to terminate")
+	err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name, 10*time.Minute)
+	framework.ExpectNoError(err, "failed to wait for pod terminate")
+
+	ginkgo.By("Checking container restart count")
+	p, err := e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
+	framework.ExpectNoError(err, "failed to get pod")
+	for _, status := range p.Status.ContainerStatuses {
+		if status.Name == containerName {
+			gomega.Expect(status.RestartCount).To(gomega.BeZero())
+		}
+	}
+}
+
 func createAndTestPodRepeatedly(ctx context.Context, workers, iterations int, scenario podScenario, podClient v1core.PodInterface) {
 	var (
 		lock sync.Mutex