diff --git a/e2e/native/native_with_sources_test.go b/e2e/native/native_with_sources_test.go
index 6ceeb3cf9d..5adfa807df 100644
--- a/e2e/native/native_with_sources_test.go
+++ b/e2e/native/native_with_sources_test.go
@@ -41,7 +41,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

 	t.Run("java native support", func(t *testing.T) {
 		name := javaNativeName
-		g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
+		g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native",
+			"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

 		g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutVeryLong).Should(Equal(corev1.PodRunning))
 		g.Eventually(IntegrationPod(t, ctx, ns, name), TestTimeoutShort).
@@ -52,7 +53,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

 	t.Run("java native same should not rebuild", func(t *testing.T) {
 		name := javaNativeCloneName
-		g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
+		g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native",
+			"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

 		// This one should run quickly as it is supposed to reuse an IntegrationKit
 		g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutShort).Should(Equal(corev1.PodRunning))
@@ -66,7 +68,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

 	t.Run("java native should rebuild", func(t *testing.T) {
 		name := javaNative2Name
-		g.Expect(KamelRun(t, ctx, ns, "files/Java2.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
+		g.Expect(KamelRun(t, ctx, ns, "files/Java2.java", "--name", name, "-t", "quarkus.build-mode=native",
+			"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

 		g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutVeryLong).Should(Equal(corev1.PodRunning))
 		g.Eventually(IntegrationPod(t, ctx, ns, name), TestTimeoutShort).
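All three runs above pass the builder trait the same `task:quantity` pair, `builder.tasks-limit-memory=quarkus-native:9.5Gi`: a task name and a Kubernetes resource quantity joined by a colon, the same shape that `existsTaskRequest` splits on in `pkg/trait/builder.go` further down. Below is a minimal standalone sketch of how such a pair could be parsed and validated; `parseTaskQuantity` is a hypothetical helper for illustration, not code from this patch:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/api/resource"
)

// parseTaskQuantity splits a "task:quantity" pair such as
// "quarkus-native:9.5Gi" and validates the quantity part.
// Hypothetical helper, shown only to illustrate the trait value format.
func parseTaskQuantity(s string) (string, resource.Quantity, error) {
	parts := strings.SplitN(s, ":", 2)
	if len(parts) != 2 {
		return "", resource.Quantity{}, fmt.Errorf("expected task:quantity, got %q", s)
	}
	q, err := resource.ParseQuantity(parts[1])
	return parts[0], q, err
}

func main() {
	task, q, err := parseTaskQuantity("quarkus-native:9.5Gi")
	if err != nil {
		panic(err)
	}
	fmt.Println(task, q.String()) // quarkus-native 9.5Gi
}
```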
diff --git a/pkg/controller/build/build_pod.go b/pkg/controller/build/build_pod.go
index 2b2b877980..15995a56aa 100644
--- a/pkg/controller/build/build_pod.go
+++ b/pkg/controller/build/build_pod.go
@@ -107,8 +107,15 @@ func newBuildPod(ctx context.Context, client client.Client, build *v1.Build) *co
 	return pod
 }

-func configureResources(taskName string, build *v1.Build, container *corev1.Container) {
+func configureTaskResources(taskName string, build *v1.Build, container *corev1.Container) {
 	conf := build.TaskConfiguration(taskName)
+	configureResources(taskName, build, container,
+		conf.RequestCPU, conf.LimitCPU, conf.RequestMemory, conf.LimitMemory)
+}
+
+func configureResources(
+	taskName string, build *v1.Build, container *corev1.Container,
+	requestCPU, limitCPU, requestMemory, limitMemory string) {
 	requestsList := container.Resources.Requests
 	limitsList := container.Resources.Limits
 	var err error
@@ -119,25 +126,25 @@ func configureResources(taskName string, build *v1.Build, container *corev1.Cont
 		limitsList = make(corev1.ResourceList)
 	}

-	requestsList, err = kubernetes.ConfigureResource(conf.RequestCPU, requestsList, corev1.ResourceCPU)
+	requestsList, err = kubernetes.ConfigureResource(requestCPU, requestsList, corev1.ResourceCPU)
 	if err != nil {
 		Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
-			Errorf(err, "Could not configure builder resource cpu, leaving default value")
+			Errorf(err, "Could not configure %s resource cpu, leaving default value", taskName)
 	}
-	requestsList, err = kubernetes.ConfigureResource(conf.RequestMemory, requestsList, corev1.ResourceMemory)
+	requestsList, err = kubernetes.ConfigureResource(requestMemory, requestsList, corev1.ResourceMemory)
 	if err != nil {
 		Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
-			Errorf(err, "Could not configure builder resource memory, leaving default value")
+			Errorf(err, "Could not configure %s resource memory, leaving default value", taskName)
 	}
-	limitsList, err = kubernetes.ConfigureResource(conf.LimitCPU, limitsList, corev1.ResourceCPU)
+	limitsList, err = kubernetes.ConfigureResource(limitCPU, limitsList, corev1.ResourceCPU)
 	if err != nil {
 		Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
-			Errorf(err, "Could not configure builder limit cpu, leaving default value")
+			Errorf(err, "Could not configure %s limit cpu, leaving default value", taskName)
 	}
-	limitsList, err = kubernetes.ConfigureResource(conf.LimitMemory, limitsList, corev1.ResourceMemory)
+	limitsList, err = kubernetes.ConfigureResource(limitMemory, limitsList, corev1.ResourceMemory)
 	if err != nil {
 		Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
-			Errorf(err, "Could not configure builder limit memory, leaving default value")
+			Errorf(err, "Could not configure %s limit memory, leaving default value", taskName)
 	}

 	container.Resources.Requests = requestsList
@@ -228,7 +235,12 @@ func addBuildTaskToPod(ctx context.Context, client client.Client, build *v1.Buil
 		}
 	}

-	configureResources(taskName, build, &container)
+	// Default resources for a build. We set a high upper bound limit
+	// in order to let any heavy Maven build run without problems.
+	// If the process cannot complete, the user should increase these limits accordingly.
+	configureResources(taskName, build, &container, "500m", "1", "512Mi", "4Gi")
+	// Possible user-based resource configuration
+	configureTaskResources(taskName, build, &container)

 	addContainerToPod(build, container, pod)
 }
@@ -249,7 +261,10 @@ func addCustomTaskToPod(build *v1.Build, task *v1.UserTask, pod *corev1.Pod) {
 		}
 	}

-	configureResources(task.Name, build, &container)
+	// Default resources for a custom task. We assume a
+	// lighter process which won't require too many resources.
+	configureResources(task.Name, build, &container, "250m", "500m", "256Mi", "1Gi")
+	configureTaskResources(task.Name, build, &container)

 	addContainerToPod(build, container, pod)
 }
diff --git a/pkg/controller/build/build_pod_test.go b/pkg/controller/build/build_pod_test.go
index 7a41d3923c..97727f4542 100644
--- a/pkg/controller/build/build_pod_test.go
+++ b/pkg/controller/build/build_pod_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/apache/camel-k/v2/pkg/internal"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
@@ -67,3 +68,97 @@ func TestNewBuildPodConfiguration(t *testing.T) {
 	assert.Equal(t, map[string]string{"node": "selector"}, pod.Spec.NodeSelector)
 	assert.Equal(t, map[string]string{"annotation": "value"}, pod.Annotations)
 }
+
+func TestConfigureResourcesDefault(t *testing.T) {
+	build := v1.Build{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "theBuildName",
+		},
+		Spec: v1.BuildSpec{
+			Tasks: []v1.Task{
+				{
+					Builder: &v1.BuilderTask{
+						BaseTask: v1.BaseTask{
+							Name:          "builder",
+							Configuration: v1.BuildConfiguration{},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	container := corev1.Container{}
+	configureResources("builder", &build, &container, "250m", "500m", "512Mi", "1Gi")
+	configureTaskResources("builder", &build, &container)
+
+	assert.Equal(t, "250m", container.Resources.Requests.Cpu().String())
+	assert.Equal(t, "500m", container.Resources.Limits.Cpu().String())
+	assert.Equal(t, "512Mi", container.Resources.Requests.Memory().String())
+	assert.Equal(t, "1Gi", container.Resources.Limits.Memory().String())
+}
+
+func TestConfigureResources(t *testing.T) {
+	build := v1.Build{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "theBuildName",
+		},
+		Spec: v1.BuildSpec{
+			Tasks: []v1.Task{
+				{
+					Builder: &v1.BuilderTask{
+						BaseTask: v1.BaseTask{
+							Name: "builder",
+							Configuration: v1.BuildConfiguration{
+								RequestCPU:    "500m",
+								LimitCPU:      "1000m",
+								RequestMemory: "512Mi",
+								LimitMemory:   "2048Mi",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	container := corev1.Container{}
+	configureTaskResources("builder", &build, &container)
+
+	assert.Equal(t, "500m", container.Resources.Requests.Cpu().String())
+	assert.Equal(t, "1", container.Resources.Limits.Cpu().String())
+	assert.Equal(t, "512Mi", container.Resources.Requests.Memory().String())
+	assert.Equal(t, "2Gi", container.Resources.Limits.Memory().String())
+}
+
+func TestConfigureResourcesOverride(t *testing.T) {
+	build := v1.Build{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "theBuildName",
+		},
+		Spec: v1.BuildSpec{
+			Tasks: []v1.Task{
+				{
+					Builder: &v1.BuilderTask{
+						BaseTask: v1.BaseTask{
+							Name: "builder",
+							Configuration: v1.BuildConfiguration{
+								RequestCPU:    "500m",
+								LimitCPU:      "1000m",
+								RequestMemory: "512Mi",
+								LimitMemory:   "2048Mi",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	container := corev1.Container{}
+	configureResources("builder", &build, &container, "10m", "50m", "100Mi", "200Mi")
+	configureTaskResources("builder", &build, &container)
+
"500m", container.Resources.Requests.Cpu().String()) + assert.Equal(t, "1", container.Resources.Limits.Cpu().String()) + assert.Equal(t, "512Mi", container.Resources.Requests.Memory().String()) + assert.Equal(t, "2Gi", container.Resources.Limits.Memory().String()) +} diff --git a/pkg/trait/builder.go b/pkg/trait/builder.go index 5291add368..6b066e48d4 100644 --- a/pkg/trait/builder.go +++ b/pkg/trait/builder.go @@ -124,6 +124,7 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio return condition, err } + //nolint: nestif if ok && (isNativeIntegration || isNativeKit) { // TODO expect maven repository in local repo (need to change builder pod accordingly!) command := builder.QuarkusRuntimeSupport(e.CamelCatalog.GetCamelQuarkusVersion()).BuildCommands() @@ -136,7 +137,9 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio // it should be performed as the last custom task t.Tasks = append(t.Tasks, fmt.Sprintf(`quarkus-native;%s;/bin/bash -c "%s"`, nativeBuilderImage, command)) // Force the build to run in a separate Pod and strictly sequential - m := "This is a Quarkus native build: setting build configuration with build Pod strategy and native container sensible resources (if not specified by the user). Make sure your cluster can handle it." + m := "This is a Quarkus native build: setting default build configuration with build Pod strategy and " + + "native container sensible resources (max 4 cpus, 16 Gi memory, unless specified by the user). " + + "Make sure your cluster can handle it." t.L.Info(m) condition = newOrAppend(condition, m) @@ -149,11 +152,18 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio if !existsTaskRequest(t.TasksRequestMemory, "quarkus-native") { t.TasksRequestMemory = append(t.TasksRequestMemory, "quarkus-native:4Gi") } + if !existsTaskRequest(t.TasksLimitCPU, "quarkus-native") { + t.TasksLimitCPU = append(t.TasksLimitCPU, "quarkus-native:4000m") + } + if !existsTaskRequest(t.TasksLimitMemory, "quarkus-native") { + t.TasksLimitMemory = append(t.TasksLimitMemory, "quarkus-native:16Gi") + } } return condition, nil } +//nolint:unparam func existsTaskRequest(tasks []string, taskName string) bool { for _, task := range tasks { ts := strings.Split(task, ":")