Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(ctrl): default builder Pod resources #6054

Merged
merged 1 commit into from
Feb 2, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 6 additions & 3 deletions e2e/native/native_with_sources_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

t.Run("java native support", func(t *testing.T) {
name := javaNativeName
g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native",
"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutVeryLong).Should(Equal(corev1.PodRunning))
g.Eventually(IntegrationPod(t, ctx, ns, name), TestTimeoutShort).
Expand All @@ -52,7 +53,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

t.Run("java native same should not rebuild", func(t *testing.T) {
name := javaNativeCloneName
g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
g.Expect(KamelRun(t, ctx, ns, "files/Java.java", "--name", name, "-t", "quarkus.build-mode=native",
"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

// This one should run quickly as it is supposed to reuse an IntegrationKit
g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutShort).Should(Equal(corev1.PodRunning))
Expand All @@ -66,7 +68,8 @@ func TestNativeHighMemoryIntegrations(t *testing.T) {

t.Run("java native should rebuild", func(t *testing.T) {
name := javaNative2Name
g.Expect(KamelRun(t, ctx, ns, "files/Java2.java", "--name", name, "-t", "quarkus.build-mode=native", "-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())
g.Expect(KamelRun(t, ctx, ns, "files/Java2.java", "--name", name, "-t", "quarkus.build-mode=native",
"-t", "builder.tasks-limit-memory=quarkus-native:9.5Gi").Execute()).To(Succeed())

g.Eventually(IntegrationPodPhase(t, ctx, ns, name), TestTimeoutVeryLong).Should(Equal(corev1.PodRunning))
g.Eventually(IntegrationPod(t, ctx, ns, name), TestTimeoutShort).
Expand Down
37 changes: 26 additions & 11 deletions pkg/controller/build/build_pod.go
Original file line number Diff line number Diff line change
Expand Up @@ -107,8 +107,15 @@ func newBuildPod(ctx context.Context, client client.Client, build *v1.Build) *co
return pod
}

func configureResources(taskName string, build *v1.Build, container *corev1.Container) {
// configureTaskResources looks up the user-provided configuration for the
// given build task and applies its resource requests/limits to the container.
func configureTaskResources(taskName string, build *v1.Build, container *corev1.Container) {
	taskConf := build.TaskConfiguration(taskName)
	configureResources(
		taskName, build, container,
		taskConf.RequestCPU,
		taskConf.LimitCPU,
		taskConf.RequestMemory,
		taskConf.LimitMemory,
	)
}

func configureResources(
taskName string, build *v1.Build, container *corev1.Container,
requestCPU, limitCPU, requestMemory, limitMemory string) {
requestsList := container.Resources.Requests
limitsList := container.Resources.Limits
var err error
Expand All @@ -119,25 +126,25 @@ func configureResources(taskName string, build *v1.Build, container *corev1.Cont
limitsList = make(corev1.ResourceList)
}

requestsList, err = kubernetes.ConfigureResource(conf.RequestCPU, requestsList, corev1.ResourceCPU)
requestsList, err = kubernetes.ConfigureResource(requestCPU, requestsList, corev1.ResourceCPU)
if err != nil {
Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
Errorf(err, "Could not configure builder resource cpu, leaving default value")
Errorf(err, "Could not configure %s resource cpu, leaving default value", taskName)
}
requestsList, err = kubernetes.ConfigureResource(conf.RequestMemory, requestsList, corev1.ResourceMemory)
requestsList, err = kubernetes.ConfigureResource(requestMemory, requestsList, corev1.ResourceMemory)
if err != nil {
Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
Errorf(err, "Could not configure builder resource memory, leaving default value")
Errorf(err, "Could not configure %s resource memory, leaving default value", taskName)
}
limitsList, err = kubernetes.ConfigureResource(conf.LimitCPU, limitsList, corev1.ResourceCPU)
limitsList, err = kubernetes.ConfigureResource(limitCPU, limitsList, corev1.ResourceCPU)
if err != nil {
Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
Errorf(err, "Could not configure builder limit cpu, leaving default value")
Errorf(err, "Could not configure %s limit cpu, leaving default value", taskName)
}
limitsList, err = kubernetes.ConfigureResource(conf.LimitMemory, limitsList, corev1.ResourceMemory)
limitsList, err = kubernetes.ConfigureResource(limitMemory, limitsList, corev1.ResourceMemory)
if err != nil {
Log.WithValues("request-namespace", build.Namespace, "request-name", build.Name).
Errorf(err, "Could not configure builder limit memory, leaving default value")
Errorf(err, "Could not configure %s limit memory, leaving default value", taskName)
}

container.Resources.Requests = requestsList
Expand Down Expand Up @@ -228,7 +235,12 @@ func addBuildTaskToPod(ctx context.Context, client client.Client, build *v1.Buil
}
}

configureResources(taskName, build, &container)
// Default resources for a build. We set a high upper bound limit
// in order to let any heavy Maven build to run without problems.
// If the process cannot complete, then the user should increase these limits accordingly.
configureResources(taskName, build, &container, "500m", "1", "512Mi", "4Gi")
// possible user based resource configuration
configureTaskResources(taskName, build, &container)
addContainerToPod(build, container, pod)
}

Expand All @@ -249,7 +261,10 @@ func addCustomTaskToPod(build *v1.Build, task *v1.UserTask, pod *corev1.Pod) {
}
}

configureResources(task.Name, build, &container)
// Default resources for a custom task. We assume some
// lighter process which won't require too much resources.
configureResources(task.Name, build, &container, "250m", "500m", "256Mi", "1Gi")
configureTaskResources(task.Name, build, &container)
addContainerToPod(build, container, pod)
}

Expand Down
95 changes: 95 additions & 0 deletions pkg/controller/build/build_pod_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import (
"github.com/apache/camel-k/v2/pkg/internal"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

Expand Down Expand Up @@ -67,3 +68,97 @@ func TestNewBuildPodConfiguration(t *testing.T) {
assert.Equal(t, map[string]string{"node": "selector"}, pod.Spec.NodeSelector)
assert.Equal(t, map[string]string{"annotation": "value"}, pod.Annotations)
}

// TestConfigureResourcesDefault verifies that the fallback values handed to
// configureResources survive when the task carries an empty user configuration
// (configureTaskResources must not override them).
func TestConfigureResourcesDefault(t *testing.T) {
	build := v1.Build{
		ObjectMeta: metav1.ObjectMeta{
			Name: "theBuildName",
		},
		Spec: v1.BuildSpec{
			Tasks: []v1.Task{
				{
					Builder: &v1.BuilderTask{
						BaseTask: v1.BaseTask{
							Name: "builder",
							// Deliberately empty: no user overrides.
							Configuration: v1.BuildConfiguration{},
						},
					},
				},
			},
		},
	}

	ctr := corev1.Container{}
	// First the defaults, then the (empty) user configuration on top.
	configureResources("builder", &build, &ctr, "250m", "500m", "512Mi", "1Gi")
	configureTaskResources("builder", &build, &ctr)

	// Requests must match the defaults exactly.
	assert.Equal(t, "250m", ctr.Resources.Requests.Cpu().String())
	assert.Equal(t, "512Mi", ctr.Resources.Requests.Memory().String())
	// Limits must match the defaults exactly.
	assert.Equal(t, "500m", ctr.Resources.Limits.Cpu().String())
	assert.Equal(t, "1Gi", ctr.Resources.Limits.Memory().String())
}

// TestConfigureResources verifies that configureTaskResources applies the
// resource values declared in the task's BuildConfiguration to the container
// (note the canonical rendering: 1000m -> "1", 2048Mi -> "2Gi").
func TestConfigureResources(t *testing.T) {
	userConf := v1.BuildConfiguration{
		RequestCPU:    "500m",
		LimitCPU:      "1000m",
		RequestMemory: "512Mi",
		LimitMemory:   "2048Mi",
	}
	build := v1.Build{
		ObjectMeta: metav1.ObjectMeta{
			Name: "theBuildName",
		},
		Spec: v1.BuildSpec{
			Tasks: []v1.Task{
				{
					Builder: &v1.BuilderTask{
						BaseTask: v1.BaseTask{
							Name:          "builder",
							Configuration: userConf,
						},
					},
				},
			},
		},
	}

	ctr := corev1.Container{}
	configureTaskResources("builder", &build, &ctr)

	assert.Equal(t, "500m", ctr.Resources.Requests.Cpu().String())
	assert.Equal(t, "512Mi", ctr.Resources.Requests.Memory().String())
	assert.Equal(t, "1", ctr.Resources.Limits.Cpu().String())
	assert.Equal(t, "2Gi", ctr.Resources.Limits.Memory().String())
}

// TestConfigureResourcesOverride verifies precedence: when defaults are set
// first via configureResources and the task also carries a user configuration,
// the user-provided values win over the defaults.
func TestConfigureResourcesOverride(t *testing.T) {
	userConf := v1.BuildConfiguration{
		RequestCPU:    "500m",
		LimitCPU:      "1000m",
		RequestMemory: "512Mi",
		LimitMemory:   "2048Mi",
	}
	build := v1.Build{
		ObjectMeta: metav1.ObjectMeta{
			Name: "theBuildName",
		},
		Spec: v1.BuildSpec{
			Tasks: []v1.Task{
				{
					Builder: &v1.BuilderTask{
						BaseTask: v1.BaseTask{
							Name:          "builder",
							Configuration: userConf,
						},
					},
				},
			},
		},
	}

	ctr := corev1.Container{}
	// Tiny defaults first...
	configureResources("builder", &build, &ctr, "10m", "50m", "100Mi", "200Mi")
	// ...then the user configuration, which must replace them.
	configureTaskResources("builder", &build, &ctr)

	assert.Equal(t, "500m", ctr.Resources.Requests.Cpu().String())
	assert.Equal(t, "512Mi", ctr.Resources.Requests.Memory().String())
	assert.Equal(t, "1", ctr.Resources.Limits.Cpu().String())
	assert.Equal(t, "2Gi", ctr.Resources.Limits.Memory().String())
}
12 changes: 11 additions & 1 deletion pkg/trait/builder.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio
return condition, err
}

//nolint: nestif
if ok && (isNativeIntegration || isNativeKit) {
// TODO expect maven repository in local repo (need to change builder pod accordingly!)
command := builder.QuarkusRuntimeSupport(e.CamelCatalog.GetCamelQuarkusVersion()).BuildCommands()
Expand All @@ -136,7 +137,9 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio
// it should be performed as the last custom task
t.Tasks = append(t.Tasks, fmt.Sprintf(`quarkus-native;%s;/bin/bash -c "%s"`, nativeBuilderImage, command))
// Force the build to run in a separate Pod and strictly sequential
m := "This is a Quarkus native build: setting build configuration with build Pod strategy and native container sensible resources (if not specified by the user). Make sure your cluster can handle it."
m := "This is a Quarkus native build: setting default build configuration with build Pod strategy and " +
"native container sensible resources (max 4 cpus, 16 Gi memory, unless specified by the user). " +
"Make sure your cluster can handle it."
t.L.Info(m)

condition = newOrAppend(condition, m)
Expand All @@ -149,11 +152,18 @@ func (t *builderTrait) configureForQuarkus(trait Trait, e *Environment, conditio
if !existsTaskRequest(t.TasksRequestMemory, "quarkus-native") {
t.TasksRequestMemory = append(t.TasksRequestMemory, "quarkus-native:4Gi")
}
if !existsTaskRequest(t.TasksLimitCPU, "quarkus-native") {
t.TasksLimitCPU = append(t.TasksLimitCPU, "quarkus-native:4000m")
}
if !existsTaskRequest(t.TasksLimitMemory, "quarkus-native") {
t.TasksLimitMemory = append(t.TasksLimitMemory, "quarkus-native:16Gi")
}
}

return condition, nil
}

//nolint:unparam
func existsTaskRequest(tasks []string, taskName string) bool {
for _, task := range tasks {
ts := strings.Split(task, ":")
Expand Down