This is an automated email from the ASF dual-hosted git repository.

astefanutti pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 7447c265fd87d03d08065ce01393c65b0345333c
Author: Antonin Stefanutti <[email protected]>
AuthorDate: Mon Dec 16 09:57:06 2019 +0100

    feat(build): Task-based builds
---
 pkg/apis/camel/v1alpha1/build_types.go             |  58 +++-
 pkg/apis/camel/v1alpha1/build_types_support.go     |  10 +
 pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go   | 203 +++++++++++---
 pkg/builder/builder.go                             |  43 +--
 pkg/builder/builder_steps.go                       |   6 +-
 pkg/builder/builder_steps_test.go                  |  40 ++-
 pkg/builder/builder_test.go                        |  20 +-
 pkg/builder/builder_types.go                       |  15 +-
 pkg/builder/kaniko/publisher.go                    | 241 +----------------
 pkg/builder/runtime/main.go                        |   6 +-
 pkg/builder/runtime/main_test.go                   |  28 +-
 pkg/builder/runtime/quarkus.go                     |   6 +-
 pkg/builder/s2i/publisher.go                       |   7 +-
 pkg/cmd/builder.go                                 |   4 +-
 pkg/cmd/builder/builder.go                         |  55 ++--
 pkg/controller/build/build_controller.go           |  34 ++-
 pkg/controller/build/initialize_pod.go             |  45 +++-
 pkg/controller/build/initialize_routine.go         |   3 +-
 pkg/controller/build/monitor_pod.go                |  50 ++--
 pkg/controller/build/monitor_routine.go            |   4 +-
 pkg/controller/build/schedule_pod.go               | 182 +++++--------
 pkg/controller/build/schedule_routine.go           |  63 +++--
 pkg/controller/build/util_pod.go                   |  73 -----
 pkg/controller/integrationkit/build.go             |  27 +-
 pkg/controller/integrationplatform/initialize.go   |  14 +-
 pkg/controller/integrationplatform/kaniko_cache.go |   6 +-
 pkg/trait/builder.go                               | 295 ++++++++++++++++++++-
 pkg/trait/builder_test.go                          |  32 ++-
 pkg/trait/deployer.go                              |  77 +-----
 pkg/trait/quarkus.go                               |   7 +-
 pkg/trait/trait_types.go                           |   4 +-
 pkg/util/patch/patch.go                            |  91 +++++++
 32 files changed, 951 insertions(+), 798 deletions(-)

diff --git a/pkg/apis/camel/v1alpha1/build_types.go 
b/pkg/apis/camel/v1alpha1/build_types.go
index 9f13e60..e694b92 100644
--- a/pkg/apis/camel/v1alpha1/build_types.go
+++ b/pkg/apis/camel/v1alpha1/build_types.go
@@ -29,17 +29,52 @@ import (
 type BuildSpec struct {
        // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
        // Important: Run "operator-sdk generate k8s" to regenerate code after 
modifying this file
-       Meta            metav1.ObjectMeta       `json:"meta,omitempty"`
-       Image           string                  `json:"image,omitempty"`
-       Steps           []string                `json:"steps,omitempty"`
-       CamelVersion    string                  `json:"camelVersion,omitempty"`
-       RuntimeVersion  string                  
`json:"runtimeVersion,omitempty"`
-       RuntimeProvider *RuntimeProvider        
`json:"runtimeProvider,omitempty"`
-       Platform        IntegrationPlatformSpec `json:"platform,omitempty"`
-       Sources         []SourceSpec            `json:"sources,omitempty"`
-       Resources       []ResourceSpec          `json:"resources,omitempty"`
-       Dependencies    []string                `json:"dependencies,omitempty"`
-       BuildDir        string                  `json:"buildDir,omitempty"`
+       Tasks []Task `json:"tasks,omitempty"`
+}
+
+type Task struct {
+       Builder *BuilderTask `json:"builder,omitempty"`
+       Kaniko  *KanikoTask  `json:"kaniko,omitempty"`
+}
+
+// BaseTask
+type BaseTask struct {
+       Name         string               `json:"name,omitempty"`
+       Affinity     *corev1.Affinity     `json:"affinity,omitempty"`
+       Volumes      []corev1.Volume      `json:"volumes,omitempty"`
+       VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
+}
+
+// ImageTask
+type ImageTask struct {
+       BaseTask `json:",inline"`
+       Image    string          `json:"image,omitempty"`
+       Args     []string        `json:"args,omitempty"`
+       Env      []corev1.EnvVar `json:"env,omitempty"`
+}
+
+// KanikoTask
+type KanikoTask struct {
+       ImageTask  `json:",inline"`
+       BuiltImage string `json:"builtImage,omitempty"`
+}
+
+// BuilderTask
+type BuilderTask struct {
+       BaseTask        `json:",inline"`
+       Meta            metav1.ObjectMeta `json:"meta,omitempty"`
+       BaseImage       string            `json:"baseImage,omitempty"`
+       CamelVersion    string            `json:"camelVersion,omitempty"`
+       RuntimeVersion  string            `json:"runtimeVersion,omitempty"`
+       RuntimeProvider *RuntimeProvider  `json:"runtimeProvider,omitempty"`
+       Sources         []SourceSpec      `json:"sources,omitempty"`
+       Resources       []ResourceSpec    `json:"resources,omitempty"`
+       Dependencies    []string          `json:"dependencies,omitempty"`
+       Steps           []string          `json:"steps,omitempty"`
+       Maven           MavenSpec         `json:"maven,omitempty"`
+       BuildDir        string            `json:"buildDir,omitempty"`
+       Properties      map[string]string `json:"properties,omitempty"`
+       Timeout         metav1.Duration   `json:"timeout,omitempty"`
 }
 
 // BuildStatus defines the observed state of Build
@@ -53,7 +88,6 @@ type BuildStatus struct {
        StartedAt  metav1.Time      `json:"startedAt,omitempty"`
        Platform   string           `json:"platform,omitempty"`
        Conditions []BuildCondition `json:"conditions,omitempty"`
-
        // Change to Duration / ISO 8601 when CRD uses OpenAPI spec v3
        // https://github.com/OAI/OpenAPI-Specification/issues/845
        Duration string `json:"duration,omitempty"`
diff --git a/pkg/apis/camel/v1alpha1/build_types_support.go 
b/pkg/apis/camel/v1alpha1/build_types_support.go
index 8aae4ad..4fb40b7 100644
--- a/pkg/apis/camel/v1alpha1/build_types_support.go
+++ b/pkg/apis/camel/v1alpha1/build_types_support.go
@@ -22,6 +22,16 @@ import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// GetName --
+func (t *Task) GetName() string {
+       if t.Builder != nil {
+               return t.Builder.Name
+       } else if t.Kaniko != nil {
+               return t.Kaniko.Name
+       }
+       return ""
+}
+
 // NewBuild --
 func NewBuild(namespace string, name string) Build {
        return Build{
diff --git a/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go 
b/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
index a930cce..1ecf9f0 100644
--- a/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/camel/v1alpha1/zz_generated.deepcopy.go
@@ -5,8 +5,8 @@
 package v1alpha1
 
 import (
-       corev1 "k8s.io/api/core/v1"
-       v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+       v1 "k8s.io/api/core/v1"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
@@ -27,6 +27,41 @@ func (in *Artifact) DeepCopy() *Artifact {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
+func (in *BaseTask) DeepCopyInto(out *BaseTask) {
+       *out = *in
+       if in.Affinity != nil {
+               in, out := &in.Affinity, &out.Affinity
+               *out = new(v1.Affinity)
+               (*in).DeepCopyInto(*out)
+       }
+       if in.Volumes != nil {
+               in, out := &in.Volumes, &out.Volumes
+               *out = make([]v1.Volume, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
+       }
+       if in.VolumeMounts != nil {
+               in, out := &in.VolumeMounts, &out.VolumeMounts
+               *out = make([]v1.VolumeMount, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
+       }
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new BaseTask.
+func (in *BaseTask) DeepCopy() *BaseTask {
+       if in == nil {
+               return nil
+       }
+       out := new(BaseTask)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *Build) DeepCopyInto(out *Build) {
        *out = *in
        out.TypeMeta = in.TypeMeta
@@ -108,32 +143,12 @@ func (in *BuildList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *BuildSpec) DeepCopyInto(out *BuildSpec) {
        *out = *in
-       in.Meta.DeepCopyInto(&out.Meta)
-       if in.Steps != nil {
-               in, out := &in.Steps, &out.Steps
-               *out = make([]string, len(*in))
-               copy(*out, *in)
-       }
-       if in.RuntimeProvider != nil {
-               in, out := &in.RuntimeProvider, &out.RuntimeProvider
-               *out = new(RuntimeProvider)
-               (*in).DeepCopyInto(*out)
-       }
-       in.Platform.DeepCopyInto(&out.Platform)
-       if in.Sources != nil {
-               in, out := &in.Sources, &out.Sources
-               *out = make([]SourceSpec, len(*in))
-               copy(*out, *in)
-       }
-       if in.Resources != nil {
-               in, out := &in.Resources, &out.Resources
-               *out = make([]ResourceSpec, len(*in))
-               copy(*out, *in)
-       }
-       if in.Dependencies != nil {
-               in, out := &in.Dependencies, &out.Dependencies
-               *out = make([]string, len(*in))
-               copy(*out, *in)
+       if in.Tasks != nil {
+               in, out := &in.Tasks, &out.Tasks
+               *out = make([]Task, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
        }
        return
 }
@@ -183,6 +198,58 @@ func (in *BuildStatus) DeepCopy() *BuildStatus {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
+func (in *BuilderTask) DeepCopyInto(out *BuilderTask) {
+       *out = *in
+       in.BaseTask.DeepCopyInto(&out.BaseTask)
+       in.Meta.DeepCopyInto(&out.Meta)
+       if in.RuntimeProvider != nil {
+               in, out := &in.RuntimeProvider, &out.RuntimeProvider
+               *out = new(RuntimeProvider)
+               (*in).DeepCopyInto(*out)
+       }
+       if in.Sources != nil {
+               in, out := &in.Sources, &out.Sources
+               *out = make([]SourceSpec, len(*in))
+               copy(*out, *in)
+       }
+       if in.Resources != nil {
+               in, out := &in.Resources, &out.Resources
+               *out = make([]ResourceSpec, len(*in))
+               copy(*out, *in)
+       }
+       if in.Dependencies != nil {
+               in, out := &in.Dependencies, &out.Dependencies
+               *out = make([]string, len(*in))
+               copy(*out, *in)
+       }
+       if in.Steps != nil {
+               in, out := &in.Steps, &out.Steps
+               *out = make([]string, len(*in))
+               copy(*out, *in)
+       }
+       in.Maven.DeepCopyInto(&out.Maven)
+       if in.Properties != nil {
+               in, out := &in.Properties, &out.Properties
+               *out = make(map[string]string, len(*in))
+               for key, val := range *in {
+                       (*out)[key] = val
+               }
+       }
+       out.Timeout = in.Timeout
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new BuilderTask.
+func (in *BuilderTask) DeepCopy() *BuilderTask {
+       if in == nil {
+               return nil
+       }
+       out := new(BuilderTask)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *CamelArtifact) DeepCopyInto(out *CamelArtifact) {
        *out = *in
        in.CamelArtifactDependency.DeepCopyInto(&out.CamelArtifactDependency)
@@ -447,6 +514,35 @@ func (in *FailureRecovery) DeepCopy() *FailureRecovery {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
+func (in *ImageTask) DeepCopyInto(out *ImageTask) {
+       *out = *in
+       in.BaseTask.DeepCopyInto(&out.BaseTask)
+       if in.Args != nil {
+               in, out := &in.Args, &out.Args
+               *out = make([]string, len(*in))
+               copy(*out, *in)
+       }
+       if in.Env != nil {
+               in, out := &in.Env, &out.Env
+               *out = make([]v1.EnvVar, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
+       }
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new ImageTask.
+func (in *ImageTask) DeepCopy() *ImageTask {
+       if in == nil {
+               return nil
+       }
+       out := new(ImageTask)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *Integration) DeepCopyInto(out *Integration) {
        *out = *in
        out.TypeMeta = in.TypeMeta
@@ -726,7 +822,7 @@ func (in *IntegrationPlatformBuildSpec) DeepCopyInto(out 
*IntegrationPlatformBui
        out.Registry = in.Registry
        if in.Timeout != nil {
                in, out := &in.Timeout, &out.Timeout
-               *out = new(v1.Duration)
+               *out = new(metav1.Duration)
                **out = **in
        }
        in.Maven.DeepCopyInto(&out.Maven)
@@ -997,12 +1093,29 @@ func (in *IntegrationStatus) DeepCopy() 
*IntegrationStatus {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
+func (in *KanikoTask) DeepCopyInto(out *KanikoTask) {
+       *out = *in
+       in.ImageTask.DeepCopyInto(&out.ImageTask)
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new KanikoTask.
+func (in *KanikoTask) DeepCopy() *KanikoTask {
+       if in == nil {
+               return nil
+       }
+       out := new(KanikoTask)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *MavenSpec) DeepCopyInto(out *MavenSpec) {
        *out = *in
        in.Settings.DeepCopyInto(&out.Settings)
        if in.Timeout != nil {
                in, out := &in.Timeout, &out.Timeout
-               *out = new(v1.Duration)
+               *out = new(metav1.Duration)
                **out = **in
        }
        return
@@ -1090,6 +1203,32 @@ func (in *SourceSpec) DeepCopy() *SourceSpec {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
+func (in *Task) DeepCopyInto(out *Task) {
+       *out = *in
+       if in.Builder != nil {
+               in, out := &in.Builder, &out.Builder
+               *out = new(BuilderTask)
+               (*in).DeepCopyInto(*out)
+       }
+       if in.Kaniko != nil {
+               in, out := &in.Kaniko, &out.Kaniko
+               *out = new(KanikoTask)
+               (*in).DeepCopyInto(*out)
+       }
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, 
creating a new Task.
+func (in *Task) DeepCopy() *Task {
+       if in == nil {
+               return nil
+       }
+       out := new(Task)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, 
writing into out. in must be non-nil.
 func (in *TraitSpec) DeepCopyInto(out *TraitSpec) {
        *out = *in
        if in.Configuration != nil {
@@ -1117,12 +1256,12 @@ func (in *ValueSource) DeepCopyInto(out *ValueSource) {
        *out = *in
        if in.ConfigMapKeyRef != nil {
                in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef
-               *out = new(corev1.ConfigMapKeySelector)
+               *out = new(v1.ConfigMapKeySelector)
                (*in).DeepCopyInto(*out)
        }
        if in.SecretKeyRef != nil {
                in, out := &in.SecretKeyRef, &out.SecretKeyRef
-               *out = new(corev1.SecretKeySelector)
+               *out = new(v1.SecretKeySelector)
                (*in).DeepCopyInto(*out)
        }
        return
diff --git a/pkg/builder/builder.go b/pkg/builder/builder.go
index 250c682..2683386 100644
--- a/pkg/builder/builder.go
+++ b/pkg/builder/builder.go
@@ -24,8 +24,6 @@ import (
        "sort"
        "time"
 
-       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        "github.com/apache/camel-k/pkg/client"
        "github.com/apache/camel-k/pkg/util/cancellable"
@@ -49,18 +47,9 @@ func New(c client.Client) Builder {
        return &m
 }
 
-// Build --
-func (b *defaultBuilder) Build(build v1alpha1.BuildSpec) <-chan 
v1alpha1.BuildStatus {
-       channel := make(chan v1alpha1.BuildStatus)
-       go b.build(build, channel)
-       return channel
-}
-
-func (b *defaultBuilder) build(build v1alpha1.BuildSpec, channel chan<- 
v1alpha1.BuildStatus) {
+// Run --
+func (b *defaultBuilder) Run(build v1alpha1.BuilderTask) v1alpha1.BuildStatus {
        result := v1alpha1.BuildStatus{}
-       result.Phase = v1alpha1.BuildPhaseRunning
-       result.StartedAt = metav1.Now()
-       channel <- result
 
        // create tmp path
        buildDir := build.BuildDir
@@ -82,22 +71,16 @@ func (b *defaultBuilder) build(build v1alpha1.BuildSpec, 
channel chan<- v1alpha1
                Path:      builderPath,
                Namespace: build.Meta.Namespace,
                Build:     build,
-               Image:     build.Platform.Build.BaseImage,
-       }
-
-       if build.Image != "" {
-               c.Image = build.Image
+               BaseImage: build.BaseImage,
        }
 
        // base image is mandatory
-       if c.Image == "" {
+       if c.BaseImage == "" {
                result.Phase = v1alpha1.BuildPhaseFailed
                result.Image = ""
                result.Error = "no base image defined"
        }
 
-       c.BaseImage = c.Image
-
        // Add sources
        for _, data := range build.Sources {
                c.Resources = append(c.Resources, Resource{
@@ -121,10 +104,7 @@ func (b *defaultBuilder) build(build v1alpha1.BuildSpec, 
channel chan<- v1alpha1
        }
 
        if result.Phase == v1alpha1.BuildPhaseFailed {
-               result.Duration = 
metav1.Now().Sub(result.StartedAt.Time).String()
-               channel <- result
-               close(channel)
-               return
+               return result
        }
 
        steps := make([]Step, 0)
@@ -154,7 +134,7 @@ func (b *defaultBuilder) build(build v1alpha1.BuildSpec, 
channel chan<- v1alpha1
                        l := b.log.WithValues(
                                "step", step.ID(),
                                "phase", step.Phase(),
-                               "kit", build.Meta.Name,
+                               "task", build.Name,
                        )
 
                        l.Infof("executing step")
@@ -170,10 +150,7 @@ func (b *defaultBuilder) build(build v1alpha1.BuildSpec, 
channel chan<- v1alpha1
                }
        }
 
-       result.Duration = metav1.Now().Sub(result.StartedAt.Time).String()
-
        if result.Phase != v1alpha1.BuildPhaseInterrupted {
-               result.Phase = v1alpha1.BuildPhaseSucceeded
                result.BaseImage = c.BaseImage
                result.Image = c.Image
 
@@ -185,17 +162,15 @@ func (b *defaultBuilder) build(build v1alpha1.BuildSpec, 
channel chan<- v1alpha1
                result.Artifacts = make([]v1alpha1.Artifact, 0, 
len(c.Artifacts))
                result.Artifacts = append(result.Artifacts, c.Artifacts...)
 
-               b.log.Infof("build request %s executed in %s", build.Meta.Name, 
result.Duration)
                b.log.Infof("dependencies: %s", build.Dependencies)
                b.log.Infof("artifacts: %s", artifactIDs(c.Artifacts))
                b.log.Infof("artifacts selected: %s", 
artifactIDs(c.SelectedArtifacts))
-               b.log.Infof("requested image: %s", build.Image)
+               b.log.Infof("requested image: %s", build.BaseImage)
                b.log.Infof("base image: %s", c.BaseImage)
                b.log.Infof("resolved image: %s", c.Image)
        } else {
-               b.log.Infof("build request %s interrupted after %s", 
build.Meta.Name, result.Duration)
+               b.log.Infof("build task %s interrupted", build.Name)
        }
 
-       channel <- result
-       close(channel)
+       return result
 }
diff --git a/pkg/builder/builder_steps.go b/pkg/builder/builder_steps.go
index d44d579..4b84f9f 100644
--- a/pkg/builder/builder_steps.go
+++ b/pkg/builder/builder_steps.go
@@ -109,7 +109,7 @@ func registerStep(steps ...Step) {
 }
 
 func generateProjectSettings(ctx *Context) error {
-       val, err := kubernetes.ResolveValueSource(ctx.C, ctx.Client, 
ctx.Namespace, &ctx.Build.Platform.Build.Maven.Settings)
+       val, err := kubernetes.ResolveValueSource(ctx.C, ctx.Client, 
ctx.Namespace, &ctx.Build.Maven.Settings)
        if err != nil {
                return err
        }
@@ -283,7 +283,7 @@ func incrementalPackager(ctx *Context) error {
                        }
 
                        ctx.BaseImage = bestImage.Image
-                       ctx.Image = bestImage.Image
+                       //ctx.Image = bestImage.Image
                        ctx.SelectedArtifacts = selectedArtifacts
                }
 
@@ -297,7 +297,7 @@ func packager(ctx *Context, selector artifactsSelector) 
error {
                return err
        }
 
-       tarFileName := path.Join(ctx.Path, "package", "occi.tar")
+       tarFileName := path.Join(ctx.Build.BuildDir, "package", 
ctx.Build.Meta.Name)
        tarFileDir := path.Dir(tarFileName)
 
        err = os.MkdirAll(tarFileDir, 0777)
diff --git a/pkg/builder/builder_steps_test.go 
b/pkg/builder/builder_steps_test.go
index 1985dee..b7f6dff 100644
--- a/pkg/builder/builder_steps_test.go
+++ b/pkg/builder/builder_steps_test.go
@@ -76,20 +76,16 @@ func TestMavenSettingsFromConfigMap(t *testing.T) {
                Catalog:   catalog,
                Client:    c,
                Namespace: "ns",
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                                       Maven: v1alpha1.MavenSpec{
-                                               Settings: v1alpha1.ValueSource{
-                                                       ConfigMapKeyRef: 
&corev1.ConfigMapKeySelector{
-                                                               
LocalObjectReference: corev1.LocalObjectReference{
-                                                                       Name: 
"maven-settings",
-                                                               },
-                                                               Key: 
"settings.xml",
-                                                       },
+                       CamelVersion:   catalog.Version,
+                       Maven: v1alpha1.MavenSpec{
+                               Settings: v1alpha1.ValueSource{
+                                       ConfigMapKeyRef: 
&corev1.ConfigMapKeySelector{
+                                               LocalObjectReference: 
corev1.LocalObjectReference{
+                                                       Name: "maven-settings",
                                                },
+                                               Key: "settings.xml",
                                        },
                                },
                        },
@@ -128,20 +124,16 @@ func TestMavenSettingsFromSecret(t *testing.T) {
                Catalog:   catalog,
                Client:    c,
                Namespace: "ns",
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                                       Maven: v1alpha1.MavenSpec{
-                                               Settings: v1alpha1.ValueSource{
-                                                       SecretKeyRef: 
&corev1.SecretKeySelector{
-                                                               
LocalObjectReference: corev1.LocalObjectReference{
-                                                                       Name: 
"maven-settings",
-                                                               },
-                                                               Key: 
"settings.xml",
-                                                       },
+                       CamelVersion:   catalog.Version,
+                       Maven: v1alpha1.MavenSpec{
+                               Settings: v1alpha1.ValueSource{
+                                       SecretKeyRef: &corev1.SecretKeySelector{
+                                               LocalObjectReference: 
corev1.LocalObjectReference{
+                                                       Name: "maven-settings",
                                                },
+                                               Key: "settings.xml",
                                        },
                                },
                        },
diff --git a/pkg/builder/builder_test.go b/pkg/builder/builder_test.go
index b2a64f1..f1f90c1 100644
--- a/pkg/builder/builder_test.go
+++ b/pkg/builder/builder_test.go
@@ -53,27 +53,15 @@ func TestFailure(t *testing.T) {
 
        RegisterSteps(steps)
 
-       r := v1alpha1.BuildSpec{
+       r := v1alpha1.BuilderTask{
                Steps: StepIDsFor(
                        steps.Step1,
                        steps.Step2,
                ),
                RuntimeVersion: catalog.RuntimeVersion,
-               Platform: v1alpha1.IntegrationPlatformSpec{
-                       Build: v1alpha1.IntegrationPlatformBuildSpec{
-                               CamelVersion: catalog.Version,
-                       },
-               },
+               CamelVersion:   catalog.Version,
        }
 
-       progress := b.Build(r)
-
-       status := make([]v1alpha1.BuildStatus, 0)
-       for s := range progress {
-               status = append(status, s)
-       }
-
-       assert.Len(t, status, 2)
-       assert.Equal(t, v1alpha1.BuildPhaseRunning, status[0].Phase)
-       assert.Equal(t, v1alpha1.BuildPhaseFailed, status[1].Phase)
+       status := b.Run(r)
+       assert.Equal(t, v1alpha1.BuildPhaseFailed, status.Phase)
 }
diff --git a/pkg/builder/builder_types.go b/pkg/builder/builder_types.go
index 9a21ed0..52e6462 100644
--- a/pkg/builder/builder_types.go
+++ b/pkg/builder/builder_types.go
@@ -45,7 +45,7 @@ const (
 
 // Builder --
 type Builder interface {
-       Build(build v1alpha1.BuildSpec) <-chan v1alpha1.BuildStatus
+       Run(build v1alpha1.BuilderTask) v1alpha1.BuildStatus
 }
 
 // Step --
@@ -101,7 +101,7 @@ type Context struct {
        client.Client
        C                 cancellable.Context
        Catalog           *camel.RuntimeCatalog
-       Build             v1alpha1.BuildSpec
+       Build             v1alpha1.BuilderTask
        BaseImage         string
        Image             string
        Error             error
@@ -120,16 +120,7 @@ type Context struct {
 
 // HasRequiredImage --
 func (c *Context) HasRequiredImage() bool {
-       return c.Build.Image != ""
-}
-
-// GetImage --
-func (c *Context) GetImage() string {
-       if c.Build.Image != "" {
-               return c.Build.Image
-       }
-
-       return c.Image
+       return c.Build.BaseImage != ""
 }
 
 type publishedImage struct {
diff --git a/pkg/builder/kaniko/publisher.go b/pkg/builder/kaniko/publisher.go
index 734638b..f34f2fb 100644
--- a/pkg/builder/kaniko/publisher.go
+++ b/pkg/builder/kaniko/publisher.go
@@ -18,64 +18,19 @@ limitations under the License.
 package kaniko
 
 import (
-       "fmt"
        "io/ioutil"
        "os"
        "path"
-       "strconv"
-       "time"
 
        "github.com/apache/camel-k/pkg/builder"
-       "github.com/apache/camel-k/pkg/util/defaults"
-       "github.com/apache/camel-k/pkg/util/kubernetes"
        "github.com/apache/camel-k/pkg/util/tar"
-       "sigs.k8s.io/controller-runtime/pkg/client"
-
-       "github.com/pkg/errors"
-
-       corev1 "k8s.io/api/core/v1"
-       apierrors "k8s.io/apimachinery/pkg/api/errors"
-       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-type secretKind struct {
-       fileName    string
-       mountPath   string
-       destination string
-       refEnv      string
-}
-
-var (
-       secretKindGCR = secretKind{
-               fileName:    "kaniko-secret.json",
-               mountPath:   "/secret",
-               destination: "kaniko-secret.json",
-               refEnv:      "GOOGLE_APPLICATION_CREDENTIALS",
-       }
-       secretKindPlainDocker = secretKind{
-               fileName:    "config.json",
-               mountPath:   "/kaniko/.docker",
-               destination: "config.json",
-       }
-       secretKindStandardDocker = secretKind{
-               fileName:    corev1.DockerConfigJsonKey,
-               mountPath:   "/kaniko/.docker",
-               destination: "config.json",
-       }
-
-       allSecretKinds = []secretKind{secretKindGCR, secretKindPlainDocker, 
secretKindStandardDocker}
 )
 
 func publisher(ctx *builder.Context) error {
-       organization := ctx.Build.Platform.Build.Registry.Organization
-       if organization == "" {
-               organization = ctx.Namespace
-       }
-       image := ctx.Build.Platform.Build.Registry.Address + "/" + organization 
+ "/camel-k-" + ctx.Build.Meta.Name + ":" + ctx.Build.Meta.ResourceVersion
        baseDir, _ := path.Split(ctx.Archive)
        contextDir := path.Join(baseDir, "context")
 
-       err := os.Mkdir(contextDir, 0777)
+       err := os.MkdirAll(contextDir, 0777)
        if err != nil {
                return err
        }
@@ -86,7 +41,7 @@ func publisher(ctx *builder.Context) error {
 
        // #nosec G202
        dockerFileContent := []byte(`
-               FROM ` + ctx.Image + `
+               FROM ` + ctx.BaseImage + `
                ADD . /deployments
        `)
 
@@ -95,197 +50,5 @@ func publisher(ctx *builder.Context) error {
                return err
        }
 
-       volumes := []corev1.Volume{
-               {
-                       Name: "camel-k-builder",
-                       VolumeSource: corev1.VolumeSource{
-                               PersistentVolumeClaim: 
&corev1.PersistentVolumeClaimVolumeSource{
-                                       ClaimName: 
ctx.Build.Platform.Build.PersistentVolumeClaim,
-                               },
-                       },
-               },
-       }
-       volumeMounts := []corev1.VolumeMount{
-               {
-                       Name:      "camel-k-builder",
-                       MountPath: BuildDir,
-               },
-       }
-       envs := make([]corev1.EnvVar, 0)
-       baseArgs := []string{
-               "--dockerfile=Dockerfile",
-               "--context=" + contextDir,
-               "--destination=" + image,
-               "--cache=" + 
strconv.FormatBool(ctx.Build.Platform.Build.IsKanikoCacheEnabled()),
-               "--cache-dir=/workspace/cache",
-       }
-
-       args := make([]string, 0, len(baseArgs))
-       args = append(args, baseArgs...)
-
-       if ctx.Build.Platform.Build.Registry.Insecure {
-               args = append(args, "--insecure")
-               args = append(args, "--insecure-pull")
-       }
-
-       if ctx.Build.Platform.Build.Registry.Secret != "" {
-               secretKind, err := getSecretKind(ctx, 
ctx.Build.Platform.Build.Registry.Secret)
-               if err != nil {
-                       return err
-               }
-
-               volumes = append(volumes, corev1.Volume{
-                       Name: "kaniko-secret",
-                       VolumeSource: corev1.VolumeSource{
-                               Secret: &corev1.SecretVolumeSource{
-                                       SecretName: 
ctx.Build.Platform.Build.Registry.Secret,
-                                       Items: []corev1.KeyToPath{
-                                               {
-                                                       Key:  
secretKind.fileName,
-                                                       Path: 
secretKind.destination,
-                                               },
-                                       },
-                               },
-                       },
-               })
-
-               volumeMounts = append(volumeMounts, corev1.VolumeMount{
-                       Name:      "kaniko-secret",
-                       MountPath: secretKind.mountPath,
-               })
-
-               if secretKind.refEnv != "" {
-                       envs = append(envs, corev1.EnvVar{
-                               Name:  secretKind.refEnv,
-                               Value: path.Join(secretKind.mountPath, 
secretKind.destination),
-                       })
-               }
-               args = baseArgs
-       }
-
-       if ctx.Build.Platform.Build.HTTPProxySecret != "" {
-               optional := true
-               envs = append(envs, corev1.EnvVar{
-                       Name: "HTTP_PROXY",
-                       ValueFrom: &corev1.EnvVarSource{
-                               SecretKeyRef: &corev1.SecretKeySelector{
-                                       LocalObjectReference: 
corev1.LocalObjectReference{
-                                               Name: 
ctx.Build.Platform.Build.HTTPProxySecret,
-                                       },
-                                       Key:      "HTTP_PROXY",
-                                       Optional: &optional,
-                               },
-                       },
-               })
-               envs = append(envs, corev1.EnvVar{
-                       Name: "HTTPS_PROXY",
-                       ValueFrom: &corev1.EnvVarSource{
-                               SecretKeyRef: &corev1.SecretKeySelector{
-                                       LocalObjectReference: 
corev1.LocalObjectReference{
-                                               Name: 
ctx.Build.Platform.Build.HTTPProxySecret,
-                                       },
-                                       Key:      "HTTPS_PROXY",
-                                       Optional: &optional,
-                               },
-                       },
-               })
-               envs = append(envs, corev1.EnvVar{
-                       Name: "NO_PROXY",
-                       ValueFrom: &corev1.EnvVarSource{
-                               SecretKeyRef: &corev1.SecretKeySelector{
-                                       LocalObjectReference: 
corev1.LocalObjectReference{
-                                               Name: 
ctx.Build.Platform.Build.HTTPProxySecret,
-                                       },
-                                       Key:      "NO_PROXY",
-                                       Optional: &optional,
-                               },
-                       },
-               })
-       }
-
-       pod := corev1.Pod{
-               TypeMeta: metav1.TypeMeta{
-                       APIVersion: corev1.SchemeGroupVersion.String(),
-                       Kind:       "Pod",
-               },
-               ObjectMeta: metav1.ObjectMeta{
-                       Namespace: ctx.Namespace,
-                       Name:      "camel-k-" + ctx.Build.Meta.Name,
-                       Labels: map[string]string{
-                               "app": "camel-k",
-                       },
-               },
-               Spec: corev1.PodSpec{
-                       Containers: []corev1.Container{
-                               {
-                                       Name:         "kaniko",
-                                       Image:        
fmt.Sprintf("gcr.io/kaniko-project/executor:v%s", defaults.KanikoVersion),
-                                       Args:         args,
-                                       Env:          envs,
-                                       VolumeMounts: volumeMounts,
-                               },
-                       },
-                       RestartPolicy: corev1.RestartPolicyNever,
-                       Volumes:       volumes,
-               },
-       }
-
-       // Co-locate with the build pod for sharing the volume
-       pod.Spec.Affinity = &corev1.Affinity{
-               PodAffinity: &corev1.PodAffinity{
-                       RequiredDuringSchedulingIgnoredDuringExecution: 
[]corev1.PodAffinityTerm{
-                               {
-                                       LabelSelector: &metav1.LabelSelector{
-                                               MatchLabels: map[string]string{
-                                                       
"camel.apache.org/build": ctx.Build.Meta.Name,
-                                               },
-                                       },
-                                       TopologyKey: "kubernetes.io/hostname",
-                               },
-                       },
-               },
-       }
-
-       err = ctx.Client.Delete(ctx.C, &pod)
-       if err != nil && !apierrors.IsNotFound(err) {
-               return errors.Wrap(err, "cannot delete kaniko builder pod")
-       }
-
-       err = ctx.Client.Create(ctx.C, &pod)
-       if err != nil {
-               return errors.Wrap(err, "cannot create kaniko builder pod")
-       }
-
-       err = kubernetes.WaitCondition(ctx.C, ctx.Client, &pod, func(obj 
interface{}) (bool, error) {
-               if val, ok := obj.(*corev1.Pod); ok {
-                       if val.Status.Phase == corev1.PodSucceeded {
-                               return true, nil
-                       }
-                       if val.Status.Phase == corev1.PodFailed {
-                               return false, fmt.Errorf("build failed: %s", 
val.Status.Message)
-                       }
-               }
-               return false, nil
-       }, 10*time.Minute)
-
-       if err != nil {
-               return err
-       }
-
-       ctx.Image = image
        return nil
 }
-
-func getSecretKind(ctx *builder.Context, name string) (secretKind, error) {
-       secret := corev1.Secret{}
-       key := client.ObjectKey{Namespace: ctx.Namespace, Name: name}
-       if err := ctx.Client.Get(ctx.C, key, &secret); err != nil {
-               return secretKind{}, err
-       }
-       for _, k := range allSecretKinds {
-               if _, ok := secret.Data[k.fileName]; ok {
-                       return k, nil
-               }
-       }
-       return secretKind{}, errors.New("unsupported secret type for registry 
authentication")
-}
diff --git a/pkg/builder/runtime/main.go b/pkg/builder/runtime/main.go
index a191afd..c210819 100644
--- a/pkg/builder/runtime/main.go
+++ b/pkg/builder/runtime/main.go
@@ -58,7 +58,7 @@ func loadCamelCatalog(ctx *builder.Context) error {
 
 func generateProject(ctx *builder.Context) error {
        p := maven.NewProjectWithGAV("org.apache.camel.k.integration", 
"camel-k-integration", defaults.Version)
-       p.Properties = ctx.Build.Platform.Build.Properties
+       p.Properties = ctx.Build.Properties
        p.DependencyManagement = &maven.DependencyManagement{Dependencies: 
make([]maven.Dependency, 0)}
        p.Dependencies = make([]maven.Dependency, 0)
 
@@ -86,8 +86,8 @@ func generateProject(ctx *builder.Context) error {
 func computeDependencies(ctx *builder.Context) error {
        mc := maven.NewContext(path.Join(ctx.Path, "maven"), ctx.Maven.Project)
        mc.SettingsContent = ctx.Maven.SettingsData
-       mc.LocalRepository = ctx.Build.Platform.Build.Maven.LocalRepository
-       mc.Timeout = ctx.Build.Platform.Build.Maven.GetTimeout().Duration
+       mc.LocalRepository = ctx.Build.Maven.LocalRepository
+       mc.Timeout = ctx.Build.Maven.GetTimeout().Duration
        
mc.AddArgumentf("org.apache.camel.k:camel-k-maven-plugin:%s:generate-dependency-list",
 ctx.Catalog.RuntimeVersion)
 
        if err := maven.Run(mc); err != nil {
diff --git a/pkg/builder/runtime/main_test.go b/pkg/builder/runtime/main_test.go
index f149325..4c26360 100644
--- a/pkg/builder/runtime/main_test.go
+++ b/pkg/builder/runtime/main_test.go
@@ -34,14 +34,9 @@ func TestNewProject(t *testing.T) {
 
        ctx := builder.Context{
                Catalog: catalog,
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        CamelVersion:   catalog.Version,
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                               },
-                       },
                        Dependencies: []string{
                                "camel-k:runtime-main",
                                "bom:my.company/my-artifact-1/1.0.0",
@@ -97,14 +92,9 @@ func TestGenerateJvmProject(t *testing.T) {
 
        ctx := builder.Context{
                Catalog: catalog,
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        CamelVersion:   catalog.Version,
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                               },
-                       },
                        Dependencies: []string{
                                "camel-k:runtime-main",
                        },
@@ -154,14 +144,9 @@ func TestGenerateGroovyProject(t *testing.T) {
 
        ctx := builder.Context{
                Catalog: catalog,
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        CamelVersion:   catalog.Version,
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                               },
-                       },
                        Dependencies: []string{
                                "camel-k:runtime-main",
                                "camel-k:loader-groovy",
@@ -215,14 +200,9 @@ func TestSanitizeDependencies(t *testing.T) {
 
        ctx := builder.Context{
                Catalog: catalog,
-               Build: v1alpha1.BuildSpec{
+               Build: v1alpha1.BuilderTask{
                        CamelVersion:   catalog.Version,
                        RuntimeVersion: catalog.RuntimeVersion,
-                       Platform: v1alpha1.IntegrationPlatformSpec{
-                               Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       CamelVersion: catalog.Version,
-                               },
-                       },
                        Dependencies: []string{
                                "camel:undertow",
                                "mvn:org.apache.camel/camel-core/2.18.0",
diff --git a/pkg/builder/runtime/quarkus.go b/pkg/builder/runtime/quarkus.go
index f9c9809..e7dea56 100644
--- a/pkg/builder/runtime/quarkus.go
+++ b/pkg/builder/runtime/quarkus.go
@@ -59,7 +59,7 @@ func loadCamelQuarkusCatalog(ctx *builder.Context) error {
 
 func generateQuarkusProject(ctx *builder.Context) error {
        p := maven.NewProjectWithGAV("org.apache.camel.k.integration", 
"camel-k-integration", defaults.Version)
-       p.Properties = ctx.Build.Platform.Build.Properties
+       p.Properties = ctx.Build.Properties
        p.DependencyManagement = &maven.DependencyManagement{Dependencies: 
make([]maven.Dependency, 0)}
        p.Dependencies = make([]maven.Dependency, 0)
        p.Build = &maven.Build{Plugins: make([]maven.Plugin, 0)}
@@ -106,8 +106,8 @@ func generateQuarkusProject(ctx *builder.Context) error {
 func computeQuarkusDependencies(ctx *builder.Context) error {
        mc := maven.NewContext(path.Join(ctx.Path, "maven"), ctx.Maven.Project)
        mc.SettingsContent = ctx.Maven.SettingsData
-       mc.LocalRepository = ctx.Build.Platform.Build.Maven.LocalRepository
-       mc.Timeout = ctx.Build.Platform.Build.Maven.GetTimeout().Duration
+       mc.LocalRepository = ctx.Build.Maven.LocalRepository
+       mc.Timeout = ctx.Build.Maven.GetTimeout().Duration
 
        // Build the project
        mc.AddArgument("package")
diff --git a/pkg/builder/s2i/publisher.go b/pkg/builder/s2i/publisher.go
index eb8c598..88c0192 100644
--- a/pkg/builder/s2i/publisher.go
+++ b/pkg/builder/s2i/publisher.go
@@ -19,6 +19,8 @@ package s2i
 
 import (
        "io/ioutil"
+       "os"
+       "path"
 
        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -117,6 +119,9 @@ func publisher(ctx *builder.Context) error {
                return errors.Wrap(err, "cannot fully read tar file 
"+ctx.Archive)
        }
 
+       baseDir, _ := path.Split(ctx.Archive)
+       defer os.RemoveAll(baseDir)
+
        restClient, err := customclient.GetClientFor(ctx.Client, 
"build.openshift.io", "v1")
        if err != nil {
                return err
@@ -156,7 +161,7 @@ func publisher(ctx *builder.Context) error {
                        }
                }
                return false, nil
-       }, ctx.Build.Platform.Build.GetTimeout().Duration)
+       }, ctx.Build.Timeout.Duration)
 
        if err != nil {
                return err
diff --git a/pkg/cmd/builder.go b/pkg/cmd/builder.go
index 5c541f7..ac93a47 100644
--- a/pkg/cmd/builder.go
+++ b/pkg/cmd/builder.go
@@ -36,6 +36,7 @@ func newCmdBuilder(rootCmdOptions *RootCmdOptions) 
(*cobra.Command, *builderCmdO
        }
 
        cmd.Flags().String("build-name", "", "The name of the build resource")
+       cmd.Flags().String("task-name", "", "The name of task to execute")
 
        return &cmd, &options
 }
@@ -43,8 +44,9 @@ func newCmdBuilder(rootCmdOptions *RootCmdOptions) 
(*cobra.Command, *builderCmdO
 type builderCmdOptions struct {
        *RootCmdOptions
        BuildName string `mapstructure:"build-name"`
+       TaskName  string `mapstructure:"task-name"`
 }
 
 func (o *builderCmdOptions) run(_ *cobra.Command, _ []string) {
-       builder.Run(o.Namespace, o.BuildName)
+       builder.Run(o.Namespace, o.BuildName, o.TaskName)
 }
diff --git a/pkg/cmd/builder/builder.go b/pkg/cmd/builder/builder.go
index 7e06126..9fa9e9b 100644
--- a/pkg/cmd/builder/builder.go
+++ b/pkg/cmd/builder/builder.go
@@ -21,10 +21,12 @@ import (
        "fmt"
        "math/rand"
        "os"
+       "reflect"
        "runtime"
        "time"
 
-       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+       "github.com/pkg/errors"
+
        "k8s.io/apimachinery/pkg/types"
 
        controller "sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,6 +39,7 @@ import (
        "github.com/apache/camel-k/pkg/util/cancellable"
        "github.com/apache/camel-k/pkg/util/defaults"
        logger "github.com/apache/camel-k/pkg/util/log"
+       "github.com/apache/camel-k/pkg/util/patch"
 )
 
 var log = logger.WithName("builder")
@@ -47,8 +50,8 @@ func printVersion() {
        log.Info(fmt.Sprintf("Camel K Version: %v", defaults.Version))
 }
 
-// Run creates a build resource in the specified namespace
-func Run(namespace string, buildName string) {
+// Run a build resource in the specified namespace
+func Run(namespace string, buildName string, taskName string) {
        logf.SetLogger(zap.New(func(o *zap.Options) {
                o.Development = false
        }))
@@ -61,33 +64,39 @@ func Run(namespace string, buildName string) {
 
        ctx := cancellable.NewContext()
 
-       build := &v1alpha1.Build{
-               ObjectMeta: metav1.ObjectMeta{
-                       Namespace: namespace,
-                       Name:      buildName,
-               },
-       }
-
+       build := &v1alpha1.Build{}
        exitOnError(
-               c.Get(ctx, types.NamespacedName{Namespace: build.Namespace, 
Name: build.Name}, build),
+               c.Get(ctx, types.NamespacedName{Namespace: namespace, Name: 
buildName}, build),
        )
 
-       progress := builder.New(c).Build(build.Spec)
-       for status := range progress {
-               target := build.DeepCopy()
-               target.Status = status
-               // Copy the failure field from the build to persist recovery 
state
-               target.Status.Failure = build.Status.Failure
-               // Patch the build status with the current progress
-               exitOnError(c.Status().Patch(ctx, target, 
controller.MergeFrom(build)))
-               build.Status = target.Status
+       var task *v1alpha1.BuilderTask
+       for _, t := range build.Spec.Tasks {
+               if t.Builder != nil && t.Builder.Name == taskName {
+                       task = t.Builder
+               }
        }
+       if task == nil {
+               exitOnError(errors.Errorf("No task of type [%s] with name [%s] 
in build [%s/%s]",
+                       reflect.TypeOf(v1alpha1.BuilderTask{}).Name(), 
taskName, namespace, buildName))
+       }
+
+       status := builder.New(c).Run(*task)
+       target := build.DeepCopy()
+       target.Status = status
+       // Copy the failure field from the build to persist recovery state
+       target.Status.Failure = build.Status.Failure
+       // Patch the build status with the result
+       p, err := patch.PositiveMergePatch(build, target)
+       exitOnError(err)
+       exitOnError(c.Status().Patch(ctx, target, 
controller.ConstantPatch(types.MergePatchType, p)))
+       build.Status = target.Status
 
        switch build.Status.Phase {
-       case v1alpha1.BuildPhaseSucceeded:
-               os.Exit(0)
-       default:
+       case v1alpha1.BuildPhaseFailed:
+               log.Error(nil, build.Status.Error)
                os.Exit(1)
+       default:
+               os.Exit(0)
        }
 }
 
diff --git a/pkg/controller/build/build_controller.go 
b/pkg/controller/build/build_controller.go
index b6d227d..f9ef70c 100644
--- a/pkg/controller/build/build_controller.go
+++ b/pkg/controller/build/build_controller.go
@@ -134,7 +134,7 @@ func (r *ReconcileBuild) Reconcile(request 
reconcile.Request) (reconcile.Result,
 
        ctx := context.TODO()
 
-       // Fetch the Integration instance
+       // Fetch the Build instance
        var instance v1alpha1.Build
 
        if err := r.client.Get(ctx, request.NamespacedName, &instance); err != 
nil {
@@ -151,8 +151,8 @@ func (r *ReconcileBuild) Reconcile(request 
reconcile.Request) (reconcile.Result,
        target := instance.DeepCopy()
        targetLog := rlog.ForBuild(target)
 
+       pl, err := platform.GetOrLookupCurrent(ctx, r.client, target.Namespace, 
target.Status.Platform)
        if target.Status.Phase == v1alpha1.BuildPhaseNone || 
target.Status.Phase == v1alpha1.BuildPhaseWaitingForPlatform {
-               pl, err := platform.GetOrLookupCurrent(ctx, r.client, 
target.Namespace, target.Status.Platform)
                if err != nil || pl.Status.Phase != 
v1alpha1.IntegrationPlatformPhaseReady {
                        target.Status.Phase = 
v1alpha1.BuildPhaseWaitingForPlatform
                } else {
@@ -174,15 +174,25 @@ func (r *ReconcileBuild) Reconcile(request 
reconcile.Request) (reconcile.Result,
                return reconcile.Result{}, err
        }
 
-       actions := []Action{
-               NewInitializeRoutineAction(),
-               NewInitializePodAction(),
-               NewScheduleRoutineAction(r.reader, r.builder, &r.routines),
-               NewSchedulePodAction(r.reader),
-               NewMonitorRoutineAction(&r.routines),
-               NewMonitorPodAction(),
-               NewErrorRecoveryAction(),
-               NewErrorAction(),
+       var actions []Action
+
+       switch pl.Spec.Build.BuildStrategy {
+       case v1alpha1.IntegrationPlatformBuildStrategyPod:
+               actions = []Action{
+                       NewInitializePodAction(),
+                       NewSchedulePodAction(r.reader),
+                       NewMonitorPodAction(),
+                       NewErrorRecoveryAction(),
+                       NewErrorAction(),
+               }
+       case v1alpha1.IntegrationPlatformBuildStrategyRoutine:
+               actions = []Action{
+                       NewInitializeRoutineAction(),
+                       NewScheduleRoutineAction(r.reader, r.builder, 
&r.routines),
+                       NewMonitorRoutineAction(&r.routines),
+                       NewErrorRecoveryAction(),
+                       NewErrorAction(),
+               }
        }
 
        for _, a := range actions {
@@ -219,7 +229,7 @@ func (r *ReconcileBuild) Reconcile(request 
reconcile.Request) (reconcile.Result,
                }
        }
 
-       // Requeue scheduling build so that it re-enters the build working queue
+       // Requeue scheduling (resp. failed) build so that it re-enters the 
build (resp. recovery) working queue
        if target.Status.Phase == v1alpha1.BuildPhaseScheduling || 
target.Status.Phase == v1alpha1.BuildPhaseFailed {
                return reconcile.Result{
                        RequeueAfter: 5 * time.Second,
diff --git a/pkg/controller/build/initialize_pod.go 
b/pkg/controller/build/initialize_pod.go
index 4576872..15def69 100644
--- a/pkg/controller/build/initialize_pod.go
+++ b/pkg/controller/build/initialize_pod.go
@@ -20,13 +20,16 @@ package build
 import (
        "context"
 
-       "github.com/apache/camel-k/pkg/install"
        "github.com/pkg/errors"
+
        corev1 "k8s.io/api/core/v1"
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
        k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+       "github.com/apache/camel-k/pkg/install"
 )
 
 // NewInitializePodAction creates a new initialize action
@@ -45,8 +48,7 @@ func (action *initializePodAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *initializePodAction) CanHandle(build *v1alpha1.Build) bool {
-       return build.Status.Phase == v1alpha1.BuildPhaseInitialization &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyPod
+       return build.Status.Phase == v1alpha1.BuildPhaseInitialization
 }
 
 // Handle handles the builds
@@ -87,3 +89,40 @@ func (action *initializePodAction) ensureServiceAccount(ctx 
context.Context, bui
 
        return err
 }
+
+func deleteBuilderPod(ctx context.Context, client k8sclient.Writer, build 
*v1alpha1.Build) error {
+       pod := corev1.Pod{
+               TypeMeta: metav1.TypeMeta{
+                       APIVersion: corev1.SchemeGroupVersion.String(),
+                       Kind:       "Pod",
+               },
+               ObjectMeta: metav1.ObjectMeta{
+                       Namespace: build.Namespace,
+                       Name:      buildPodName(build),
+               },
+       }
+
+       err := client.Delete(ctx, &pod)
+       if err != nil && k8serrors.IsNotFound(err) {
+               return nil
+       }
+
+       return err
+}
+
+func getBuilderPod(ctx context.Context, client k8sclient.Reader, build 
*v1alpha1.Build) (*corev1.Pod, error) {
+       pod := corev1.Pod{}
+       err := client.Get(ctx, k8sclient.ObjectKey{Namespace: build.Namespace, 
Name: buildPodName(build)}, &pod)
+       if err != nil && k8serrors.IsNotFound(err) {
+               return nil, nil
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       return &pod, nil
+}
+
+func buildPodName(build *v1alpha1.Build) string {
+       return "camel-k-" + build.Name + "-builder"
+}
diff --git a/pkg/controller/build/initialize_routine.go 
b/pkg/controller/build/initialize_routine.go
index 0f37b03..df48624 100644
--- a/pkg/controller/build/initialize_routine.go
+++ b/pkg/controller/build/initialize_routine.go
@@ -39,8 +39,7 @@ func (action *initializeRoutineAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *initializeRoutineAction) CanHandle(build *v1alpha1.Build) bool {
-       return build.Status.Phase == v1alpha1.BuildPhaseInitialization &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyRoutine
+       return build.Status.Phase == v1alpha1.BuildPhaseInitialization
 }
 
 // Handle handles the builds
diff --git a/pkg/controller/build/monitor_pod.go 
b/pkg/controller/build/monitor_pod.go
index 3b7a185..40d7bee 100644
--- a/pkg/controller/build/monitor_pod.go
+++ b/pkg/controller/build/monitor_pod.go
@@ -20,8 +20,10 @@ package build
 import (
        "context"
 
-       "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        corev1 "k8s.io/api/core/v1"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+       "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
 )
 
 // NewMonitorPodAction creates a new monitor action for scheduled pod
@@ -40,37 +42,51 @@ func (action *monitorPodAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *monitorPodAction) CanHandle(build *v1alpha1.Build) bool {
-       return (build.Status.Phase == v1alpha1.BuildPhasePending ||
-               build.Status.Phase == v1alpha1.BuildPhaseRunning) &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyPod
+       return build.Status.Phase == v1alpha1.BuildPhasePending || 
build.Status.Phase == v1alpha1.BuildPhaseRunning
 }
 
 // Handle handles the builds
 func (action *monitorPodAction) Handle(ctx context.Context, build 
*v1alpha1.Build) (*v1alpha1.Build, error) {
-       // Get the build pod
        pod, err := getBuilderPod(ctx, action.client, build)
        if err != nil {
                return nil, err
        }
-       var buildPhase v1alpha1.BuildPhase
 
        switch {
        case pod == nil:
                build.Status.Phase = v1alpha1.BuildPhaseScheduling
+
+       // Pod remains in pending phase when init containers execute
+       case pod.Status.Phase == corev1.PodPending && 
action.isPodScheduled(pod),
+               pod.Status.Phase == corev1.PodRunning:
+               build.Status.Phase = v1alpha1.BuildPhaseRunning
+               if build.Status.StartedAt.Time.IsZero() {
+                       build.Status.StartedAt = metav1.Now()
+               }
+
        case pod.Status.Phase == corev1.PodSucceeded:
-               buildPhase = v1alpha1.BuildPhaseSucceeded
-       case pod.Status.Phase == corev1.PodFailed:
-               buildPhase = v1alpha1.BuildPhaseFailed
-       default:
-               buildPhase = build.Status.Phase
-       }
+               build.Status.Phase = v1alpha1.BuildPhaseSucceeded
+               build.Status.Duration = 
metav1.Now().Sub(build.Status.StartedAt.Time).String()
+               for _, task := range build.Spec.Tasks {
+                       if task.Kaniko != nil {
+                               build.Status.Image = task.Kaniko.BuiltImage
+                               break
+                       }
+               }
 
-       if build.Status.Phase == buildPhase {
-               // Status is already up-to-date
-               return nil, nil
+       case pod.Status.Phase == corev1.PodFailed:
+               build.Status.Phase = v1alpha1.BuildPhaseFailed
+               build.Status.Duration = 
metav1.Now().Sub(build.Status.StartedAt.Time).String()
        }
 
-       build.Status.Phase = buildPhase
-
        return build, nil
 }
+
+func (action *monitorPodAction) isPodScheduled(pod *corev1.Pod) bool {
+       for _, condition := range pod.Status.Conditions {
+               if condition.Type == corev1.PodScheduled && condition.Status == 
corev1.ConditionTrue {
+                       return true
+               }
+       }
+       return false
+}
diff --git a/pkg/controller/build/monitor_routine.go 
b/pkg/controller/build/monitor_routine.go
index 170a575..c1fc7da 100644
--- a/pkg/controller/build/monitor_routine.go
+++ b/pkg/controller/build/monitor_routine.go
@@ -43,9 +43,7 @@ func (action *monitorRoutineAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *monitorRoutineAction) CanHandle(build *v1alpha1.Build) bool {
-       return (build.Status.Phase == v1alpha1.BuildPhasePending ||
-               build.Status.Phase == v1alpha1.BuildPhaseRunning) &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyRoutine
+       return build.Status.Phase == v1alpha1.BuildPhasePending || 
build.Status.Phase == v1alpha1.BuildPhaseRunning
 }
 
 // Handle handles the builds
diff --git a/pkg/controller/build/schedule_pod.go 
b/pkg/controller/build/schedule_pod.go
index 7d18aa4..74ff938 100644
--- a/pkg/controller/build/schedule_pod.go
+++ b/pkg/controller/build/schedule_pod.go
@@ -21,20 +21,21 @@ import (
        "context"
        "sync"
 
+       "github.com/pkg/errors"
+
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+       "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        "github.com/apache/camel-k/pkg/platform"
        "github.com/apache/camel-k/pkg/util/defaults"
-
-       "github.com/pkg/errors"
 )
 
 // NewSchedulePodAction creates a new schedule action
-func NewSchedulePodAction(reader k8sclient.Reader) Action {
+func NewSchedulePodAction(reader client.Reader) Action {
        return &schedulePodAction{
                reader: reader,
        }
@@ -42,8 +43,9 @@ func NewSchedulePodAction(reader k8sclient.Reader) Action {
 
 type schedulePodAction struct {
        baseAction
-       lock   sync.Mutex
-       reader k8sclient.Reader
+       lock          sync.Mutex
+       reader        client.Reader
+       operatorImage string
 }
 
 // Name returns a common name of the action
@@ -53,8 +55,7 @@ func (action *schedulePodAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *schedulePodAction) CanHandle(build *v1alpha1.Build) bool {
-       return build.Status.Phase == v1alpha1.BuildPhaseScheduling &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyPod
+       return build.Status.Phase == v1alpha1.BuildPhaseScheduling
 }
 
 // Handle handles the builds
@@ -66,7 +67,7 @@ func (action *schedulePodAction) Handle(ctx context.Context, 
build *v1alpha1.Bui
        builds := &v1alpha1.BuildList{}
        // We use the non-caching client as informers cache is not invalidated 
nor updated
        // atomically by write operations
-       err := action.reader.List(ctx, builds, 
k8sclient.InNamespace(build.Namespace))
+       err := action.reader.List(ctx, builds, 
client.InNamespace(build.Namespace))
        if err != nil {
                return nil, err
        }
@@ -86,15 +87,9 @@ func (action *schedulePodAction) Handle(ctx context.Context, 
build *v1alpha1.Bui
        }
 
        if pod == nil {
-               // Try to get operator image name before starting the build
-               operatorImage, err := platform.GetCurrentOperatorImage(ctx, 
action.client)
-               if err != nil {
-                       return nil, err
-               }
-
                // We may want to explicitly manage build priority as opposed 
to relying on
                // the reconcile loop to handle the queuing
-               pod, err = action.newBuildPod(ctx, build, operatorImage)
+               pod, err = action.newBuildPod(ctx, build)
                if err != nil {
                        return nil, err
                }
@@ -114,11 +109,7 @@ func (action *schedulePodAction) Handle(ctx 
context.Context, build *v1alpha1.Bui
        return build, nil
 }
 
-func (action *schedulePodAction) newBuildPod(ctx context.Context, build 
*v1alpha1.Build, operatorImage string) (*corev1.Pod, error) {
-       builderImage := operatorImage
-       if builderImage == "" {
-               builderImage = defaults.ImageName + ":" + defaults.Version
-       }
+func (action *schedulePodAction) newBuildPod(ctx context.Context, build 
*v1alpha1.Build) (*corev1.Pod, error) {
        pod := &corev1.Pod{
                TypeMeta: metav1.TypeMeta{
                        APIVersion: corev1.SchemeGroupVersion.String(),
@@ -126,7 +117,7 @@ func (action *schedulePodAction) newBuildPod(ctx 
context.Context, build *v1alpha
                },
                ObjectMeta: metav1.ObjectMeta{
                        Namespace: build.Namespace,
-                       Name:      buildPodName(build.Spec.Meta),
+                       Name:      buildPodName(build),
                        Labels: map[string]string{
                                "camel.apache.org/build":     build.Name,
                                "camel.apache.org/component": "builder",
@@ -134,103 +125,74 @@ func (action *schedulePodAction) newBuildPod(ctx 
context.Context, build *v1alpha
                },
                Spec: corev1.PodSpec{
                        ServiceAccountName: "camel-k-builder",
-                       Containers: []corev1.Container{
-                               {
-                                       Name:            "builder",
-                                       Image:           builderImage,
-                                       ImagePullPolicy: "IfNotPresent",
-                                       Command: []string{
-                                               "kamel",
-                                               "builder",
-                                               "--namespace",
-                                               build.Namespace,
-                                               "--build-name",
-                                               build.Name,
-                                       },
-                               },
-                       },
-                       RestartPolicy: corev1.RestartPolicyNever,
+                       RestartPolicy:      corev1.RestartPolicyNever,
                },
        }
 
-       if build.Spec.Platform.Build.PublishStrategy == 
v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko {
-               // Mount persistent volume used to coordinate build output with 
Kaniko cache and image build input
-               pod.Spec.Volumes = []corev1.Volume{
-                       {
-                               Name: "camel-k-builder",
-                               VolumeSource: corev1.VolumeSource{
-                                       PersistentVolumeClaim: 
&corev1.PersistentVolumeClaimVolumeSource{
-                                               ClaimName: 
build.Spec.Platform.Build.PersistentVolumeClaim,
-                                       },
-                               },
-                       },
-               }
-               pod.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{
-                       {
-                               Name:      "camel-k-builder",
-                               MountPath: build.Spec.BuildDir,
-                       },
-               }
-
-               // In case the kaniko cache has not run, the /workspace dir 
needs to have the right permissions set
-               pod.Spec.InitContainers = append(pod.Spec.InitContainers, 
corev1.Container{
-                       Name:            "prepare-kaniko-workspace",
-                       Image:           "busybox",
-                       ImagePullPolicy: corev1.PullIfNotPresent,
-                       Command:         []string{"/bin/sh", "-c"},
-                       Args:            []string{"chmod -R a+rwx /workspace"},
-                       VolumeMounts: []corev1.VolumeMount{
-                               {
-                                       Name:      "camel-k-builder",
-                                       MountPath: "/workspace",
-                               },
-                       },
-               })
-
-               // Use affinity when Kaniko cache warming is enabled
-               if build.Spec.Platform.Build.IsKanikoCacheEnabled() {
-                       // Co-locate with the Kaniko warmer pod for sharing the 
host path volume as the current
-                       // persistent volume claim uses the default storage 
class which is likely relying
-                       // on the host path provisioner.
-                       // This has to be done manually by retrieving the 
Kaniko warmer pod node name and using
-                       // node affinity as pod affinity only works for running 
pods and the Kaniko warmer pod
-                       // has already completed at that stage.
-
-                       // Locate the kaniko warmer pod
-                       pods := &corev1.PodList{}
-                       err := action.client.List(ctx, pods,
-                               k8sclient.InNamespace(build.Namespace),
-                               k8sclient.MatchingLabels{
-                                       "camel.apache.org/component": 
"kaniko-warmer",
-                               })
+       for _, task := range build.Spec.Tasks {
+               if task.Builder != nil {
+                       // TODO: Move the retrieval of the operator image into 
the controller
+                       operatorImage, err := 
platform.GetCurrentOperatorImage(ctx, action.client)
                        if err != nil {
                                return nil, err
                        }
-
-                       if len(pods.Items) != 1 {
-                               return nil, errors.New("failed to locate the 
Kaniko cache warmer pod")
-                       }
-
-                       // Use node affinity with the Kaniko warmer pod node 
name
-                       pod.Spec.Affinity = &corev1.Affinity{
-                               NodeAffinity: &corev1.NodeAffinity{
-                                       
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
-                                               NodeSelectorTerms: 
[]corev1.NodeSelectorTerm{
-                                                       {
-                                                               
MatchExpressions: []corev1.NodeSelectorRequirement{
-                                                                       {
-                                                                               
Key:      "kubernetes.io/hostname",
-                                                                               
Operator: "In",
-                                                                               
Values:   []string{pods.Items[0].Spec.NodeName},
-                                                                       },
-                                                               },
-                                                       },
-                                               },
-                                       },
-                               },
+                       if operatorImage == "" {
+                               action.operatorImage = defaults.ImageName + ":" 
+ defaults.Version
+                       } else {
+                               action.operatorImage = operatorImage
                        }
+                       action.addCamelTaskToPod(build, task.Builder, pod)
+               } else if task.Kaniko != nil {
+                       action.addKanikoTaskToPod(task.Kaniko, pod)
                }
        }
 
+       // Make sure there is one container defined
+       pod.Spec.Containers = 
pod.Spec.InitContainers[len(pod.Spec.InitContainers)-1 : 
len(pod.Spec.InitContainers)]
+       pod.Spec.InitContainers = 
pod.Spec.InitContainers[:len(pod.Spec.InitContainers)-1]
+
        return pod, nil
 }
+
+func (action *schedulePodAction) addCamelTaskToPod(build *v1alpha1.Build, task 
*v1alpha1.BuilderTask, pod *corev1.Pod) {
+       pod.Spec.InitContainers = append(pod.Spec.InitContainers, 
corev1.Container{
+               Name:            task.Name,
+               Image:           action.operatorImage,
+               ImagePullPolicy: "IfNotPresent",
+               Command: []string{
+                       "kamel",
+                       "builder",
+                       "--namespace",
+                       pod.Namespace,
+                       "--build-name",
+                       build.Name,
+                       "--task-name",
+                       task.Name,
+               },
+               VolumeMounts: task.VolumeMounts,
+       })
+
+       action.addBaseTaskToPod(&task.BaseTask, pod)
+}
+
+func (action *schedulePodAction) addKanikoTaskToPod(task *v1alpha1.KanikoTask, 
pod *corev1.Pod) {
+       pod.Spec.InitContainers = append(pod.Spec.InitContainers, 
corev1.Container{
+               Name:            task.Name,
+               Image:           task.Image,
+               ImagePullPolicy: "IfNotPresent",
+               Args:            task.Args,
+               Env:             task.Env,
+               VolumeMounts:    task.VolumeMounts,
+       })
+
+       action.addBaseTaskToPod(&task.BaseTask, pod)
+}
+
+func (action *schedulePodAction) addBaseTaskToPod(task *v1alpha1.BaseTask, pod 
*corev1.Pod) {
+       pod.Spec.Volumes = append(pod.Spec.Volumes, task.Volumes...)
+
+       if task.Affinity != nil {
+               // We may want to handle possible conflicts
+               pod.Spec.Affinity = task.Affinity
+       }
+}
diff --git a/pkg/controller/build/schedule_routine.go 
b/pkg/controller/build/schedule_routine.go
index aa591a0..6535687 100644
--- a/pkg/controller/build/schedule_routine.go
+++ b/pkg/controller/build/schedule_routine.go
@@ -19,8 +19,11 @@ package build
 
 import (
        "context"
+       "fmt"
        "sync"
 
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
        "sigs.k8s.io/controller-runtime/pkg/client"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
@@ -51,8 +54,7 @@ func (action *scheduleRoutineAction) Name() string {
 
 // CanHandle tells whether this action can handle the build
 func (action *scheduleRoutineAction) CanHandle(build *v1alpha1.Build) bool {
-       return build.Status.Phase == v1alpha1.BuildPhaseScheduling &&
-               build.Spec.Platform.Build.BuildStrategy == 
v1alpha1.IntegrationPlatformBuildStrategyRoutine
+       return build.Status.Phase == v1alpha1.BuildPhaseScheduling
 }
 
 // Handle handles the builds
@@ -88,21 +90,37 @@ func (action *scheduleRoutineAction) Handle(ctx 
context.Context, build *v1alpha1
                return nil, err
        }
 
-       // Start the build
-       progress := action.builder.Build(build.Spec)
-       // And follow the build progress asynchronously to avoid blocking the 
reconcile loop
+       // Start the build asynchronously to avoid blocking the reconcile loop
        go func() {
-               for status := range progress {
-                       target := build.DeepCopy()
-                       target.Status = status
-                       // Copy the failure field from the build to persist 
recovery state
-                       target.Status.Failure = build.Status.Failure
-                       // Patch the build status with the current progress
-                       err := action.client.Status().Patch(ctx, target, 
client.MergeFrom(build))
-                       if err != nil {
-                               action.L.Errorf(err, "Error while updating 
build status: %s", build.Name)
+               defer action.routines.Delete(build.Name)
+
+               status := v1alpha1.BuildStatus{
+                       Phase:     v1alpha1.BuildPhaseRunning,
+                       StartedAt: metav1.Now(),
+               }
+               if err := action.updateBuildStatus(ctx, build, status); err != 
nil {
+                       return
+               }
+
+               for i, task := range build.Spec.Tasks {
+                       if task.Builder == nil {
+                               status := v1alpha1.BuildStatus{
+                                       // Error the build directly as we know 
recovery won't work over ill-defined tasks
+                                       Phase: v1alpha1.BuildPhaseError,
+                                       Error: fmt.Sprintf("task cannot be 
executed using the routine strategy: %s", task.GetName()),
+                               }
+                               if err := action.updateBuildStatus(ctx, build, 
status); err != nil {
+                                       break
+                               }
+                       } else {
+                               status := action.builder.Run(*task.Builder)
+                               if i == len(build.Spec.Tasks)-1 {
+                                       status.Duration = 
metav1.Now().Sub(build.Status.StartedAt.Time).String()
+                               }
+                               if err := action.updateBuildStatus(ctx, build, 
status); err != nil {
+                                       break
+                               }
                        }
-                       build.Status = target.Status
                }
        }()
 
@@ -110,3 +128,18 @@ func (action *scheduleRoutineAction) Handle(ctx 
context.Context, build *v1alpha1
 
        return nil, nil
 }
+
+func (action *scheduleRoutineAction) updateBuildStatus(ctx context.Context, 
build *v1alpha1.Build, status v1alpha1.BuildStatus) error {
+       target := build.DeepCopy()
+       target.Status = status
+       // Copy the failure field from the build to persist recovery state
+       target.Status.Failure = build.Status.Failure
+       // Patch the build status with the current progress
+       err := action.client.Status().Patch(ctx, target, 
client.MergeFrom(build))
+       if err != nil {
+               action.L.Errorf(err, "Cannot update build status: %s", 
build.Name)
+               return err
+       }
+       build.Status = target.Status
+       return nil
+}
diff --git a/pkg/controller/build/util_pod.go b/pkg/controller/build/util_pod.go
deleted file mode 100644
index 6189e38..0000000
--- a/pkg/controller/build/util_pod.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package build
-
-import (
-       "context"
-
-       corev1 "k8s.io/api/core/v1"
-       k8serrors "k8s.io/apimachinery/pkg/api/errors"
-       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-       k8sclient "sigs.k8s.io/controller-runtime/pkg/client"
-
-       "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-)
-
-// deleteBuilderPod --
-func deleteBuilderPod(ctx context.Context, client k8sclient.Writer, build 
*v1alpha1.Build) error {
-       pod := corev1.Pod{
-               TypeMeta: metav1.TypeMeta{
-                       APIVersion: corev1.SchemeGroupVersion.String(),
-                       Kind:       "Pod",
-               },
-               ObjectMeta: metav1.ObjectMeta{
-                       Namespace: build.Namespace,
-                       Name:      buildPodName(build.Spec.Meta),
-                       Labels: map[string]string{
-                               "camel.apache.org/build": build.Name,
-                       },
-               },
-       }
-
-       err := client.Delete(ctx, &pod)
-       if err != nil && k8serrors.IsNotFound(err) {
-               return nil
-       }
-
-       return err
-}
-
-// getBuilderPod --
-func getBuilderPod(ctx context.Context, client k8sclient.Reader, build 
*v1alpha1.Build) (*corev1.Pod, error) {
-       key := k8sclient.ObjectKey{Namespace: build.Namespace, Name: 
buildPodName(build.ObjectMeta)}
-       pod := corev1.Pod{}
-
-       err := client.Get(ctx, key, &pod)
-       if err != nil && k8serrors.IsNotFound(err) {
-               return nil, nil
-       }
-       if err != nil {
-               return nil, err
-       }
-
-       return &pod, nil
-}
-
-func buildPodName(object metav1.ObjectMeta) string {
-       return "camel-k-" + object.Name + "-builder"
-}
diff --git a/pkg/controller/integrationkit/build.go 
b/pkg/controller/integrationkit/build.go
index ce43068..f234343 100644
--- a/pkg/controller/integrationkit/build.go
+++ b/pkg/controller/integrationkit/build.go
@@ -21,17 +21,16 @@ import (
        "context"
        "fmt"
 
-       "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-       "github.com/apache/camel-k/pkg/builder"
-       "github.com/apache/camel-k/pkg/trait"
-       "github.com/apache/camel-k/pkg/util/kubernetes"
+       "github.com/pkg/errors"
 
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
        "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
-       "github.com/pkg/errors"
+       "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+       "github.com/apache/camel-k/pkg/trait"
+       "github.com/apache/camel-k/pkg/util/kubernetes"
 )
 
 // NewBuildAction creates a new build request handling action for the kit
@@ -84,23 +83,15 @@ func (action *buildAction) handleBuildSubmitted(ctx 
context.Context, kit *v1alph
 
                build = &v1alpha1.Build{
                        TypeMeta: metav1.TypeMeta{
-                               APIVersion: "camel.apache.org/v1alpha1",
-                               Kind:       "Build",
+                               APIVersion: 
v1alpha1.SchemeGroupVersion.String(),
+                               Kind:       v1alpha1.BuildKind,
                        },
                        ObjectMeta: metav1.ObjectMeta{
                                Namespace: kit.Namespace,
                                Name:      kit.Name,
                        },
                        Spec: v1alpha1.BuildSpec{
-                               Meta:            kit.ObjectMeta,
-                               CamelVersion:    env.CamelCatalog.Version,
-                               RuntimeVersion:  
env.CamelCatalog.RuntimeVersion,
-                               RuntimeProvider: 
env.CamelCatalog.RuntimeProvider,
-                               Platform:        
env.Platform.Status.IntegrationPlatformSpec,
-                               Dependencies:    kit.Spec.Dependencies,
-                               // TODO: sort for easy read
-                               Steps:    builder.StepIDsFor(env.Steps...),
-                               BuildDir: env.BuildDir,
+                               Tasks: env.BuildTasks,
                        },
                }
 
@@ -142,7 +133,7 @@ func (action *buildAction) handleBuildRunning(ctx 
context.Context, kit *v1alpha1
                // if not there is a chance that the kit has been modified by 
the user
                if kit.Status.Phase != v1alpha1.IntegrationKitPhaseBuildRunning 
{
                        return nil, fmt.Errorf("found kit %s not in the 
expected phase (expectd=%s, found=%s)",
-                               build.Spec.Meta.Name,
+                               kit.Name,
                                
string(v1alpha1.IntegrationKitPhaseBuildRunning),
                                string(kit.Status.Phase),
                        )
@@ -168,7 +159,7 @@ func (action *buildAction) handleBuildRunning(ctx 
context.Context, kit *v1alpha1
                // if not there is a chance that the kit has been modified by 
the user
                if kit.Status.Phase != v1alpha1.IntegrationKitPhaseBuildRunning 
{
                        return nil, fmt.Errorf("found kit %s not the an 
expected phase (expectd=%s, found=%s)",
-                               build.Spec.Meta.Name,
+                               kit.Name,
                                
string(v1alpha1.IntegrationKitPhaseBuildRunning),
                                string(kit.Status.Phase),
                        )
diff --git a/pkg/controller/integrationplatform/initialize.go 
b/pkg/controller/integrationplatform/initialize.go
index ce90c2e..dd50ef2 100644
--- a/pkg/controller/integrationplatform/initialize.go
+++ b/pkg/controller/integrationplatform/initialize.go
@@ -70,15 +70,13 @@ func (action *initializeAction) Handle(ctx context.Context, 
platform *v1alpha1.I
        }
 
        if platform.Status.Build.PublishStrategy == 
v1alpha1.IntegrationPlatformBuildPublishStrategyKaniko {
-               // Create the persistent volume claim used to coordinate build 
pod output
-               // with Kaniko cache and build input
-               action.L.Info("Create persistent volume claim")
-               err := createPersistentVolumeClaim(ctx, action.client, platform)
-               if err != nil {
-                       return nil, err
-               }
-
                if platform.Status.Build.IsKanikoCacheEnabled() {
+                       // Create the persistent volume claim used by the 
Kaniko cache
+                       action.L.Info("Create persistent volume claim")
+                       err := createPersistentVolumeClaim(ctx, action.client, 
platform)
+                       if err != nil {
+                               return nil, err
+                       }
                        // Create the Kaniko warmer pod that caches the base 
image into the Camel K builder volume
                        action.L.Info("Create Kaniko cache warmer pod")
                        err = createKanikoCacheWarmerPod(ctx, action.client, 
platform)
diff --git a/pkg/controller/integrationplatform/kaniko_cache.go 
b/pkg/controller/integrationplatform/kaniko_cache.go
index 7ff25f6..2d5674d 100644
--- a/pkg/controller/integrationplatform/kaniko_cache.go
+++ b/pkg/controller/integrationplatform/kaniko_cache.go
@@ -21,6 +21,8 @@ import (
        "context"
        "fmt"
 
+       "github.com/pkg/errors"
+
        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,8 +31,6 @@ import (
        "github.com/apache/camel-k/pkg/builder/kaniko"
        "github.com/apache/camel-k/pkg/client"
        "github.com/apache/camel-k/pkg/util/defaults"
-
-       "github.com/pkg/errors"
 )
 
 func createKanikoCacheWarmerPod(ctx context.Context, client client.Client, 
platform *v1alpha1.IntegrationPlatform) error {
@@ -80,7 +80,7 @@ func createKanikoCacheWarmerPod(ctx context.Context, client 
client.Client, platf
                                        VolumeMounts: []corev1.VolumeMount{
                                                {
                                                        Name:      
"camel-k-builder",
-                                                       MountPath: "/workspace",
+                                                       MountPath: 
kaniko.BuildDir,
                                                },
                                        },
                                },
diff --git a/pkg/trait/builder.go b/pkg/trait/builder.go
index 096fc07..3001ca2 100644
--- a/pkg/trait/builder.go
+++ b/pkg/trait/builder.go
@@ -18,12 +18,23 @@ limitations under the License.
 package trait
 
 import (
+       "fmt"
+       "path"
+       "strconv"
+
+       "github.com/pkg/errors"
+
+       corev1 "k8s.io/api/core/v1"
+
+       "sigs.k8s.io/controller-runtime/pkg/client"
+
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        "github.com/apache/camel-k/pkg/builder"
        "github.com/apache/camel-k/pkg/builder/kaniko"
        "github.com/apache/camel-k/pkg/builder/runtime"
        "github.com/apache/camel-k/pkg/builder/s2i"
        "github.com/apache/camel-k/pkg/platform"
+       "github.com/apache/camel-k/pkg/util/defaults"
 )
 
 // The builder trait is internally used to determine the best strategy to
@@ -49,22 +60,80 @@ func (t *builderTrait) Configure(e *Environment) (bool, 
error) {
 }
 
 func (t *builderTrait) Apply(e *Environment) error {
-       e.Steps = append(e.Steps, builder.DefaultSteps...)
-       if platform.SupportsS2iPublishStrategy(e.Platform) {
-               e.Steps = append(e.Steps, s2i.S2iSteps...)
-       } else if platform.SupportsKanikoPublishStrategy(e.Platform) {
-               e.Steps = append(e.Steps, kaniko.KanikoSteps...)
-               e.BuildDir = kaniko.BuildDir
-       }
+       camelTask := t.camelTask(e)
+       e.BuildTasks = append(e.BuildTasks, v1alpha1.Task{Builder: camelTask})
 
-       quarkus := e.Catalog.GetTrait("quarkus").(*quarkusTrait)
+       if platform.SupportsKanikoPublishStrategy(e.Platform) {
+               kanikoTask, err := t.kanikoTask(e)
+               if err != nil {
+                       return err
+               }
+               mount := corev1.VolumeMount{Name: "camel-k-builder", MountPath: 
kaniko.BuildDir}
+               camelTask.VolumeMounts = append(camelTask.VolumeMounts, mount)
+               kanikoTask.VolumeMounts = append(kanikoTask.VolumeMounts, mount)
 
-       if quarkus.isEnabled() {
-               // Add build steps for Quarkus runtime
-               quarkus.addBuildSteps(e)
-       } else {
-               // Add build steps for default runtime
-               e.Steps = append(e.Steps, runtime.MainSteps...)
+               if e.Platform.Status.Build.IsKanikoCacheEnabled() {
+                       // Co-locate with the Kaniko warmer pod for sharing the 
host path volume as the current
+                       // persistent volume claim uses the default storage 
class which is likely relying
+                       // on the host path provisioner.
+                       // This has to be done manually by retrieving the 
Kaniko warmer pod node name and using
+                       // node affinity as pod affinity only works for running 
pods and the Kaniko warmer pod
+                       // has already completed at that stage.
+
+                       // Locate the kaniko warmer pod
+                       pods := &corev1.PodList{}
+                       err := e.Client.List(e.C, pods,
+                               client.InNamespace(e.Platform.Namespace),
+                               client.MatchingLabels{
+                                       "camel.apache.org/component": 
"kaniko-warmer",
+                               })
+                       if err != nil {
+                               return err
+                       }
+
+                       if len(pods.Items) != 1 {
+                               return errors.New("failed to locate the Kaniko 
cache warmer pod")
+                       }
+
+                       // Use node affinity with the Kaniko warmer pod node 
name
+                       kanikoTask.Affinity = &corev1.Affinity{
+                               NodeAffinity: &corev1.NodeAffinity{
+                                       
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+                                               NodeSelectorTerms: 
[]corev1.NodeSelectorTerm{
+                                                       {
+                                                               
MatchExpressions: []corev1.NodeSelectorRequirement{
+                                                                       {
+                                                                               
Key:      "kubernetes.io/hostname",
+                                                                               
Operator: "In",
+                                                                               
Values:   []string{pods.Items[0].Spec.NodeName},
+                                                                       },
+                                                               },
+                                                       },
+                                               },
+                                       },
+                               },
+                       }
+                       // Use the PVC used to warm the Kaniko cache to 
coordinate the Camel Maven build and the Kaniko image build
+                       camelTask.Volumes = append(camelTask.Volumes, 
corev1.Volume{
+                               Name: "camel-k-builder",
+                               VolumeSource: corev1.VolumeSource{
+                                       PersistentVolumeClaim: 
&corev1.PersistentVolumeClaimVolumeSource{
+                                               ClaimName: 
e.Platform.Spec.Build.PersistentVolumeClaim,
+                                       },
+                               },
+                       })
+               } else {
+                       // Use an emptyDir volume to coordinate the Camel Maven 
build and the Kaniko image build
+                       camelTask.Volumes = append(camelTask.Volumes, 
corev1.Volume{
+                               Name: "camel-k-builder",
+                               VolumeSource: corev1.VolumeSource{
+                                       EmptyDir: &corev1.EmptyDirVolumeSource{
+                                       },
+                               },
+                       })
+               }
+
+               e.BuildTasks = append(e.BuildTasks, v1alpha1.Task{Kaniko: 
kanikoTask})
        }
 
        return nil
@@ -79,3 +148,201 @@ func (t *builderTrait) IsPlatformTrait() bool {
// InfluencesKit returns true: the builder trait takes part in the
// integration kit build.
func (t *builderTrait) InfluencesKit() bool {
	return true
}
+
// camelTask creates the builder task that runs the Camel Maven build for
// the integration kit. It carries the kit metadata and dependencies, the
// catalog versions, the ordered list of build step IDs, and the Maven
// settings, build properties and timeout taken from the platform.
func (t *builderTrait) camelTask(e *Environment) *v1alpha1.BuilderTask {
	task := &v1alpha1.BuilderTask{
		BaseTask: v1alpha1.BaseTask{
			Name: "camel",
		},
		Meta:            e.IntegrationKit.ObjectMeta,
		BaseImage:       e.Platform.Status.Build.BaseImage,
		CamelVersion:    e.CamelCatalog.Version,
		RuntimeVersion:  e.CamelCatalog.RuntimeVersion,
		RuntimeProvider: e.CamelCatalog.RuntimeProvider,
		//Sources:               e.Integration.Spec.Sources,
		//Resources:     e.Integration.Spec.Resources,
		Dependencies: e.IntegrationKit.Spec.Dependencies,
		//TODO: sort steps for easier read
		Steps:      builder.StepIDsFor(builder.DefaultSteps...),
		Properties: e.Platform.Status.Build.Properties,
		Timeout:    e.Platform.Status.Build.GetTimeout(),
		Maven:      e.Platform.Status.Build.Maven,
	}

	// Append the publish-strategy specific steps; for Kaniko the task also
	// gets the build directory shared with the Kaniko image build task
	if platform.SupportsS2iPublishStrategy(e.Platform) {
		task.Steps = append(task.Steps, builder.StepIDsFor(s2i.S2iSteps...)...)
	} else if platform.SupportsKanikoPublishStrategy(e.Platform) {
		task.Steps = append(task.Steps, builder.StepIDsFor(kaniko.KanikoSteps...)...)
		task.BuildDir = kaniko.BuildDir
	}

	quarkus := e.Catalog.GetTrait("quarkus").(*quarkusTrait)
	if quarkus.isEnabled() {
		// Add build steps for Quarkus runtime
		quarkus.addBuildSteps(task)
	} else {
		// Add build steps for default runtime
		task.Steps = append(task.Steps, builder.StepIDsFor(runtime.MainSteps...)...)
	}

	return task
}
+
+func (t *builderTrait) kanikoTask(e *Environment) (*v1alpha1.KanikoTask, 
error) {
+       organization := e.Platform.Status.Build.Registry.Organization
+       if organization == "" {
+               organization = e.Platform.Namespace
+       }
+       image := e.Platform.Status.Build.Registry.Address + "/" + organization 
+ "/camel-k-" + e.IntegrationKit.Name + ":" + e.IntegrationKit.ResourceVersion
+
+       env := make([]corev1.EnvVar, 0)
+       baseArgs := []string{
+               "--dockerfile=Dockerfile",
+               "--context=" + path.Join(kaniko.BuildDir, "package", "context"),
+               "--destination=" + image,
+               "--cache=" + 
strconv.FormatBool(e.Platform.Status.Build.IsKanikoCacheEnabled()),
+               "--cache-dir=" + path.Join(kaniko.BuildDir, "cache"),
+       }
+
+       args := make([]string, 0, len(baseArgs))
+       args = append(args, baseArgs...)
+
+       if e.Platform.Status.Build.Registry.Insecure {
+               args = append(args, "--insecure")
+               args = append(args, "--insecure-pull")
+       }
+
+       volumes := make([]corev1.Volume, 0)
+       volumeMounts := make([]corev1.VolumeMount, 0)
+
+       if e.Platform.Status.Build.Registry.Secret != "" {
+               secretKind, err := getSecretKind(e)
+               if err != nil {
+                       return nil, err
+               }
+
+               volumes = append(volumes, corev1.Volume{
+                       Name: "kaniko-secret",
+                       VolumeSource: corev1.VolumeSource{
+                               Secret: &corev1.SecretVolumeSource{
+                                       SecretName: 
e.Platform.Status.Build.Registry.Secret,
+                                       Items: []corev1.KeyToPath{
+                                               {
+                                                       Key:  
secretKind.fileName,
+                                                       Path: 
secretKind.destination,
+                                               },
+                                       },
+                               },
+                       },
+               })
+
+               volumeMounts = append(volumeMounts, corev1.VolumeMount{
+                       Name:      "kaniko-secret",
+                       MountPath: secretKind.mountPath,
+               })
+
+               if secretKind.refEnv != "" {
+                       env = append(env, corev1.EnvVar{
+                               Name:  secretKind.refEnv,
+                               Value: path.Join(secretKind.mountPath, 
secretKind.destination),
+                       })
+               }
+               args = baseArgs
+       }
+
+       if e.Platform.Status.Build.HTTPProxySecret != "" {
+               optional := true
+               env = append(env, corev1.EnvVar{
+                       Name: "HTTP_PROXY",
+                       ValueFrom: &corev1.EnvVarSource{
+                               SecretKeyRef: &corev1.SecretKeySelector{
+                                       LocalObjectReference: 
corev1.LocalObjectReference{
+                                               Name: 
e.Platform.Status.Build.HTTPProxySecret,
+                                       },
+                                       Key:      "HTTP_PROXY",
+                                       Optional: &optional,
+                               },
+                       },
+               })
+               env = append(env, corev1.EnvVar{
+                       Name: "HTTPS_PROXY",
+                       ValueFrom: &corev1.EnvVarSource{
+                               SecretKeyRef: &corev1.SecretKeySelector{
+                                       LocalObjectReference: 
corev1.LocalObjectReference{
+                                               Name: 
e.Platform.Status.Build.HTTPProxySecret,
+                                       },
+                                       Key:      "HTTPS_PROXY",
+                                       Optional: &optional,
+                               },
+                       },
+               })
+               env = append(env, corev1.EnvVar{
+                       Name: "NO_PROXY",
+                       ValueFrom: &corev1.EnvVarSource{
+                               SecretKeyRef: &corev1.SecretKeySelector{
+                                       LocalObjectReference: 
corev1.LocalObjectReference{
+                                               Name: 
e.Platform.Status.Build.HTTPProxySecret,
+                                       },
+                                       Key:      "NO_PROXY",
+                                       Optional: &optional,
+                               },
+                       },
+               })
+       }
+
+       return &v1alpha1.KanikoTask{
+               ImageTask: v1alpha1.ImageTask{
+                       BaseTask: v1alpha1.BaseTask{
+                               Name:         "kaniko",
+                               Volumes:      volumes,
+                               VolumeMounts: volumeMounts,
+                       },
+                       Image: 
fmt.Sprintf("gcr.io/kaniko-project/executor:v%s", defaults.KanikoVersion),
+                       Args:  args,
+                       Env:   env,
+               },
+               BuiltImage: image,
+       }, nil
+}
+
// secretKind describes the layout of a registry authentication secret and
// how it has to be projected into the Kaniko container.
type secretKind struct {
	// fileName is the data key expected to be present in the Kubernetes secret
	fileName string
	// mountPath is where the secret volume is mounted in the container
	mountPath string
	// destination is the file name the secret key is projected to in the mount
	destination string
	// refEnv, when non-empty, is an environment variable set to the path of
	// the projected credentials file
	refEnv string
}
+
var (
	// Google Container Registry: a service account key file, located through
	// the GOOGLE_APPLICATION_CREDENTIALS environment variable
	secretKindGCR = secretKind{
		fileName:    "kaniko-secret.json",
		mountPath:   "/secret",
		destination: "kaniko-secret.json",
		refEnv:      "GOOGLE_APPLICATION_CREDENTIALS",
	}
	// Docker registry credentials stored under a plain "config.json" key
	secretKindPlainDocker = secretKind{
		fileName:    "config.json",
		mountPath:   "/kaniko/.docker",
		destination: "config.json",
	}
	// Standard kubernetes.io/dockerconfigjson secret, projected as config.json
	secretKindStandardDocker = secretKind{
		fileName:    corev1.DockerConfigJsonKey,
		mountPath:   "/kaniko/.docker",
		destination: "config.json",
	}

	// allSecretKinds is the probe order used by getSecretKind
	allSecretKinds = []secretKind{secretKindGCR, secretKindPlainDocker, secretKindStandardDocker}
)
+
+func getSecretKind(e *Environment) (secretKind, error) {
+       secret := corev1.Secret{}
+       err := e.Client.Get(e.C, client.ObjectKey{Namespace: 
e.Platform.Namespace, Name: e.Platform.Status.Build.Registry.Secret}, &secret)
+       if err != nil {
+               return secretKind{}, err
+       }
+       for _, k := range allSecretKinds {
+               if _, ok := secret.Data[k.fileName]; ok {
+                       return k, nil
+               }
+       }
+       return secretKind{}, errors.New("unsupported secret type for registry 
authentication")
+}
diff --git a/pkg/trait/builder_test.go b/pkg/trait/builder_test.go
index d2ec6c3..ebe6f9f 100644
--- a/pkg/trait/builder_test.go
+++ b/pkg/trait/builder_test.go
@@ -28,7 +28,6 @@ import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-       "github.com/apache/camel-k/pkg/builder"
        "github.com/apache/camel-k/pkg/builder/kaniko"
        "github.com/apache/camel-k/pkg/builder/s2i"
        "github.com/apache/camel-k/pkg/util/camel"
@@ -52,7 +51,7 @@ func TestBuilderTraitNotAppliedBecauseOfNilKit(t *testing.T) {
                        assert.Nil(t, err)
                        assert.NotEmpty(t, e.ExecutedTraits)
                        assert.Nil(t, e.GetTrait("builder"))
-                       assert.Empty(t, e.Steps)
+                       assert.Empty(t, e.BuildTasks)
                })
        }
 }
@@ -73,7 +72,7 @@ func TestBuilderTraitNotAppliedBecauseOfNilPhase(t 
*testing.T) {
                        assert.Nil(t, err)
                        assert.NotEmpty(t, e.ExecutedTraits)
                        assert.Nil(t, e.GetTrait("builder"))
-                       assert.Empty(t, e.Steps)
+                       assert.Empty(t, e.BuildTasks)
                })
        }
 }
@@ -85,11 +84,12 @@ func TestS2IBuilderTrait(t *testing.T) {
        assert.Nil(t, err)
        assert.NotEmpty(t, env.ExecutedTraits)
        assert.NotNil(t, env.GetTrait("builder"))
-       assert.NotEmpty(t, env.Steps)
-       assert.Len(t, env.Steps, 8)
+       assert.NotEmpty(t, env.BuildTasks)
+       assert.Len(t, env.BuildTasks, 1)
+       assert.NotNil(t, env.BuildTasks[0].Builder)
        assert.Condition(t, func() bool {
-               for _, s := range env.Steps {
-                       if s == s2i.Steps.Publisher && s.Phase() == 
builder.ApplicationPublishPhase {
+               for _, s := range env.BuildTasks[0].Builder.Steps {
+                       if s == s2i.Steps.Publisher.ID() {
                                return true
                        }
                }
@@ -105,17 +105,19 @@ func TestKanikoBuilderTrait(t *testing.T) {
        assert.Nil(t, err)
        assert.NotEmpty(t, env.ExecutedTraits)
        assert.NotNil(t, env.GetTrait("builder"))
-       assert.NotEmpty(t, env.Steps)
-       assert.Len(t, env.Steps, 8)
+       assert.NotEmpty(t, env.BuildTasks)
+       assert.Len(t, env.BuildTasks, 2)
+       assert.NotNil(t, env.BuildTasks[0].Builder)
        assert.Condition(t, func() bool {
-               for _, s := range env.Steps {
-                       if s == kaniko.Steps.Publisher && s.Phase() == 
builder.ApplicationPublishPhase {
+               for _, s := range env.BuildTasks[0].Builder.Steps {
+                       if s == kaniko.Steps.Publisher.ID() {
                                return true
                        }
                }
 
                return false
        })
+       assert.NotNil(t, env.BuildTasks[1].Kaniko)
 }
 
 func createBuilderTestEnv(cluster v1alpha1.IntegrationPlatformCluster, 
strategy v1alpha1.IntegrationPlatformBuildPublishStrategy) *Environment {
@@ -124,6 +126,7 @@ func createBuilderTestEnv(cluster 
v1alpha1.IntegrationPlatformCluster, strategy
                panic(err)
        }
 
+       kanikoCache := false
        res := &Environment{
                C:            context.TODO(),
                CamelCatalog: c,
@@ -146,9 +149,10 @@ func createBuilderTestEnv(cluster 
v1alpha1.IntegrationPlatformCluster, strategy
                        Spec: v1alpha1.IntegrationPlatformSpec{
                                Cluster: cluster,
                                Build: v1alpha1.IntegrationPlatformBuildSpec{
-                                       PublishStrategy: strategy,
-                                       Registry:        
v1alpha1.IntegrationPlatformRegistrySpec{Address: "registry"},
-                                       CamelVersion:    
defaults.DefaultCamelVersion,
+                                       PublishStrategy:  strategy,
+                                       Registry:         
v1alpha1.IntegrationPlatformRegistrySpec{Address: "registry"},
+                                       CamelVersion:     
defaults.DefaultCamelVersion,
+                                       KanikoBuildCache: &kanikoCache,
                                },
                        },
                },
diff --git a/pkg/trait/deployer.go b/pkg/trait/deployer.go
index 6ba76a1..33d288a 100644
--- a/pkg/trait/deployer.go
+++ b/pkg/trait/deployer.go
@@ -18,20 +18,15 @@ limitations under the License.
 package trait
 
 import (
-       "reflect"
-
        "github.com/pkg/errors"
 
-       jsonpatch "github.com/evanphx/json-patch"
-
-       "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/types"
-       "k8s.io/apimachinery/pkg/util/json"
 
        "sigs.k8s.io/controller-runtime/pkg/client"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
        "github.com/apache/camel-k/pkg/util/kubernetes"
+       "github.com/apache/camel-k/pkg/util/patch"
 )
 
 // The deployer trait can be used to explicitly select the kind of high level 
resource that
@@ -87,15 +82,15 @@ func (t *deployerTrait) Apply(e *Environment) error {
                                        return err
                                }
 
-                               patch, err := positiveMergePatch(object, 
resource)
+                               p, err := patch.PositiveMergePatch(object, 
resource)
                                if err != nil {
                                        return err
-                               } else if len(patch) == 0 {
+                               } else if len(p) == 0 {
                                        // Avoid triggering a patch request for 
nothing
                                        continue
                                }
 
-                               err = env.Client.Patch(env.C, resource, 
client.ConstantPatch(types.MergePatchType, patch))
+                               err = env.Client.Patch(env.C, resource, 
client.ConstantPatch(types.MergePatchType, p))
                                if err != nil {
                                        return errors.Wrap(err, "error during 
patch resource")
                                }
@@ -116,67 +111,3 @@ func (t *deployerTrait) IsPlatformTrait() bool {
 func (t *deployerTrait) RequiresIntegrationPlatform() bool {
        return false
 }
-
-func positiveMergePatch(source runtime.Object, target runtime.Object) ([]byte, 
error) {
-       sourceJSON, err := json.Marshal(source)
-       if err != nil {
-               return nil, err
-       }
-
-       targetJSON, err := json.Marshal(target)
-       if err != nil {
-               return nil, err
-       }
-
-       mergePatch, err := jsonpatch.CreateMergePatch(sourceJSON, targetJSON)
-       if err != nil {
-               return nil, err
-       }
-
-       var positivePatch map[string]interface{}
-       err = json.Unmarshal(mergePatch, &positivePatch)
-       if err != nil {
-               return nil, err
-       }
-
-       // The following is a work-around to remove null fields from the JSON 
merge patch,
-       // so that values defaulted by controllers server-side are not deleted.
-       // It's generally acceptable as these values are orthogonal to the 
values managed
-       // by the traits.
-       removeNilValues(reflect.ValueOf(positivePatch), reflect.Value{})
-
-       // Return an empty patch if no keys remain
-       if len(positivePatch) == 0 {
-               return make([]byte, 0), nil
-       }
-
-       return json.Marshal(positivePatch)
-}
-
-func removeNilValues(v reflect.Value, parent reflect.Value) {
-       for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
-               v = v.Elem()
-       }
-       switch v.Kind() {
-       case reflect.Array, reflect.Slice:
-               for i := 0; i < v.Len(); i++ {
-                       removeNilValues(v.Index(i), v)
-               }
-       case reflect.Map:
-               for _, k := range v.MapKeys() {
-                       switch c := v.MapIndex(k); {
-                       case !c.IsValid():
-                               // Skip keys previously deleted
-                               continue
-                       case c.IsNil(), c.Elem().Kind() == reflect.Map && 
len(c.Elem().MapKeys()) == 0:
-                               v.SetMapIndex(k, reflect.Value{})
-                       default:
-                               removeNilValues(c, v)
-                       }
-               }
-               // Back process the parent map in case it has been emptied so 
that it's deleted as well
-               if len(v.MapKeys()) == 0 && parent.Kind() == reflect.Map {
-                       removeNilValues(parent, reflect.Value{})
-               }
-       }
-}
diff --git a/pkg/trait/quarkus.go b/pkg/trait/quarkus.go
index 97d7395..3b75eed 100644
--- a/pkg/trait/quarkus.go
+++ b/pkg/trait/quarkus.go
@@ -26,6 +26,7 @@ import (
        k8serrors "k8s.io/apimachinery/pkg/api/errors"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
+       "github.com/apache/camel-k/pkg/builder"
        "github.com/apache/camel-k/pkg/builder/runtime"
        "github.com/apache/camel-k/pkg/metadata"
        "github.com/apache/camel-k/pkg/util"
@@ -142,11 +143,11 @@ func (t *quarkusTrait) loadOrCreateCatalog(e 
*Environment, camelVersion string,
        return nil
 }
 
-func (t *quarkusTrait) addBuildSteps(e *Environment) {
-       e.Steps = append(e.Steps, runtime.QuarkusSteps...)
// addBuildSteps appends the Quarkus runtime build step IDs to the given
// builder task.
func (t *quarkusTrait) addBuildSteps(task *v1alpha1.BuilderTask) {
	task.Steps = append(task.Steps, builder.StepIDsFor(runtime.QuarkusSteps...)...)
}
 
-func (t *quarkusTrait) addClasspath(e *Environment) {
// addClasspath intentionally does nothing for the Quarkus runtime.
func (t *quarkusTrait) addClasspath(_ *Environment) {
	// No-op as we rely on the Quarkus runner
}
 
diff --git a/pkg/trait/trait_types.go b/pkg/trait/trait_types.go
index 6afb47f..fa7401a 100644
--- a/pkg/trait/trait_types.go
+++ b/pkg/trait/trait_types.go
@@ -33,7 +33,6 @@ import (
        "k8s.io/apimachinery/pkg/runtime"
 
        "github.com/apache/camel-k/pkg/apis/camel/v1alpha1"
-       "github.com/apache/camel-k/pkg/builder"
        "github.com/apache/camel-k/pkg/client"
        "github.com/apache/camel-k/pkg/metadata"
        "github.com/apache/camel-k/pkg/platform"
@@ -142,8 +141,7 @@ type Environment struct {
        Resources      *kubernetes.Collection
        PostActions    []func(*Environment) error
        PostProcessors []func(*Environment) error
-       Steps          []builder.Step
-       BuildDir       string
+       BuildTasks     []v1alpha1.Task
        ExecutedTraits []Trait
        EnvVars        []corev1.EnvVar
        Classpath      *strset.Set
diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go
new file mode 100644
index 0000000..e0ae632
--- /dev/null
+++ b/pkg/util/patch/patch.go
@@ -0,0 +1,91 @@
+/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package patch
+
+import (
+       "reflect"
+
+       jsonpatch "github.com/evanphx/json-patch"
+
+       "k8s.io/apimachinery/pkg/runtime"
+       "k8s.io/apimachinery/pkg/util/json"
+)
+
+func PositiveMergePatch(source runtime.Object, target runtime.Object) ([]byte, 
error) {
+       sourceJSON, err := json.Marshal(source)
+       if err != nil {
+               return nil, err
+       }
+
+       targetJSON, err := json.Marshal(target)
+       if err != nil {
+               return nil, err
+       }
+
+       mergePatch, err := jsonpatch.CreateMergePatch(sourceJSON, targetJSON)
+       if err != nil {
+               return nil, err
+       }
+
+       var positivePatch map[string]interface{}
+       err = json.Unmarshal(mergePatch, &positivePatch)
+       if err != nil {
+               return nil, err
+       }
+
+       // The following is a work-around to remove null fields from the JSON 
merge patch,
+       // so that values defaulted by controllers server-side are not deleted.
+       // It's generally acceptable as these values are orthogonal to the 
values managed
+       // by the traits.
+       removeNilValues(reflect.ValueOf(positivePatch), reflect.Value{})
+
+       // Return an empty patch if no keys remain
+       if len(positivePatch) == 0 {
+               return make([]byte, 0), nil
+       }
+
+       return json.Marshal(positivePatch)
+}
+
+func removeNilValues(v reflect.Value, parent reflect.Value) {
+       for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+               v = v.Elem()
+       }
+       switch v.Kind() {
+       case reflect.Array, reflect.Slice:
+               for i := 0; i < v.Len(); i++ {
+                       removeNilValues(v.Index(i), v)
+               }
+       case reflect.Map:
+               for _, k := range v.MapKeys() {
+                       switch c := v.MapIndex(k); {
+                       case !c.IsValid():
+                               // Skip keys previously deleted
+                               continue
+                       case c.IsNil(), c.Elem().Kind() == reflect.Map && 
len(c.Elem().MapKeys()) == 0:
+                               v.SetMapIndex(k, reflect.Value{})
+                       default:
+                               removeNilValues(c, v)
+                       }
+               }
+               // Back process the parent map in case it has been emptied so 
that it's deleted as well
+               if len(v.MapKeys()) == 0 && parent.Kind() == reflect.Map {
+                       removeNilValues(parent, reflect.Value{})
+               }
+       }
+}

Reply via email to