This is an automated email from the ASF dual-hosted git repository.

pcongiusti pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/camel-k.git

commit 68ba23144cb9c880408cf418149d0f36a493d678
Author: Pasquale Congiusti <pasquale.congiu...@gmail.com>
AuthorDate: Fri Feb 16 11:13:38 2024 +0100

    chore(ci): remove vfsgen dependencies
---
 cmd/util/doc-gen/generators/traitmetadatagen.go    |   54 -
 cmd/util/vfs-gen/main.go                           |  257 ---
 cmd/util/vfs-gen/multifs/multidir.go               |  171 --
 .../bases/camel-k.clusterserviceversion.yaml       |   39 -
 go.mod                                             |    2 -
 go.sum                                             |    4 -
 java/crds/src/main/resources                       |    2 +-
 .../bases/camel-k.clusterserviceversion.yaml       |    2 +-
 pkg/resources/resources/traits.yaml                | 1683 --------------------
 pkg/resources/resources_support.go                 |   17 +-
 pkg/resources/resources_test.go                    |    1 -
 script/Makefile                                    |    6 +-
 12 files changed, 10 insertions(+), 2228 deletions(-)

diff --git a/cmd/util/doc-gen/generators/traitmetadatagen.go b/cmd/util/doc-gen/generators/traitmetadatagen.go
index c68caff56..49c987bc3 100644
--- a/cmd/util/doc-gen/generators/traitmetadatagen.go
+++ b/cmd/util/doc-gen/generators/traitmetadatagen.go
@@ -20,39 +20,14 @@ package generators
 import (
        "fmt"
        "io"
-       "os"
-       "path"
        "reflect"
-       "sort"
        "strings"
 
-       "github.com/apache/camel-k/v2/pkg/util"
-
-       "gopkg.in/yaml.v2"
        "k8s.io/gengo/args"
        "k8s.io/gengo/generator"
        "k8s.io/gengo/types"
 )
 
-const traitFile = "traits.yaml"
-
-const licenseHeader = "# ---------------------------------------------------------------------------\n" +
-       "# Licensed to the Apache Software Foundation (ASF) under one or more\n" +
-       "# contributor license agreements.  See the NOTICE file distributed with\n" +
-       "# this work for additional information regarding copyright ownership.\n" +
-       "# The ASF licenses this file to You under the Apache License, Version 2.0\n" +
-       "# (the \"License\"); you may not use this file except in compliance with\n" +
-       "# the License.  You may obtain a copy of the License at\n" +
-       "#\n" +
-       "#      http://www.apache.org/licenses/LICENSE-2.0\n" +
-       "#\n" +
-       "# Unless required by applicable law or agreed to in writing, software\n" +
-       "# distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
-       "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
-       "# See the License for the specific language governing permissions and\n" +
-       "# limitations under the License.\n" +
-       "# ---------------------------------------------------------------------------\n"
-
 // traitMetaDataGen produces YAML documentation about trait descriptions.
 type traitMetaDataGen struct {
        generator.DefaultGen
@@ -112,35 +87,6 @@ func (g *traitMetaDataGen) GenerateType(context *generator.Context, t *types.Typ
        return nil
 }
 
-func (g *traitMetaDataGen) Finalize(c *generator.Context, w io.Writer) error {
-       customArgs, ok := g.arguments.CustomArgs.(*CustomArgs)
-       if !ok {
-               return fmt.Errorf("type assertion failed: %v", g.arguments.CustomArgs)
-       }
-       deployDir := customArgs.ResourceDir
-       filename := path.Join(deployDir, traitFile)
-
-       // reorder the traits metadata so that it always gets the identical result
-       sort.Slice(g.Root.Traits, func(i, j int) bool {
-               return g.Root.Traits[i].Name < g.Root.Traits[j].Name
-       })
-
-       return util.WithFile(filename, os.O_RDWR|os.O_CREATE, 0o777, func(file *os.File) error {
-               if err := file.Truncate(0); err != nil {
-                       return err
-               }
-
-               fmt.Fprintf(file, "%s", string(licenseHeader))
-               data, err := yaml.Marshal(g.Root)
-               if err != nil {
-                       fmt.Fprintf(file, "error: %v", err)
-               }
-               fmt.Fprintf(file, "%s", string(data))
-
-               return nil
-       })
-}
-
 func (g *traitMetaDataGen) getTraitID(t *types.Type) string {
        for _, s := range t.CommentLines {
                if strings.Contains(s, tagTrait) {
diff --git a/cmd/util/vfs-gen/main.go b/cmd/util/vfs-gen/main.go
deleted file mode 100644
index 32b511b18..000000000
--- a/cmd/util/vfs-gen/main.go
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-       "flag"
-       "fmt"
-       "log"
-       "net/http"
-       "os"
-       "path"
-       "path/filepath"
-       "strings"
-       "time"
-
-       "github.com/apache/camel-k/v2/pkg/util"
-
-       "github.com/apache/camel-k/v2/cmd/util/vfs-gen/multifs"
-       "github.com/apache/camel-k/v2/pkg/base"
-       "github.com/shurcooL/httpfs/filter"
-       "github.com/shurcooL/vfsgen"
-)
-
-func main() {
-       var rootDir string
-       var destDir string
-
-       wd, err := os.Getwd()
-       if err != nil {
-               log.Fatalln(err)
-       }
-
-       flag.StringVar(&rootDir, "root", base.GoModDirectory, "The absolute path from were the directories can be found (camel-k module directory by default)")
-       flag.StringVar(&destDir, "dest", wd, "The destination directory of the generated file (working directory by default)")
-       flag.Parse()
-
-       if len(flag.Args()) < 1 {
-               println("usage: vfs-gen [-root <absolute root parent path>] [-dest <directory>] directory1 [directory2 ... ...]")
-               os.Exit(1)
-       }
-
-       err = checkDir(rootDir)
-       if err != nil {
-               log.Fatalln(err)
-       }
-
-       dirNames := flag.Args()
-       for _, dirName := range dirNames {
-               absDir := filepath.Join(rootDir, dirName)
-               err := checkDir(absDir)
-               if err != nil {
-                       log.Fatalln(err)
-               }
-       }
-
-       exclusions := calcExclusions(rootDir, dirNames)
-
-       //
-       // Destination file for the generated resources
-       //
-       resourceFile := path.Join(destDir, "resources.go")
-
-       mfs, err := multifs.New(rootDir, dirNames, exclusions)
-       if err != nil {
-               log.Fatalln(err)
-       }
-
-       var fs http.FileSystem = modTimeFS{
-               fs: mfs,
-       }
-
-       //
-       // Filter un-interesting files
-       //
-       fs = filter.Skip(fs, filter.FilesWithExtensions(".go"))
-       fs = filter.Skip(fs, func(path string, fi os.FileInfo) bool {
-               return strings.HasSuffix(path, ".gen.yaml") || strings.HasSuffix(path, ".gen.json")
-       })
-       fs = filter.Skip(fs, NamedFilesFilter("kustomization.yaml"))
-       fs = filter.Skip(fs, NamedFilesFilter("Makefile"))
-       fs = filter.Skip(fs, NamedFilesFilter("auto-generated.txt"))
-       fs = filter.Skip(fs, BigFilesFilter(1048576)) // 1M
-       fs = filter.Skip(fs, func(path string, fi os.FileInfo) bool {
-               for _, ex := range exclusions {
-                       if strings.HasPrefix(path, ex) {
-                               return true
-                       }
-               }
-               return false
-       })
-
-       //
-       // Generate the assets
-       //
-       err = vfsgen.Generate(fs, vfsgen.Options{
-               Filename:    resourceFile,
-               PackageName: filepath.Base(destDir),
-       })
-       if err != nil {
-               log.Fatalln(err)
-       }
-
-       //
-       // Post-process the final resource file
-       //
-       header := `/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-`
-       content, err := util.ReadFile(resourceFile)
-       if err != nil {
-               log.Fatalln(err)
-       }
-       var finalContent []byte
-       finalContent = append(finalContent, []byte(header)...)
-       finalContent = append(finalContent, content...)
-       if err := os.WriteFile(resourceFile, finalContent, 0o600); err != nil {
-               log.Fatalln(err)
-       }
-}
-
-func NamedFilesFilter(names ...string) func(path string, fi os.FileInfo) bool {
-       return func(path string, fi os.FileInfo) bool {
-               if fi.IsDir() {
-                       return false
-               }
-
-               for _, name := range names {
-                       if name == filepath.Base(path) {
-                               return true
-                       }
-               }
-
-               return false
-       }
-}
-
-// If file is bigger than maximum size (in bytes) then exclude.
-func BigFilesFilter(size int) func(path string, fi os.FileInfo) bool {
-       return func(path string, fi os.FileInfo) bool {
-               if fi.IsDir() {
-                       return false
-               }
-
-               if fi.Size() > int64(size) {
-               log.Printf("Warning: File %s is skipped due to being %d bytes (greater than maximum %d bytes)", path, fi.Size(), size)
-                       return true
-               }
-
-               return false
-       }
-}
-
-func calcExclusions(root string, dirNames []string) []string {
-       var exclusions []string
-
-       for _, name := range dirNames {
-               dirName := filepath.Join(root, name)
-               if err := filepath.Walk(dirName, func(resPath string, info os.FileInfo, err error) error {
-                       if info.IsDir() {
-                               ignoreFileName := path.Join(resPath, ".vfsignore")
-                               _, err := os.Stat(ignoreFileName)
-                               if err == nil {
-                                       rel, err := filepath.Rel(dirName, resPath)
-                                       if err != nil {
-                                               log.Fatalln(err)
-                                       }
-                                       if !strings.HasPrefix(rel, "/") {
-                                               rel = "/" + rel
-                                       }
-                                       exclusions = append(exclusions, rel)
-                               } else if !os.IsNotExist(err) {
-                                       log.Fatalln(err)
-                               }
-                       }
-                       return nil
-               }); err != nil {
-                       log.Fatalln(err)
-               }
-       }
-
-       return exclusions
-}
-
-func checkDir(dirName string) error {
-       dir, err := os.Stat(dirName)
-       if err != nil {
-               return err
-       }
-       if !dir.IsDir() {
-               return fmt.Errorf("path %s is not a directory", dirName)
-       }
-
-       return nil
-}
-
-// modTimeFS wraps http.FileSystem to set mod time to 0 for all files.
-type modTimeFS struct {
-       fs http.FileSystem
-}
-
-func (fs modTimeFS) Open(name string) (http.File, error) {
-       f, err := fs.fs.Open(name)
-       if err != nil {
-               return nil, err
-       }
-       return modTimeFile{f}, nil
-}
-
-type modTimeFile struct {
-       http.File
-}
-
-func (f modTimeFile) Stat() (os.FileInfo, error) {
-       fi, err := f.File.Stat()
-       if err != nil {
-               return nil, err
-       }
-       return modTimeFileInfo{fi}, nil
-}
-
-type modTimeFileInfo struct {
-       os.FileInfo
-}
-
-func (modTimeFileInfo) ModTime() time.Time {
-       return time.Time{}
-}
diff --git a/cmd/util/vfs-gen/multifs/multidir.go b/cmd/util/vfs-gen/multifs/multidir.go
deleted file mode 100644
index 7a7f62309..000000000
--- a/cmd/util/vfs-gen/multifs/multidir.go
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Based on the union fs function available at
-https://github.com/shurcooL/httpfs/blob/master/union/union.go
-(Licenced under MIT)
-*/
-
-package multifs
-
-import (
-       "fmt"
-       "io"
-       "net/http"
-       "os"
-       "path/filepath"
-       "strings"
-       "time"
-
-       "github.com/shurcooL/httpfs/vfsutil"
-)
-
-func New(rootDir string, dirNames []string, exclude []string) (http.FileSystem, error) {
-       m := &multiFS{
-               rootDir: rootDir,
-               exclude: exclude,
-               mfs:     make(map[string]http.FileSystem),
-               root: &dirInfo{
-                       name: "/",
-               },
-       }
-       for _, dirName := range dirNames {
-               err := m.bind(dirName)
-               if err != nil {
-                       return nil, err
-               }
-       }
-
-       return m, nil
-}
-
-type multiFS struct {
-       rootDir string
-       exclude []string
-       mfs     map[string]http.FileSystem
-       root    *dirInfo
-}
-
-func (m *multiFS) bind(dirName string) error {
-       absDir := filepath.Join(m.rootDir, dirName)
-
-       hfs := http.Dir(absDir)
-       m.mfs["/"+dirName] = hfs
-
-       //
-       // The 1-level down paths are needed since the
-       // remainder are covered by the http filesystems
-       //
-       fileInfos, err := vfsutil.ReadDir(hfs, "/")
-       if err != nil {
-               return err
-       }
-
-       for _, nfo := range fileInfos {
-               path := "/" + nfo.Name()
-
-               if m.excluded(path) {
-                       continue // skip
-               }
-
-               if nfo.IsDir() {
-                       m.root.entries = append(m.root.entries, &dirInfo{
-                               name: path,
-                       })
-               } else {
-                       m.root.entries = append(m.root.entries, nfo)
-               }
-       }
-
-       return nil
-}
-
-func (m *multiFS) excluded(path string) bool {
-       for _, ex := range m.exclude {
-               if strings.HasPrefix(path, ex) {
-                       return true
-               }
-       }
-
-       return false
-}
-
-func (m *multiFS) Open(path string) (http.File, error) {
-       if path == "/" {
-               return &dir{
-                       dirInfo: m.root,
-               }, nil
-       }
-
-       for _, fs := range m.mfs {
-               f, err := fs.Open(path)
-               if err != nil {
-                       continue
-               }
-
-               return f, nil
-       }
-
-       return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist}
-}
-
-// dirInfo is a static definition of a directory.
-type dirInfo struct {
-       name    string
-       entries []os.FileInfo
-}
-
-func (d *dirInfo) Read([]byte) (int, error) {
-       return 0, fmt.Errorf("cannot Read from directory %s", d.name)
-}
-func (d *dirInfo) Close() error               { return nil }
-func (d *dirInfo) Stat() (os.FileInfo, error) { return d, nil }
-
-func (d *dirInfo) Name() string       { return d.name }
-func (d *dirInfo) Size() int64        { return 0 }
-func (d *dirInfo) Mode() os.FileMode  { return 0o755 | os.ModeDir }
-func (d *dirInfo) ModTime() time.Time { return time.Time{} } // Actual mod time is not computed because it's expensive and rarely needed.
-func (d *dirInfo) IsDir() bool        { return true }
-func (d *dirInfo) Sys() interface{}   { return nil }
-
-// dir is an opened dir instance.
-type dir struct {
-       *dirInfo
-       pos int // Position within entries for Seek and Readdir.
-}
-
-func (d *dir) Seek(offset int64, whence int) (int64, error) {
-       if offset == 0 && whence == io.SeekStart {
-               d.pos = 0
-               return 0, nil
-       }
-       return 0, fmt.Errorf("unsupported Seek in directory %s", d.dirInfo.name)
-}
-
-func (d *dir) Readdir(count int) ([]os.FileInfo, error) {
-       if d.pos >= len(d.dirInfo.entries) && count > 0 {
-               return nil, io.EOF
-       }
-       if count <= 0 || count > len(d.dirInfo.entries)-d.pos {
-               count = len(d.dirInfo.entries) - d.pos
-       }
-       e := d.dirInfo.entries[d.pos : d.pos+count]
-       d.pos += count
-
-       return e, nil
-}
diff --git a/config/manifests/bases/camel-k.clusterserviceversion.yaml b/config/manifests/bases/camel-k.clusterserviceversion.yaml
deleted file mode 100644
index 79bc1eacf..000000000
--- a/config/manifests/bases/camel-k.clusterserviceversion.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
-apiVersion: operators.coreos.com/v1alpha1
-kind: ClusterServiceVersion
-metadata:
-  annotations:
-    alm-examples: '[]'
-    capabilities: Basic Install
-  name: camel-k.v0.0.0
-  namespace: placeholder
-spec:
-  apiservicedefinitions: {}
-  customresourcedefinitions: {}
-  description: ss
-  displayName: ss
-  icon:
-  - base64data: ""
-    mediatype: ""
-  install:
-    spec:
-      deployments: null
-    strategy: ""
-  installModes:
-  - supported: false
-    type: OwnNamespace
-  - supported: false
-    type: SingleNamespace
-  - supported: false
-    type: MultiNamespace
-  - supported: true
-    type: AllNamespaces
-  keywords:
-  - ss
-  links:
-  - name: Camel K
-    url: https://camel-k.domain
-  maturity: alpha
-  provider:
-    name: ss
-    url: ss
-  version: 0.0.0
diff --git a/go.mod b/go.mod
index 7ebfe1ce0..7fdd689f5 100644
--- a/go.mod
+++ b/go.mod
@@ -26,8 +26,6 @@ require (
        github.com/prometheus/common v0.47.0
        github.com/redhat-developer/service-binding-operator v1.4.0
        github.com/rs/xid v1.5.0
-       github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
-       github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
        github.com/sirupsen/logrus v1.9.3
        github.com/spf13/cobra v1.8.0
        github.com/spf13/pflag v1.0.5
diff --git a/go.sum b/go.sum
index eadb68427..627b56c47 100644
--- a/go.sum
+++ b/go.sum
@@ -390,10 +390,6 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
 github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
 github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
-github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
-github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
diff --git a/java/crds/src/main/resources b/java/crds/src/main/resources
index 528a7f278..b583165c4 120000
--- a/java/crds/src/main/resources
+++ b/java/crds/src/main/resources
@@ -1 +1 @@
-../../../../config/crd/bases
\ No newline at end of file
+../../../../pkg/resources/config/crd/bases
\ No newline at end of file
diff --git a/pkg/resources/config/manifests/bases/camel-k.clusterserviceversion.yaml b/pkg/resources/config/manifests/bases/camel-k.clusterserviceversion.yaml
index 88fba65dd..e0bb3a903 100644
--- a/pkg/resources/config/manifests/bases/camel-k.clusterserviceversion.yaml
+++ b/pkg/resources/config/manifests/bases/camel-k.clusterserviceversion.yaml
@@ -23,7 +23,7 @@ metadata:
     categories: Integration & Delivery
     certified: "false"
     containerImage: docker.io/apache/camel-k:2.3.0-SNAPSHOT
-    createdAt: 2024-02-16T09:57:35Z
+    createdAt: 2024-02-16T10:30:52Z
     description: Apache Camel K is a lightweight integration platform, born on Kubernetes,
       with serverless superpowers.
     operators.operatorframework.io/builder: operator-sdk-v1.16.0
diff --git a/pkg/resources/resources/traits.yaml b/pkg/resources/resources/traits.yaml
deleted file mode 100755
index bd6c3c5d1..000000000
--- a/pkg/resources/resources/traits.yaml
+++ /dev/null
@@ -1,1683 +0,0 @@
-# ---------------------------------------------------------------------------
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ---------------------------------------------------------------------------
-traits:
-- name: 3scale
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The 3scale trait can be used to automatically create annotations that
-    allow 3scale to discover the generated service and make it available for API management.
-    The 3scale trait is disabled by default.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: scheme
-    type: string
-    description: The scheme to use to contact the service (default `http`)
-  - name: path
-    type: string
-    description: The path where the API is published (default `/`)
-  - name: port
-    type: int
-    description: The port where the service is exposed (default `80`)
-  - name: description-path
-    type: string
-    description: The path where the Open-API specification is published (default `/openapi.json`)
-- name: affinity
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: Allows constraining which nodes the integration pod(s) are eligible
-    to be scheduled on, based on labels on the node, or with inter-pod affinity and
-    anti-affinity, based on labels on pods that are already running on the nodes.
-    It's disabled by default.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: pod-affinity
-    type: bool
-    description: Always co-locates multiple replicas of the integration in the same
-      node (default `false`).
-  - name: pod-anti-affinity
-    type: bool
-    description: Never co-locates multiple replicas of the integration in the same
-      node (default `false`).
-  - name: node-affinity-labels
-    type: '[]string'
-    description: Defines a set of nodes the integration pod(s) are eligible to be
-      scheduled on, based on labels on the node.
-  - name: pod-affinity-labels
-    type: '[]string'
-    description: Defines a set of pods (namely those matching the label selector,
-      relative to the given namespace) that the integration pod(s) should be co-located
-      with.
-  - name: pod-anti-affinity-labels
-    type: '[]string'
-    description: Defines a set of pods (namely those matching the label selector,
-      relative to the given namespace) that the integration pod(s) should not be co-located
-      with.
-- name: aws-secrets-manager
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Secrets Manager trait can be used to use secrets from AWS Secrets
-    Manager The AWS Secrets Manager trait is disabled by default. For more information
-    about how to use secrets from AWS Secrets Manager take a look at the components
-    docs: xref:components::aws-secrets-manager-component.adoc[AWS Secrets Manager
-    component] A sample execution of this trait, would require the following trait
-    options: -t aws-secrets-manager.enabled=true -t aws-secrets-manager.access-key="aws-access-key"
-    -t aws-secrets-manager.secret-key="aws-secret-key" -t aws-secrets-manager.region="aws-region"
-    To enable the automatic context reload on secrets updates you should define the
-    following trait options: -t aws-secrets-manager.enabled=true -t aws-secrets-manager.access-key="aws-access-key"
-    -t aws-secrets-manager.secret-key="aws-secret-key" -t aws-secrets-manager.region="aws-region"
-    -t aws-secrets-manager.context-reload-enabled="true" -t aws-secrets-manager.refresh-enabled="true"
-    -t aws-secrets-manager.refresh-period="30000" -t aws-secrets-manager.secrets="test*"'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: access-key
-    type: string
-    description: 'The AWS Access Key to use. This could be a plain text or a configmap/secret
-      The content of the aws access key is expected to be a text containing a valid
-      AWS access key. Syntax: [configmap|secret]:name[/key], where name represents
-      the resource name, key optionally represents the resource key to be filtered
-      (default key value = aws-access-key).'
-  - name: secret-key
-    type: string
-    description: 'The AWS Secret Key to use. This could be a plain text or a configmap/secret
-      The content of the aws secret key is expected to be a text containing a valid
-      AWS secret key. Syntax: [configmap|secret]:name[/key], where name represents
-      the resource name, key optionally represents the resource key to be filtered
-      (default key value = aws-secret-key).'
-  - name: region
-    type: string
-    description: The AWS Region to use
-  - name: use-default-credentials-provider
-    type: bool
-    description: Define if we want to use the Default Credentials Provider chain as
-      authentication method
-  - name: context-reload-enabled
-    type: bool
-    description: Define if we want to use the Camel Context Reload feature or not
-  - name: refresh-enabled
-    type: bool
-    description: Define if we want to use the Refresh Feature for secrets
-  - name: refresh-period
-    type: string
-    description: If Refresh is enabled, this defines the interval to check the refresh
-      event
-  - name: secrets
-    type: string
-    description: If Refresh is enabled, the regular expression representing the secrets
-      we want to track
-- name: azure-key-vault
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Azure Key Vault trait can be used to use secrets from Azure Key
-    Vault service The Azure Key Vault trait is disabled by default. For more information
-    about how to use secrets from Azure Key Vault component take a look at the components
-    docs: xref:components::azure-key-vault-component.adoc[Azure Key Vault component]
-    A sample execution of this trait, would require the following trait options: -t
-    azure-key-vault.enabled=true -t azure-key-vault.tenant-id="tenant-id" -t azure-key-vault.client-id="client-id"
-    -t azure-key-vault.client-secret="client-secret" -t azure-key-vault.vault-name="vault-name"
-    To enable the automatic context reload on secrets updates you should define the
-    following trait options: -t azure-key-vault.enabled=true -t azure-key-vault.tenant-id="tenant-id"
-    -t azure-key-vault.client-id="client-id" -t azure-key-vault.client-secret="client-secret"
-    -t azure-key-vault.vault-name="vault-name" -t azure-key-vault.context-reload-enabled="true"
-    -t azure-key-vault.refresh-enabled="true" -t azure-key-vault.refresh-period="30000"
-    -t azure-key-vault.secrets="test*" -t azure-key-vault.eventhub-connection-string="connection-string"
-    -t azure-key-vault.blob-account-name="account-name"  -t azure-key-vault.blob-container-name="container-name"  -t
-    azure-key-vault.blob-access-key="account-name"'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: tenant-id
-    type: string
-    description: The Azure Tenant Id for accessing Key Vault
-  - name: client-id
-    type: string
-    description: The Azure Client Id for accessing Key Vault
-  - name: client-secret
-    type: string
-    description: 'The Azure Client Secret for accessing Key Vault. This could be a
-      plain text or a configmap/secret. The content of the azure key vault client
-      secret is expected to be a text containing a valid Client Secret. Syntax: [configmap|secret]:name[/key],
-      where name represents the resource name, key optionally represents the resource
-      key to be filtered (default key value = azure-key-vault-client-secret).'
-  - name: vault-name
-    type: string
-    description: The Azure Vault Name for accessing Key Vault
-  - name: context-reload-enabled
-    type: bool
-    description: Define if we want to use the Camel Context Reload feature or not
-  - name: refresh-enabled
-    type: bool
-    description: Define if we want to use the Refresh Feature for secrets
-  - name: refresh-period
-    type: string
-    description: If Refresh is enabled, this defines the interval to check the refresh
-      event
-  - name: secrets
-    type: string
-    description: If Refresh is enabled, the regular expression representing the secrets
-      we want to track
-  - name: eventhub-connection-string
-    type: string
-    description: If Refresh is enabled, the connection String to point to the Eventhub
-      service used to track updates
-  - name: blob-account-name
-    type: string
-    description: If Refresh is enabled, the account name for Azure Storage Blob service
-      used to save checkpoint while consuming from Eventhub
-  - name: blob-access-key
-    type: string
-    description: 'If Refresh is enabled, the access key for Azure Storage Blob service
-      used to save checkpoint while consuming from Eventhub. This could be a plain
-      text or a configmap/secret. The content of the azure key vault blob access key
-      is expected to be a text containing a valid Access Key for Azure Storage Blob.
-      Syntax: [configmap|secret]:name[/key], where name represents the resource name,
-      key optionally represents the resource key to be filtered (default key value
-      = azure-storage-blob-access-key).'
-  - name: blob-container-name
-    type: string
-    description: If Refresh is enabled, the container name for Azure Storage Blob
-      service used to save checkpoint while consuming from Eventhub
-- name: builder
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The builder trait is internally used to determine the best strategy
-    to build and configure IntegrationKits.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: verbose
-    type: bool
-    description: Enable verbose logging on build components that support it (e.g.
-      Kaniko build pod). Deprecated no longer in use
-  - name: properties
-    type: '[]string'
-    description: A list of properties to be provided to the build task
-  - name: strategy
-    type: string
-    description: The strategy to use, either `pod` or `routine` (default `routine`)
-  - name: base-image
-    type: string
-    description: Specify a base image
-  - name: incremental-image-build
-    type: bool
-    description: Use the incremental image build option, to reuse existing containers
-      (default `true`)
-  - name: order-strategy
-    type: string
-    description: The build order strategy to use, either `dependencies`, `fifo` or
-      `sequential` (default `sequential`)
-  - name: request-cpu
-    type: string
-    description: 'When using `pod` strategy, the minimum amount of CPU required by
-      the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`.'
-  - name: request-memory
-    type: string
-    description: 'When using `pod` strategy, the minimum amount of memory required
-      by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`.'
-  - name: limit-cpu
-    type: string
-    description: 'When using `pod` strategy, the maximum amount of CPU required by
-      the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`.'
-  - name: limit-memory
-    type: string
-    description: 'When using `pod` strategy, the maximum amount of memory required
-      by the pod builder. Deprecated: use TasksRequestCPU instead with task name `builder`.'
-  - name: maven-profiles
-    type: '[]string'
-    description: 'A list of references pointing to configmaps/secrets that contains
-      a maven profile. The content of the maven profile is expected to be a text containing
-      a valid maven profile starting with `<profile>` and ending with `</profile>`
-      that will be integrated as an inline profile in the POM. Syntax: [configmap|secret]:name[/key],
-      where name represents the resource name, key optionally represents the resource
-      key to be filtered (default key value = profile.xml).'
-  - name: tasks
-    type: '[]string'
-    description: A list of tasks to be executed (available only when using `pod` strategy)
-      with format `<name>;<container-image>;<container-command>`.
-  - name: tasks-filter
-    type: string
-    description: A list of tasks sorted by the order of execution in a csv format,
-      ie, `<taskName1>,<taskName2>,...`. Mind that you must include also the operator
-      tasks (`builder`, `quarkus-native`, `package`, `jib`, `spectrum`, `s2i`) if
-      you need to execute them. Useful only with `pod` strategy.
-  - name: tasks-request-cpu
-    type: '[]string'
-    description: A list of request cpu configuration for the specific task with format
-      `<task-name>:<request-cpu-conf>`.
-  - name: tasks-request-memory
-    type: '[]string'
-    description: A list of request memory configuration for the specific task with
-      format `<task-name>:<request-memory-conf>`.
-  - name: tasks-limit-cpu
-    type: '[]string'
-    description: A list of limit cpu configuration for the specific task with format
-      `<task-name>:<limit-cpu-conf>`.
-  - name: tasks-limit-memory
-    type: '[]string'
-    description: A list of limit memory configuration for the specific task with format
-      `<task-name>:<limit-memory-conf>`.
-  - name: node-selector
-    type: map[string]string
-    description: Defines a set of nodes the builder pod is eligible to be scheduled
-      on, based on labels on the node.
-  - name: annotations
-    type: map[string]string
-    description: When using `pod` strategy, annotation to use for the builder pod.
-- name: camel
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Camel trait can be used to configure versions of Apache Camel K
-    runtime and related libraries, it cannot be disabled.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: runtime-version
-    type: string
-    description: The camel-k-runtime version to use for the integration. It overrides
-      the default version set in the Integration Platform.
-  - name: properties
-    type: '[]string'
-    description: A list of properties to be provided to the Integration runtime
-- name: container
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Container trait can be used to configure properties of the container
-    where the integration will run. It also provides configuration for Services associated
-    to the container.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: auto
-    type: bool
-    description: To automatically enable the trait
-  - name: request-cpu
-    type: string
-    description: The minimum amount of CPU required.
-  - name: request-memory
-    type: string
-    description: The minimum amount of memory required.
-  - name: limit-cpu
-    type: string
-    description: The maximum amount of CPU required.
-  - name: limit-memory
-    type: string
-    description: The maximum amount of memory required.
-  - name: expose
-    type: bool
-    description: Can be used to enable/disable exposure via kubernetes Service.
-  - name: port
-    type: int
-    description: To configure a different port exposed by the container (default `8080`).
-  - name: port-name
-    type: string
-    description: To configure a different port name for the port exposed by the container.
-      It defaults to `http` only when the `expose` parameter is true.
-  - name: service-port
-    type: int
-    description: To configure under which service port the container port is to be
-      exposed (default `80`).
-  - name: service-port-name
-    type: string
-    description: To configure under which service port name the container port is
-      to be exposed (default `http`).
-  - name: name
-    type: string
-    description: The main container name. It's named `integration` by default.
-  - name: image
-    type: string
-    description: The main container image
-  - name: image-pull-policy
-    type: PullPolicy
-    description: 'The pull policy: Always|Never|IfNotPresent'
-- name: cron
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Cron trait can be used to customize the behaviour of periodic
-    timer/cron based integrations. While normally an integration requires a pod to
-    be always up and running, some periodic tasks, such as batch jobs, require to
-    be activated at specific hours of the day or with a periodic delay of minutes.
-    For such tasks, the cron trait can materialize the integration as a Kubernetes
-    CronJob instead of a standard deployment, in order to save resources when the
-    integration does not need to be executed. Integrations that start from the following
-    components are evaluated by the cron trait: `timer`, `cron`, `quartz`. WARNING:
-    In case of native build-mode defined in xref:traits:quarkus.adoc[quarkus] trait,
-    the component can''t be customized. The rules for using a Kubernetes CronJob are
-    the following: - `timer`: when period is set in milliseconds with no remaining
-    seconds, for example 120000. If there is any second left as in 121000 (120s and
-    1s) or the presence of any of these parameters (delay, repeatCount, time) then
-    a CronJob  won''t be created, but a standard deployment. - `cron`, `quartz`: when
-    the cron expression does not contain seconds (or the "seconds" part is set to
-    0). E.g. `cron:tab?schedule=0/2${plus}*{plus}*{plus}*{plus}?` or `quartz:trigger?cron=0{plus}0/2{plus}*{plus}*{plus}*{plus}?`.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: schedule
-    type: string
-    description: The CronJob schedule for the whole integration. If multiple routes
-      are declared, they must have the same schedule for this mechanism to work correctly.
-  - name: components
-    type: string
-    description: 'A comma separated list of the Camel components that need to be customized
-      in order for them to work when the schedule is triggered externally by Kubernetes.
-      A specific customizer is activated for each specified component. E.g. for the
-      `timer` component, the `cron-timer` customizer is activated (it''s present in
-      the `org.apache.camel.k:camel-k-cron` library).  Supported components are currently:
-      `cron`, `timer` and `quartz`.'
-  - name: fallback
-    type: bool
-    description: Use the default Camel implementation of the `cron` endpoint (`quartz`)
-      instead of trying to materialize the integration as Kubernetes CronJob.
-  - name: concurrency-policy
-    type: string
-    description: 'Specifies how to treat concurrent executions of a Job. Valid values
-      are: - "Allow": allows CronJobs to run concurrently; - "Forbid" (default): forbids
-      concurrent runs, skipping next run if previous run hasn''t finished yet; - "Replace":
-      cancels currently running job and replaces it with a new one'
-  - name: auto
-    type: bool
-    description: Automatically deploy the integration as CronJob when all routes are
-      either starting from a periodic consumer (only `cron`, `timer` and `quartz`
-      are supported) or a passive consumer (e.g. `direct` is a passive consumer).  It's
-      required that all periodic consumers have the same period, and it can be expressed
-      as cron schedule (e.g. `1m` can be expressed as `0/1 * * * *`, while `35m` or
-      `50s` cannot).
-  - name: starting-deadline-seconds
-    type: int64
-    description: Optional deadline in seconds for starting the job if it misses scheduled
-      time for any reason.  Missed jobs executions will be counted as failed ones.
-  - name: active-deadline-seconds
-    type: int64
-    description: Specifies the duration in seconds, relative to the start time, that
-      the job may be continuously active before it is considered to be failed. It
-      defaults to 60s.
-  - name: backoff-limit
-    type: int32
-    description: Specifies the number of retries before marking the job failed. It
-      defaults to 2.
-- name: dependencies
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Dependencies trait is internally used to automatically add runtime
-    dependencies based on the integration that the user wants to run.
-  properties: []
-- name: deployer
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The deployer trait is responsible for deploying the resources owned
-    by the integration, and can be used to explicitly select the underlying controller
-    that will manage the integration pods.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: kind
-    type: string
-    description: Allows to explicitly select the desired deployment kind between `deployment`,
-      `cron-job` or `knative-service` when creating the resources for running the
-      integration.
-  - name: use-ssa
-    type: bool
-    description: Use server-side apply to update the owned resources (default `true`).
-      Note that it automatically falls back to client-side patching, if SSA is not
-      available, e.g., on old Kubernetes clusters.
-- name: deployment
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Deployment trait is responsible for generating the Kubernetes deployment
-    that will make sure the integration will run in the cluster.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: progress-deadline-seconds
-    type: int32
-    description: The maximum time in seconds for the deployment to make progress before
-      it is considered to be failed. It defaults to `60s`.
-  - name: strategy
-    type: DeploymentStrategyType
-    description: The deployment strategy to use to replace existing pods with new
-      ones.
-  - name: rolling-update-max-unavailable
-    type: int
-    description: 'The maximum number of pods that can be unavailable during the update.
-      Value can be an absolute number (ex: 5) or a percentage of desired pods (ex:
-      10%). Absolute number is calculated from percentage by rounding down. This can
-      not be 0 if MaxSurge is 0. Defaults to `25%`.'
-  - name: rolling-update-max-surge
-    type: int
-    description: 'The maximum number of pods that can be scheduled above the desired
-      number of pods. Value can be an absolute number (ex: 5) or a percentage of desired
-      pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is
-      calculated from percentage by rounding up. Defaults to `25%`.'
-- name: environment
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The environment trait is used internally to inject standard environment
-    variables in the integration container, such as `NAMESPACE`, `POD_NAME` and others.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: container-meta
-    type: bool
-    description: Enables injection of `NAMESPACE` and `POD_NAME` environment variables
-      (default `true`)
-  - name: http-proxy
-    type: bool
-    description: Propagates the `HTTP_PROXY`, `HTTPS_PROXY` and `NO_PROXY` environment
-      variables (default `true`)
-  - name: vars
-    type: '[]string'
-    description: A list of environment variables to be added to the integration container.
-      The syntax is KEY=VALUE, e.g., `MY_VAR="my value"`. These take precedence over
-      the previously defined environment variables.
-- name: error-handler
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The error-handler is a platform trait used to inject Error Handler
-    source into the integration runtime.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: ref
-    type: string
-    description: The error handler ref name provided or found in application properties
-- name: gc
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The GC Trait garbage-collects all resources that are no longer necessary
-    upon integration updates.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: discovery-cache
-    type: github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait.DiscoveryCacheType
-    description: 'Discovery client cache to be used, either `disabled`, `disk` or
-      `memory` (default `memory`). Deprecated: to be removed from trait configuration.'
-- name: gcp-secret-manager
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Google Secret Manager trait can be used to use secrets from Google
-    Secret Manager The Google Secret Manager trait is disabled by default. For more
-    information about how to use secrets from Google Secret Manager take a look at
-    the components docs: xref:components::google-secret-manager-component.adoc[AWS
-    Secrets Manager component] A sample execution of this trait, would require the
-    following trait options: -t gpc-secret-manager.enabled=true -t gpc-secret-manager.project-id="project-id"
-    -t gpc-secret-manager.service-account-key="file:serviceaccount.json" To enable
-    the automatic context reload on secrets updates you should define the following
-    trait options: -t gpc-secret-manager.enabled=true -t gpc-secret-manager.project-id="project-id"
-    -t gpc-secret-manager.service-account-key="file:serviceaccount.json" -t gcp-secret-manager.subscription-name="pubsub-sub"
-    -t gcp-secret-manager.context-reload-enabled="true" -t gcp-secret-manager.refresh-enabled="true"
-    -t gcp-secret-manager.refresh-period="30000" -t gcp-secret-manager.secrets="test*"'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: project-id
-    type: string
-    description: The Project Id from Google Cloud
-  - name: service-account-key
-    type: string
-    description: The Path to a service account Key File to use secrets from Google
-      Secret Manager
-  - name: use-default-instance
-    type: bool
-    description: Define if we want to use the Default Instance approach for accessing
-      the Google Secret Manager service
-  - name: context-reload-enabled
-    type: bool
-    description: Define if we want to use the Camel Context Reload feature or not
-  - name: refresh-enabled
-    type: bool
-    description: Define if we want to use the Refresh Feature for secrets
-  - name: refresh-period
-    type: string
-    description: If Refresh is enabled, this defines the interval to check the refresh
-      event
-  - name: secrets
-    type: string
-    description: If Refresh is enabled, the regular expression representing the secrets
-      we want to track
-  - name: subscription-name
-    type: string
-    description: If Refresh is enabled, this defines the subscription name to the
-      Google PubSub topic used to keep track of updates
-- name: hashicorp-vault
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Hashicorp Vault trait can be used to use secrets from Hashicorp
-    Vault The Hashicorp Vault trait is disabled by default. For more information about
-    how to use secrets from Hashicorp vault take a look at the components docs: xref:components::hashicorp-vault-component.adoc[Hashicorp
-    Vault component] A sample execution of this trait, would require the following
-    trait options: -t hashicorp-vault.enabled=true -t hashicorp-vault.token="token"
-    -t hashicorp-vault.port="port" -t hashicorp-vault.engine="engine" -t hashicorp-vault.port="port"
-    -t hashicorp-vault.scheme="scheme"'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: host
-    type: string
-    description: The Host to use
-  - name: port
-    type: string
-    description: The Port to use
-  - name: engine
-    type: string
-    description: The Hashicorp engine to use
-  - name: token
-    type: string
-    description: 'The token to access Hashicorp Vault. This could be a plain text
-      or a configmap/secret The content of the hashicorp vault token is expected to
-      be a text containing a valid Hashicorp Vault Token. Syntax: [configmap|secret]:name[/key],
-      where name represents the resource name, key optionally represents the resource
-      key to be filtered (default key value = hashicorp-vault-token).'
-  - name: scheme
-    type: string
-    description: The scheme to access Hashicorp Vault
-- name: health
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The health trait is responsible for configuring the health probes on
-    the integration container. It's disabled by default.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share this common
-      property.
-  - name: liveness-probe-enabled
-    type: bool
-    description: Configures the liveness probe for the integration container (default
-      `false`).
-  - name: liveness-scheme
-    type: string
-    description: Scheme to use when connecting to the liveness probe (default `HTTP`).
-  - name: liveness-initial-delay
-    type: int32
-    description: Number of seconds after the container has started before the liveness
-      probe is initiated.
-  - name: liveness-timeout
-    type: int32
-    description: Number of seconds after which the liveness probe times out.
-  - name: liveness-period
-    type: int32
-    description: How often to perform the liveness probe.
-  - name: liveness-success-threshold
-    type: int32
-    description: Minimum consecutive successes for the liveness probe to be considered
-      successful after having failed.
-  - name: liveness-failure-threshold
-    type: int32
-    description: Minimum consecutive failures for the liveness probe to be considered
-      failed after having succeeded.
-  - name: readiness-probe-enabled
-    type: bool
-    description: Configures the readiness probe for the integration container (default
-      `true`).
-  - name: readiness-scheme
-    type: string
-    description: Scheme to use when connecting to the readiness probe (default `HTTP`).
-  - name: readiness-initial-delay
-    type: int32
-    description: Number of seconds after the container has started before the readiness
-      probe is initiated.
-  - name: readiness-timeout
-    type: int32
-    description: Number of seconds after which the readiness probe times out.
-  - name: readiness-period
-    type: int32
-    description: How often to perform the readiness probe.
-  - name: readiness-success-threshold
-    type: int32
-    description: Minimum consecutive successes for the readiness probe to be considered
-      successful after having failed.
-  - name: readiness-failure-threshold
-    type: int32
-    description: Minimum consecutive failures for the readiness probe to be considered
-      failed after having succeeded.
-  - name: startup-probe-enabled
-    type: bool
-    description: Configures the startup probe for the integration container (default
-      `false`).
-  - name: startup-scheme
-    type: string
-    description: Scheme to use when connecting to the startup probe (default `HTTP`).
-  - name: startup-initial-delay
-    type: int32
-    description: Number of seconds after the container has started before the startup
-      probe is initiated.
-  - name: startup-timeout
-    type: int32
-    description: Number of seconds after which the startup probe times out.
-  - name: startup-period
-    type: int32
-    description: How often to perform the startup probe.
-  - name: startup-success-threshold
-    type: int32
-    description: Minimum consecutive successes for the startup probe to be considered
-      successful after having failed.
-  - name: startup-failure-threshold
-    type: int32
-    description: Minimum consecutive failures for the startup probe to be 
considered
-      failed after having succeeded.
-- name: ingress
-  platform: false
-  profiles:
-  - Kubernetes
-  description: The Ingress trait can be used to expose the service associated 
with
-    the integration to the outside world with a Kubernetes Ingress. It's 
enabled by
-    default whenever a Service is added to the integration (through the 
`service`
-    trait).
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: annotations
-    type: map[string]string
-    description: 'The annotations added to the ingress. This can be used to 
set controller
-      specific annotations, e.g., when using the NGINX Ingress controller: See 
https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md'
-  - name: host
-    type: string
-    description: To configure the host exposed by the ingress.
-  - name: path
-    type: string
-    description: To configure the path exposed by the ingress (default `/`).
-  - name: path-type
-    type: k8s.io/api/networking/v1.PathType
-    description: To configure the path type exposed by the ingress. One of 
`Exact`,
-      `Prefix`, `ImplementationSpecific` (default to `Prefix`).
-  - name: auto
-    type: bool
-    description: To automatically add an ingress whenever the integration uses 
an
-      HTTP endpoint consumer.
-- name: istio
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Istio trait allows configuring properties related to the 
Istio
-    service mesh, such as sidecar injection and outbound IP ranges.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: allow
-    type: string
-    description: Configures a (comma-separated) list of CIDR subnets that 
should not
-      be intercepted by the Istio proxy 
(`10.0.0.0/8,172.16.0.0/12,192.168.0.0/16`
-      by default).
-  - name: inject
-    type: bool
-    description: Forces the value for labels `sidecar.istio.io/inject`. By 
default
-      the label is set to `true` on deployment and not set on Knative Service.
-- name: jolokia
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Jolokia trait activates and configures the Jolokia Java 
agent.
-    See https://jolokia.org/reference/html/agents.html
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: ca-cert
-    type: string
-    description: The PEM encoded CA certification file path, used to verify 
client
-      certificates, applicable when `protocol` is `https` and 
`use-ssl-client-authentication`
-      is `true` (default 
`/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt`
-      for OpenShift).
-  - name: client-principal
-    type: '[]string'
-    description: The principal(s) which must be given in a client certificate 
to allow
-      access to the Jolokia endpoint, applicable when `protocol` is `https` 
and `use-ssl-client-authentication`
-      is `true` (default `clientPrincipal=cn=system:master-proxy`, 
`cn=hawtio-online.hawtio.svc`
-      and `cn=fuse-console.fuse.svc` for OpenShift).
-  - name: discovery-enabled
-    type: bool
-    description: Listen for multicast requests (default `false`)
-  - name: extended-client-check
-    type: bool
-    description: Mandate the client certificate contains a client flag in the 
extended
-      key usage section, applicable when `protocol` is `https` and 
`use-ssl-client-authentication`
-      is `true` (default `true` for OpenShift).
-  - name: host
-    type: string
-    description: The Host address to which the Jolokia agent should bind. If `"\*"`
-      or `"0.0.0.0"` is given, the server binds to every network interface (default
-      `"*"`).
-  - name: password
-    type: string
-    description: The password used for authentication, applicable when the 
`user`
-      option is set.
-  - name: port
-    type: int
-    description: The Jolokia endpoint port (default `8778`).
-  - name: protocol
-    type: string
-    description: The protocol to use, either `http` or `https` (default 
`https` for
-      OpenShift)
-  - name: user
-    type: string
-    description: The user to be used for authentication
-  - name: use-ssl-client-authentication
-    type: bool
-    description: Whether client certificates should be used for authentication 
(default
-      `true` for OpenShift).
-  - name: options
-    type: '[]string'
-    description: A list of additional Jolokia options as defined in 
https://jolokia.org/reference/html/agents.html#agent-jvm-config[JVM
-      agent configuration options]
-- name: jvm
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The JVM trait is used to configure the JVM that runs the 
Integration.
-    This trait is configured only for Integration and related IntegrationKits (bound
-    to a container image) built by the Camel K operator. If the system detects the usage
-    of a different container image (i.e., built externally), then the trait is disabled
-    by the platform. WARNING: you can still enable the trait explicitly even when
-    it is disabled by the platform, but you should be aware that some configurations
-    could fail.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: debug
-    type: bool
-    description: Activates remote debugging, so that a debugger can be 
attached to
-      the JVM, e.g., using port-forwarding
-  - name: debug-suspend
-    type: bool
-    description: Suspends the target JVM immediately before the main class is 
loaded
-  - name: print-command
-    type: bool
-    description: Prints the command used to start the JVM in the container logs
-      (default `true`)
-  - name: debug-address
-    type: string
-    description: Transport address at which to listen for the newly launched 
JVM (default
-      `*:5005`)
-  - name: options
-    type: '[]string'
-    description: A list of JVM options
-  - name: classpath
-    type: string
-    description: Additional JVM classpath (use `Linux` classpath separator)
-- name: kamelets
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The kamelets trait is a platform trait used to inject Kamelets 
into
-    the integration runtime.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Automatically inject all referenced Kamelets and their 
default configuration
-      (enabled by default)
-  - name: list
-    type: string
-    description: Comma separated list of Kamelet names to load into the 
current integration
-  - name: mount-point
-    type: string
-    description: The directory where the application mounts and reads Kamelet 
spec
-      (default `/etc/camel/kamelets`)
-- name: keda
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The KEDA trait can be used for automatic integration with KEDA 
autoscalers.
-    The trait can be either manually configured using the `triggers` option or 
automatically
-    configured via markers in the Kamelets. For information on how to use KEDA 
enabled
-    Kamelets with the KEDA trait, refer to 
xref:ROOT:kamelets/kamelets-user.adoc#kamelet-keda-user[the
-    KEDA section in the Kamelets user guide]. If you want to create Kamelets 
that
-    contain KEDA metadata, refer to 
xref:ROOT:kamelets/kamelets-dev.adoc#kamelet-keda-dev[the
-    KEDA section in the Kamelets development guide]. The KEDA trait is 
disabled by
-    default.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait. Allows the 
trait to
-      infer KEDA triggers from the Kamelets.
-  - name: hack-controller-replicas
-    type: bool
-    description: Set the spec->replicas field on the top level controller to 
an explicit
-      value if missing, to allow KEDA to recognize it as a scalable resource.
-  - name: polling-interval
-    type: int32
-    description: Interval (seconds) to check each trigger on.
-  - name: cooldown-period
-    type: int32
-    description: The wait period between the last active trigger reported and 
scaling
-      the resource back to 0.
-  - name: idle-replica-count
-    type: int32
-    description: Enabling this property allows KEDA to scale the resource down 
to
-      the specified number of replicas.
-  - name: min-replica-count
-    type: int32
-    description: Minimum number of replicas.
-  - name: max-replica-count
-    type: int32
-    description: Maximum number of replicas.
-  - name: triggers
-    type: '[]github.com/apache/camel-k/v2/addons/keda.kedaTrigger'
-    description: Definition of triggers according to the KEDA format. Each 
trigger
-      must contain `type` field corresponding to the name of a KEDA autoscaler 
and
-      a key/value map named `metadata` containing specific trigger options. An 
optional
-      `authentication-secret` can be declared per trigger and the operator 
will link
-      each entry of the secret to a KEDA authentication parameter.
-- name: knative
-  platform: false
-  profiles:
-  - Knative
-  description: The Knative trait automatically discovers addresses of Knative resources
-    and injects them into the running integration. The full Knative configuration is
-    injected in the CAMEL_KNATIVE_CONFIGURATION in JSON format. The Camel 
Knative
-    component will then use the full configuration to configure the routes. 
The trait
-    is enabled by default when the Knative profile is active.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: configuration
-    type: string
-    description: Can be used to inject a Knative complete configuration in 
JSON format.
-  - name: channel-sources
-    type: '[]string'
-    description: List of channels used as source of integration routes. Can 
contain
-      simple channel names or full Camel URIs.
-  - name: channel-sinks
-    type: '[]string'
-    description: List of channels used as destination of integration routes. 
Can contain
-      simple channel names or full Camel URIs.
-  - name: endpoint-sources
-    type: '[]string'
-    description: List of endpoints used as source of integration routes.
-  - name: endpoint-sinks
-    type: '[]string'
-    description: List of endpoints used as destination of integration routes. 
Can
-      contain simple endpoint names or full Camel URIs.
-  - name: event-sources
-    type: '[]string'
-    description: List of event types that the integration will be subscribed 
to. Can
-      contain simple event types or full Camel URIs (to use a specific broker 
different
-      from "default").
-  - name: event-sinks
-    type: '[]string'
-    description: List of event types that the integration will produce. Can 
contain
-      simple event types or full Camel URIs (to use a specific broker).
-  - name: filter-source-channels
-    type: bool
-    description: Enables filtering on events based on the header 
"ce-knativehistory".
-      Since this header has been removed in newer versions of Knative, 
filtering is
-      disabled by default.
-  - name: sink-binding
-    type: bool
-    description: Allows binding the integration to a sink via a Knative 
SinkBinding
-      resource. This can be used when the integration targets a single sink. 
It's
-      enabled by default when the integration targets a single sink (except 
when the
-      integration is owned by a Knative source).
-  - name: auto
-    type: bool
-    description: Enable automatic discovery of all trait properties.
-  - name: namespace-label
-    type: bool
-    description: 'Enables the camel-k-operator to set the "bindings.knative.dev/include=true"
-      label on the namespace, as Knative requires this label to perform injection of
-      the K_SINK URL into the service. If this is false, the integration pod may start
-      and fail; see the SinkBinding Knative documentation. (default: true)'
-- name: knative-service
-  platform: false
-  profiles:
-  - Knative
-  description: The Knative Service trait allows configuring options when 
running the
-    Integration as a Knative service, instead of a standard Kubernetes 
Deployment.
-    Running an Integration as a Knative Service enables auto-scaling (and 
scaling-to-zero),
-    but those features are only relevant when the Camel route(s) use(s) an 
HTTP endpoint
-    consumer.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: annotations
-    type: map[string]string
-    description: 'The annotations added to the route. This can be used to set Knative
-      service specific annotations. CLI usage example: -t
-      "knative-service.annotations.''haproxy.router.openshift.io/balance''=true"'
-  - name: autoscaling-class
-    type: string
-    description: Configures the Knative autoscaling class property (e.g. to 
set `hpa.autoscaling.knative.dev`
-      or `kpa.autoscaling.knative.dev` autoscaling).  Refer to the Knative 
documentation
-      for more information.
-  - name: autoscaling-metric
-    type: string
-    description: Configures the Knative autoscaling metric property (e.g. to 
set `concurrency`
-      based or `cpu` based autoscaling).  Refer to the Knative documentation 
for more
-      information.
-  - name: autoscaling-target
-    type: int
-    description: Sets the allowed concurrency level or CPU percentage 
(depending on
-      the autoscaling metric) for each Pod.  Refer to the Knative 
documentation for
-      more information.
-  - name: min-scale
-    type: int
-    description: The minimum number of Pods that should be running at any time 
for
-      the integration. It's **zero** by default, meaning that the integration 
is scaled
-      down to zero when not used for a configured amount of time.  Refer to 
the Knative
-      documentation for more information.
-  - name: max-scale
-    type: int
-    description: An upper bound for the number of Pods that can be running in 
parallel
-      for the integration. Knative has its own cap value that depends on the 
installation.  Refer
-      to the Knative documentation for more information.
-  - name: rollout-duration
-    type: string
-    description: Enables gradually shifting traffic to the latest Revision and sets
-      the rollout duration. It's disabled by default and must be expressed as 
a Golang
-      `time.Duration` string representation, rounded to a second precision.
-  - name: visibility
-    type: string
-    description: Setting `cluster-local` makes the Knative service a private service.
-      Specifically, this option applies the `networking.knative.dev/visibility` label
-      to the Knative service.  Refer to the Knative documentation for more information.
-  - name: auto
-    type: bool
-    description: 'Automatically deploy the integration as Knative service when 
all
-      conditions hold:  * Integration is using the Knative profile * All 
routes are
-      either starting from an HTTP based consumer or a passive consumer (e.g. 
`direct`
-      is a passive consumer)'
-- name: logging
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Logging trait is used to configure Integration runtime 
logging
-    options (such as color and format). The logging backend is provided by 
Quarkus,
-    whose configuration is documented at https://quarkus.io/guides/logging.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: color
-    type: bool
-    description: Colorize the log output
-  - name: format
-    type: string
-    description: Logs message format
-  - name: level
-    type: string
-    description: Adjust the logging level (defaults to `INFO`)
-  - name: json
-    type: bool
-    description: Output the logs in JSON
-  - name: json-pretty-print
-    type: bool
-    description: Enable "pretty printing" of the JSON logs
-- name: master
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Master trait allows configuring the integration to automatically
-    leverage Kubernetes resources for doing leader election and starting 
*master*
-    routes only on certain instances. It''s activated automatically when using 
the
-    master endpoint in a route, e.g. 
`from("master:lockname:telegram:bots")...`. NOTE:
-    this trait adds special permissions to the integration service account in 
order
-    to read/write configmaps and read pods. It''s recommended to use a 
different service
-    account than "default" when running the integration.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: include-delegate-dependencies
-    type: bool
-    description: When this flag is active, the operator analyzes the source 
code to
-      add dependencies required by delegate endpoints. E.g. when using 
`master:lockname:timer`,
-      then `camel:timer` is automatically added to the set of dependencies. 
It's enabled
-      by default.
-  - name: resource-name
-    type: string
-    description: Name of the configmap/lease resource that will be used to store
-      the lock. Defaults to "<integration-name>-lock".
-  - name: resource-type
-    type: string
-    description: Type of Kubernetes resource to use for locking ("ConfigMap" 
or "Lease").
-      Defaults to "Lease".
-  - name: label-key
-    type: string
-    description: Label that will be used to identify all pods contending the 
lock.
-      Defaults to "camel.apache.org/integration".
-  - name: label-value
-    type: string
-    description: Label value that will be used to identify all pods contending 
the
-      lock. Defaults to the integration name.
-- name: mount
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Mount trait can be used to configure volumes mounted on the 
Integration
-    Pods.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: configs
-    type: '[]string'
-    description: 'A list of configurations pointing to a configmap/secret. The configurations
-      are expected to be UTF-8 resources, as they are processed by the runtime Camel Context
-      and parsed as property files. They are also made available
on the
-      classpath in order to ease their usage directly from the Route. Syntax: 
[configmap|secret]:name[/key],
-      where name represents the resource name and key optionally represents 
the resource
-      key to be filtered'
-  - name: resources
-    type: '[]string'
-    description: 'A list of resources (text or binary content) pointing to 
configmap/secret.
-      The resources are expected to be any resource type (text or binary 
content).
-      The destination path can be either a default location or any path 
specified
-      by the user. Syntax: [configmap|secret]:name[/key][@path], where name 
represents
-      the resource name, key optionally represents the resource key to be 
filtered
-      and path represents the destination path'
-  - name: volumes
-    type: '[]string'
-    description: 'A list of Persistent Volume Claims to be mounted. Syntax: 
[pvcname:/container/path]'
-  - name: hot-reload
-    type: bool
-    description: Enable "hot reload" when a secret/configmap mounted is edited 
(default
-      `false`). The configmap/secret must be marked with 
`camel.apache.org/integration`
-      label to be taken in account.
-- name: openapi
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The OpenAPI DSL trait is internally used to allow creating integrations
-    from an OpenAPI spec.
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: configmaps
-    type: '[]string'
-    description: The configmaps holding the spec of the OpenAPI
-- name: owner
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Owner trait ensures that all created resources belong to 
the integration
-    being created and transfers annotations and labels on the integration onto 
these
-    owned resources.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: target-annotations
-    type: '[]string'
-    description: The set of annotations to be transferred
-  - name: target-labels
-    type: '[]string'
-    description: The set of labels to be transferred
-- name: pdb
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The PDB trait allows configuring the PodDisruptionBudget resource
-    for the Integration pods.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: min-available
-    type: string
-    description: The number of pods for the Integration that must still be 
available
-      after an eviction. It can be either an absolute number or a percentage. 
Only
-      one of `min-available` and `max-unavailable` can be specified.
-  - name: max-unavailable
-    type: string
-    description: The number of pods for the Integration that can be 
unavailable after
-      an eviction. It can be either an absolute number or a percentage 
(default `1`
-      if `min-available` is also not set). Only one of `max-unavailable` and 
`min-available`
-      can be specified.
-- name: platform
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The platform trait is a base trait that is used to assign an 
integration
-    platform to an integration. In case the platform is missing, the trait is 
allowed
-    to create a default platform. This feature is especially useful in 
contexts where
-    there's no need to provide a custom configuration for the platform (e.g. 
on OpenShift
-    the default settings work, since there's an embedded container image 
registry).
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: create-default
-    type: bool
-    description: To create a default (empty) platform when the platform is 
missing.
-  - name: global
-    type: bool
-    description: Indicates if the platform should be created globally in the 
case
-      of global operator (default true).
-  - name: auto
-    type: bool
-    description: To automatically detect from the environment if a default 
platform
-      can be created (it will be created on OpenShift only).
-- name: pod
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The pod trait allows the customization of the Integration pods. 
It
-    applies the `PodSpecTemplate` struct contained in the Integration 
`.spec.podTemplate`
-    field, into the Integration deployment Pods template, using strategic 
merge patch.
-    This can be used to customize the container where Camel routes execute, by 
using
-    the `integration` container name.
-  properties: []
-- name: prometheus
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Prometheus trait configures a Prometheus-compatible 
endpoint.
-    It also creates a `PodMonitor` resource, so that the endpoint can be 
scraped automatically,
-    when using the Prometheus operator. The metrics are exposed using 
Micrometer Metrics.
-    WARNING: The creation of the `PodMonitor` resource requires the 
https://github.com/coreos/prometheus-operator[Prometheus
-    Operator] custom resource definition to be installed. You can set 
`pod-monitor`
-    to `false` for the Prometheus trait to work without the Prometheus 
Operator. WARNING:
-    By default the metrics API is not available in JSON. The Prometheus trait is
-    disabled by default.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: pod-monitor
-    type: bool
-    description: Whether a `PodMonitor` resource is created (default `true`).
-  - name: pod-monitor-labels
-    type: '[]string'
-    description: The `PodMonitor` resource labels, applicable when 
`pod-monitor` is
-      `true`.
-- name: pull-secret
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Pull Secret trait sets a pull secret on the pod, to allow 
Kubernetes
-    to retrieve the container image from an external registry. The pull secret 
can
-    be specified manually or, in case you've configured authentication for an 
external
-    container registry on the `IntegrationPlatform`, the same secret is used 
to pull
-    images. It's enabled by default whenever you configure authentication for 
an external
-    container registry, so it assumes that external registries are private. If 
your
-    registry does not need authentication for pulling images, you can disable 
this
-    trait.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: secret-name
-    type: string
-    description: The pull secret name to set on the Pod. If left empty this is 
automatically
-      taken from the `IntegrationPlatform` registry configuration.
-  - name: image-puller-delegation
-    type: bool
-    description: When using a global operator with a shared platform, this 
enables
-      delegation of the `system:image-puller` cluster role on the operator 
namespace
-      to the integration service account.
-  - name: auto
-    type: bool
-    description: Automatically configures the platform registry secret on the 
pod
-      if it is of type `kubernetes.io/dockerconfigjson`.
-- name: quarkus
-  platform: true
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Quarkus trait configures the Quarkus runtime. It''s 
enabled by
-    default. NOTE: A native based compilation will be forced to use a `pod` 
build
-    strategy. Compiling to a native executable, i.e. when using 
`build-mode=native`,
-    requires at least 4GiB of memory, so the Pod running the native build must have
-    enough memory available.'
-  properties:
-  - name: enabled
-    type: bool
-    description: 'Deprecated: no longer in use.'
-  - name: package-type
-    type: 
'[]github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait.QuarkusPackageType'
-    description: 'The Quarkus package types, `fast-jar` or `native` (default 
`fast-jar`).
-      In case both `fast-jar` and `native` are specified, two `IntegrationKit` 
resources
-      are created, with the native kit having precedence over the `fast-jar` 
one once
-      ready. The order influences the resolution of the current kit for the 
integration.
-      The kit corresponding to the first package type will be assigned to the 
integration
-      in case no existing kit that matches the integration exists. Deprecated: 
use
-      `build-mode` instead.'
-  - name: build-mode
-    type: '[]github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait.QuarkusMode'
-    description: 'The Quarkus mode to run: either `jvm` or `native` (default 
`jvm`).
-      In case both `jvm` and `native` are specified, two `IntegrationKit` 
resources
-      are created, with the `native` kit having precedence over the `jvm` one 
once
-      ready.'
-  - name: native-base-image
-    type: string
-    description: The base image to use when running a native build (default 
`quay.io/quarkus/quarkus-micro-image:2.0`)
-  - name: native-builder-image
-    type: string
-    description: The image containing the tooling required for a native build 
(by
-      default it will use the one provided in the runtime catalog)
-- name: registry
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Registry trait sets up Maven to use the Image registry as a 
Maven
-    repository.
-  properties: []
-- name: resume
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Resume trait can be used to manage and configure resume 
strategies.
-    This feature is meant to allow quick resume of processing by Camel K 
instances
-    after they have been restarted. WARNING: this is an experimental 
implementation
-    based on the support available on 
link:/components/next/eips/resume-strategies.html[Camel
-    Core resume strategies]. The Resume trait is disabled by default. The main difference
-    from the implementation in Core is that it''s not necessary to bind the strategies
-    to the registry. This step will be done automatically by Camel K, after 
resolving
-    the options passed to the trait. A sample execution of this trait, using 
the Kafka
-    backend (the only one supported at the moment), would require the 
following trait
-    options: -t resume.enabled=true -t resume.resume-path=camel-file-sets -t 
resume.resume-server="address-of-your-kafka:9092"'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait.
-  - name: resume-strategy
-    type: string
-    description: The type of the resume strategy to use
-  - name: resume-path
-    type: string
-    description: The path used by the resume strategy (this is specific to the 
resume
-      strategy type)
-  - name: resume-server
-    type: string
-    description: The address of the resume server to use (protocol / 
implementation
-      specific)
-  - name: cache-fill-policy
-    type: string
-    description: 'The adapter-specific policy to use when filling the cache 
(use:
-      minimizing / maximizing). Check the component documentation if unsure'
-- name: route
-  platform: false
-  profiles:
-  - OpenShift
-  description: 'The Route trait can be used to configure the creation of 
OpenShift
-    routes for the integration. The certificate and key contents may be 
sourced either
-    from the local filesystem or from an OpenShift `secret` object. The user may use
-    the parameters ending in `-secret` (example: `tls-certificate-secret`) to 
reference
-    a certificate stored in a `secret`. Parameters ending in `-secret` have higher
-    priority and in case the same route parameter is set, for example:
`tls-key-secret`
-    and `tls-key`, then `tls-key-secret` is used. The recommended approach to 
set
-    the key and certificates is to use `secrets` to store their contents and 
use the
-    following parameters to reference them: `tls-certificate-secret`, 
`tls-key-secret`,
-    `tls-ca-certificate-secret`, `tls-destination-ca-certificate-secret` See 
the examples
-    section at the end of this page to see the setup options.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: annotations
-    type: map[string]string
-    description: 'The annotations added to the route. This can be used to set route
-      specific annotations. For annotation options see
-      https://docs.openshift.com/container-platform/3.11/architecture/networking/routes.html#route-specific-annotations
-      CLI usage example: -t
-      "route.annotations.''haproxy.router.openshift.io/balance''=true"'
-  - name: host
-    type: string
-    description: To configure the host exposed by the route.
-  - name: tls-termination
-    type: string
-    description: The TLS termination type, like `edge`, `passthrough` or 
`reencrypt`.  Refer
-      to the OpenShift route documentation for additional information.
-  - name: tls-certificate
-    type: string
-    description: The TLS certificate contents.  Refer to the OpenShift route 
documentation
-      for additional information.
-  - name: tls-certificate-secret
-    type: string
-    description: The secret name and key reference to the TLS certificate. The 
format
-      is "secret-name[/key-name]", the value represents the secret name, if 
there
-      is only one key in the secret it will be read, otherwise you can set a 
key name
-      separated with a "/".  Refer to the OpenShift route documentation for 
additional
-      information.
-  - name: tls-key
-    type: string
-    description: The TLS certificate key contents.  Refer to the OpenShift 
route documentation
-      for additional information.
-  - name: tls-key-secret
-    type: string
-    description: The secret name and key reference to the TLS certificate key. 
The
-      format is "secret-name[/key-name]", the value represents the secret 
name, if
-      there is only one key in the secret it will be read, otherwise you can 
set a
-      key name separated with a "/".  Refer to the OpenShift route 
documentation for
-      additional information.
-  - name: tls-ca-certificate
-    type: string
-    description: The TLS CA certificate contents.  Refer to the OpenShift 
route documentation
-      for additional information.
-  - name: tls-ca-certificate-secret
-    type: string
-    description: The secret name and key reference to the TLS CA certificate. 
The
-      format is "secret-name[/key-name]", the value represents the secret 
name, if
-      there is only one key in the secret it will be read, otherwise you can 
set a
-      key name separated with a "/".  Refer to the OpenShift route 
documentation for
-      additional information.
-  - name: tls-destination-ca-certificate
-    type: string
-    description: The destination CA certificate provides the contents of the 
ca certificate
-      of the final destination.  When using reencrypt termination this file 
should
-      be provided in order to have routers use it for health checks on the 
secure
-      connection. If this field is not specified, the router may provide its 
own destination
-      CA and perform hostname validation using the short service name 
(service.namespace.svc),
-      which allows infrastructure generated certificates to automatically 
verify.  Refer
-      to the OpenShift route documentation for additional information.
-  - name: tls-destination-ca-certificate-secret
-    type: string
-    description: The secret name and key reference to the destination CA 
certificate.
-      The format is "secret-name[/key-name]", the value represents the secret 
name,
-      if there is only one key in the secret it will be read, otherwise you 
can set
-      a key name separated with a "/".  Refer to the OpenShift route 
documentation
-      for additional information.
-  - name: tls-insecure-edge-termination-policy
-    type: string
-    description: To configure how to deal with insecure traffic, e.g. `Allow`, 
`Disable`
-      or `Redirect` traffic.  Refer to the OpenShift route documentation for 
additional
-      information.
-- name: service
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: The Service trait exposes the integration with a Service 
resource so
-    that it can be accessed by other applications (or integrations) in the 
same namespace.
-    It's enabled by default if the integration depends on a Camel component 
that can
-    expose an HTTP endpoint.
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: To automatically detect from the code if a Service needs to 
be created.
-  - name: node-port
-    type: bool
-    description: 'Enable Service to be exposed as NodePort (default `false`). 
Deprecated:
-      Use service type instead.'
-  - name: type
-    type: github.com/apache/camel-k/v2/pkg/apis/camel/v1/trait.ServiceType
-    description: The type of service to be used, either 'ClusterIP', 
'NodePort' or
-      'LoadBalancer'.
-- name: service-binding
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Service Binding trait allows users to connect to Services 
in Kubernetes:
-    https://github.com/k8s-service-bindings/spec#service-binding As the 
specification
-    is still evolving this is subject to change.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: services
-    type: '[]string'
-    description: List of Services in the form 
[[apigroup/]version:]kind:[namespace/]name
-- name: telemetry
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'The Telemetry trait can be used to automatically publish 
tracing information
-    to an OTLP compatible collector. The trait is able to automatically 
discover the
-    telemetry OTLP endpoint available in the namespace (supports **Jaeger** in version
-    1.35+). The Telemetry trait is disabled by default. WARNING: The Telemetry 
trait
-    can''t be enabled at the same time as the Tracing trait.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait, including 
automatic
-      discovery of the telemetry endpoint.
-  - name: service-name
-    type: string
-    description: The name of the service that publishes telemetry data 
(defaults to
-      the integration name)
-  - name: endpoint
-    type: string
-    description: The target endpoint of the Telemetry service (automatically 
discovered
-      by default)
-  - name: sampler
-    type: string
-    description: The sampler of the telemetry used for tracing (default "on")
-  - name: sampler-ratio
-    type: string
-    description: The sampler ratio of the telemetry used for tracing
-  - name: sampler-parent-based
-    type: bool
-    description: The sampler of the telemetry used for tracing is parent based 
(default
-      "true")
-- name: toleration
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'This trait sets Tolerations over Integration pods. Tolerations 
allow
-    (but do not require) the pods to schedule onto nodes with matching taints. 
See
-    
https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
-    for more details. The toleration should be expressed in a similar manner to that
-    of taints, i.e., `Key[=Value]:Effect[:Seconds]`, where values in square brackets
-    are optional. For example: - `node-role.kubernetes.io/master:NoSchedule`
- `node.kubernetes.io/network-unavailable:NoExecute:3000`
-    - `disktype=ssd:PreferNoSchedule` It''s disabled by default.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: taints
-    type: '[]string'
-    description: The list of taints to tolerate, in the form 
`Key[=Value]:Effect[:Seconds]`
-- name: tracing
-  platform: false
-  profiles:
-  - Kubernetes
-  - Knative
-  - OpenShift
-  description: 'WARNING: The Tracing trait has been **deprecated** in favor of 
the
-    xref:traits:telemetry.adoc[Telemetry] trait. The Tracing trait can be used 
to
-    automatically publish tracing information to an OpenTracing compatible 
collector.
-    The trait is able to automatically discover the tracing endpoint available 
in
-    the namespace (supports **Jaeger**). The Tracing trait is disabled by 
default.
-    WARNING: The Tracing trait can''t be enabled at the same time as the 
Telemetry
-    trait.'
-  properties:
-  - name: enabled
-    type: bool
-    description: Can be used to enable or disable a trait. All traits share 
this common
-      property.
-  - name: auto
-    type: bool
-    description: Enables automatic configuration of the trait, including 
automatic
-      discovery of the tracing endpoint.
-  - name: service-name
-    type: string
-    description: The name of the service that publishes tracing data (defaults 
to
-      the integration name)
-  - name: endpoint
-    type: string
-    description: The target endpoint of the OpenTracing service (automatically 
discovered
-      by default)
-  - name: sampler-type
-    type: string
-    description: The sampler type (default "const")
-  - name: sampler-param
-    type: string
-    description: The sampler specific param (default "1")
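
The file removed above is a flat list of trait entries, each carrying a name, a platform flag, the applicable profiles, a description and a list of typed properties. As a rough orientation for readers, the sketch below shows how such metadata could be deserialized in Go, assuming a hypothetical consumer that parses it with gopkg.in/yaml.v2; the struct and field names are illustrative only and are not part of this change.

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Illustrative structs mirroring the layout of the removed traits.yaml entries.
type property struct {
	Name        string `yaml:"name"`
	Type        string `yaml:"type"`
	Description string `yaml:"description"`
}

type trait struct {
	Name        string     `yaml:"name"`
	Platform    bool       `yaml:"platform"`
	Profiles    []string   `yaml:"profiles"`
	Description string     `yaml:"description"`
	Properties  []property `yaml:"properties"`
}

func main() {
	doc := `
- name: health
  platform: false
  profiles:
  - Kubernetes
  description: The health trait is responsible for configuring the health probes.
  properties:
  - name: enabled
    type: bool
    description: Can be used to enable or disable a trait.
`
	var traits []trait
	if err := yaml.Unmarshal([]byte(doc), &traits); err != nil {
		panic(err)
	}
	fmt.Printf("%s: platform=%v, %d properties\n",
		traits[0].Name, traits[0].Platform, len(traits[0].Properties))
}
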
diff --git a/pkg/resources/resources_support.go b/pkg/resources/resources_support.go
index 7ff6b5600..605d24f4f 100644
--- a/pkg/resources/resources_support.go
+++ b/pkg/resources/resources_support.go
@@ -38,9 +38,7 @@ var resources embed.FS
 func Resource(name string) ([]byte, error) {
        name = strings.Trim(name, " ")
        name = filepath.ToSlash(name)
-       if strings.HasPrefix(name, "/") {
-               name = name[1:]
-       }
+       name = strings.TrimPrefix(name, "/")
 
        file, err := resources.Open(name)
        if err != nil {
@@ -96,9 +94,7 @@ func DirExists(dirName string) bool {
 // WithPrefix lists all file names that begin with the given path prefix
 // If pathPrefix is a path of directories then be sure to end it with a '/'.
 func WithPrefix(pathPrefix string) ([]string, error) {
-       if strings.HasPrefix(pathPrefix, "/") {
-               pathPrefix = pathPrefix[1:]
-       }
+       pathPrefix = strings.TrimPrefix(pathPrefix, "/")
        dirPath := filepath.Dir(pathPrefix)
        paths, err := Resources(dirPath)
        if err != nil {
@@ -118,12 +114,9 @@ func WithPrefix(pathPrefix string) ([]string, error) {
 // Resources lists all file names in the given path.
 func Resources(dirName string) ([]string, error) {
        dirName = filepath.ToSlash(dirName)
-       if strings.HasPrefix(dirName, "/") {
-               dirName = dirName[1:]
-       }
-       if strings.HasSuffix(dirName, "/") {
-               dirName = dirName[:len(dirName)-1]
-       }
+       dirName = strings.TrimPrefix(dirName, "/")
+       dirName = strings.TrimSuffix(dirName, "/")
+
        dir, err := resources.Open(dirName)
        if err != nil {
                if os.IsNotExist(err) {
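
For reference on the simplification above: strings.TrimPrefix and strings.TrimSuffix return the input unchanged when the affix is absent, which is why the explicit HasPrefix/HasSuffix guards could be dropped. A small standalone sketch (the sample paths are chosen only for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, dir := range []string{"/config/manager/", "config/manager"} {
		// TrimPrefix/TrimSuffix are no-ops when the affix is not present,
		// so no HasPrefix/HasSuffix check is needed.
		dir = strings.TrimPrefix(dir, "/")
		dir = strings.TrimSuffix(dir, "/")
		fmt.Println(dir) // both iterations print "config/manager"
	}
}
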
diff --git a/pkg/resources/resources_test.go b/pkg/resources/resources_test.go
index ac3c26fcd..05d1ed9d5 100644
--- a/pkg/resources/resources_test.go
+++ b/pkg/resources/resources_test.go
@@ -113,7 +113,6 @@ func TestResourcesWithPrefix(t *testing.T) {
        NoErrorAndContains(t, "/config/manager/", 
"config/manager/operator-service-account.yaml", WithPrefix)
        NoErrorAndContains(t, "/config/manager/op", 
"config/manager/operator-service-account.yaml", WithPrefix)
        NoErrorAndContains(t, "/config/manager/operator-service-account", 
"config/manager/operator-service-account.yaml", WithPrefix)
-       NoErrorAndContains(t, "/resources/traits", "resources/traits.yaml", 
WithPrefix)
 
        // directory needs the slash on the end
        NoErrorAndNotContains(t, "/config/manager", 
"config/manager/operator-service-account.yaml", WithPrefix)
diff --git a/script/Makefile b/script/Makefile
index 00e73ee6e..e6b06e27a 100644
--- a/script/Makefile
+++ b/script/Makefile
@@ -607,10 +607,10 @@ $(BUNDLE_CAMEL_APIS): operator-sdk
        @# Remove the camel directory and re-copy only the required api
        rm -rf api_$@/camel/* && cp -rf pkg/apis/camel/$@ api_$@/camel/
        @# operator-sdk generate ... cannot execute across separate modules so 
need to temporarily move api
-       $(OPERATOR_SDK) generate kustomize manifests --apis-dir $(addprefix 
api_, $@) -q
+       $(OPERATOR_SDK) generate kustomize manifests --apis-dir $(addprefix 
api_, $@) -q --input-dir $(MANIFESTS) --output-dir $(MANIFESTS)
        @# Adds the licence header to the csv file.
-       ./script/add_license.sh pkg/resources/config/manifests/bases 
./script/headers/yaml.txt
-       ./script/add_createdAt.sh pkg/resources/config/manifests/bases
+       ./script/add_license.sh $(MANIFESTS)/bases ./script/headers/yaml.txt
+       ./script/add_createdAt.sh $(MANIFESTS)/bases
        @# Clean up temporary working api directories
        rm -rf api_*
 
