This is an automated email from the ASF dual-hosted git repository.
zhongxjian pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/dubbo-kubernetes.git
The following commit(s) were added to refs/heads/master by this push:
new a017a098 [operator] Supplementary code comments v5 (#651)
a017a098 is described below
commit a017a098b5d37f07ce7509ca72caba543335e74a
Author: Jian Zhong <[email protected]>
AuthorDate: Fri Mar 21 11:52:43 2025 +0800
[operator] Supplementary code comments v5 (#651)
---
manifests/charts/admin/templates/deployment.yaml | 15 +---
manifests/charts/admin/templates/job.yaml | 1 -
manifests/charts/admin/values.yaml | 102 +++++++++++------------
operator/pkg/helm/helm.go | 1 -
operator/pkg/util/pointer/pointer.go | 2 +-
pkg/kube/client.go | 26 +-----
pkg/kube/client_factory.go | 32 +++----
pkg/kube/collection/collection.go | 8 ++
pkg/kube/informerfactory/factory.go | 51 ++++++++++--
pkg/kube/kubetypes/types.go | 20 ++---
pkg/kube/util.go | 29 +++++--
pkg/laziness/laziness.go | 17 ++--
12 files changed, 164 insertions(+), 140 deletions(-)
diff --git a/manifests/charts/admin/templates/deployment.yaml b/manifests/charts/admin/templates/deployment.yaml
index c55591e6..a573c495 100644
--- a/manifests/charts/admin/templates/deployment.yaml
+++ b/manifests/charts/admin/templates/deployment.yaml
@@ -48,9 +48,9 @@ spec:
{{- if $admin.lifecycleHooks }}
{{- toYaml $admin.lifecycleHooks | nindent 10 }}
{{- end }}
- args:
- - run
- - --config-file=/etc/dubbo.io/config.yaml
+ # args:
+ # - run
+ # - --config-file=/etc/dubbo.io/config.yaml
ports:
- name: admin-http
containerPort: 8888
@@ -59,15 +59,6 @@ spec:
- name: admin-admission
containerPort: 5443
env:
- {{- $zooName := printf "zookeeper" -}}
- {{- $nacosName := printf "nacos" -}}
- {{- $zooReplicas := int .Values.zookeeper.replicas -}}
- {{- $nacosReplicas := int .Values.nacos.replicas -}}
- {{- $zooNamespace := include "admin.namespace" . -}}
- {{- $nacosNamespace := include "admin.namespace" . -}}
- {{- $clusterDomain := $admin.clusterDomain -}}
- {{- $zooPort := include "zoo.client" . -}}
- {{- $nacosPort := $nacos.serverPort -}}
{{- if $nacos.enabled }}
- name: DUBBO_STORE_TRADITIONAL_REGISTRY
value: nacos://nacos-0.nacos.dubbo-system.svc.cluster.local:8848
diff --git a/manifests/charts/admin/templates/job.yaml b/manifests/charts/admin/templates/job.yaml
index d6682c29..49ba707f 100644
--- a/manifests/charts/admin/templates/job.yaml
+++ b/manifests/charts/admin/templates/job.yaml
@@ -21,7 +21,6 @@ spec:
nameservers:
- 8.8.8.8
searches:
- - default.svc.cluster.local
- svc.cluster.local
- cluster.local
containers:
diff --git a/manifests/charts/admin/values.yaml b/manifests/charts/admin/values.yaml
index b1702581..5bcedd83 100644
--- a/manifests/charts/admin/values.yaml
+++ b/manifests/charts/admin/values.yaml
@@ -38,9 +38,9 @@ _internal_default_values_not_set:
## Configure the application image
image:
# Source of the container image.
- registry: docker.io/apache/dubbo-admin
+ registry: mfordjody/gin-web-log
# Version tag of the container image.
- tag: 0.5.0-SNAPSHOT
+ tag: latest
# Image pull policy, available options are: Always, IfNotPresent, Never.
pullPolicy: IfNotPresent
@@ -92,51 +92,51 @@ _internal_default_values_not_set:
## Termination grace period for pods.
terminationGracePeriodSeconds: 30
- startupProbe:
- # Delay before the probe is initiated.
- initialDelaySeconds: 60
- # Time to wait for the probe to complete.
- timeoutSeconds: 30
- # How often to perform the probe.
- periodSeconds: 10
- # Minimum consecutive successes for the probe to be considered successful.
- successThreshold: 1
- # Perform an HTTP GET request to check.
- httpGet:
- # The path to use for the HTTP GET request.
- path: /health
- # The port on which the HTTP GET request will be made.
- port: 8888
- readinessProbe:
- # Delay before the probe is initiated.
- initialDelaySeconds: 60
- # Time to wait for the probe to complete.
- timeoutSeconds: 30
- # How often to perform the probe.
- periodSeconds: 10
- # Minimum consecutive successes for the probe to be considered successful.
- successThreshold: 1
- # Perform an HTTP GET request to check.
- httpGet:
- # The path to use for the HTTP GET request.
- path: /health
- # The port on which the HTTP GET request will be made.
- port: 8888
- livenessProbe:
- # Delay before the probe is initiated.
- initialDelaySeconds: 60
- # Time to wait for the probe to complete.
- timeoutSeconds: 30
- # How often to perform the probe.
- periodSeconds: 10
- # Minimum consecutive successes for the probe to be considered successful.
- successThreshold: 1
- # Perform an HTTP GET request to check.
- httpGet:
- # The path to use for the HTTP GET request.
- path: /health
- # The port on which the HTTP GET request will be made.
- port: 8888
+# startupProbe:
+# # Delay before the probe is initiated.
+# initialDelaySeconds: 60
+# # Time to wait for the probe to complete.
+# timeoutSeconds: 30
+# # How often to perform the probe.
+# periodSeconds: 10
+# # Minimum consecutive successes for the probe to be considered successful.
+# successThreshold: 1
+# # Perform an HTTP GET request to check.
+# httpGet:
+# # The path to use for the HTTP GET request.
+# path: /health
+# # The port on which the HTTP GET request will be made.
+# port: 8888
+# readinessProbe:
+# # Delay before the probe is initiated.
+# initialDelaySeconds: 60
+# # Time to wait for the probe to complete.
+# timeoutSeconds: 30
+# # How often to perform the probe.
+# periodSeconds: 10
+# # Minimum consecutive successes for the probe to be considered successful.
+# successThreshold: 1
+# # Perform an HTTP GET request to check.
+# httpGet:
+# # The path to use for the HTTP GET request.
+# path: /health
+# # The port on which the HTTP GET request will be made.
+# port: 8888
+# livenessProbe:
+# # Delay before the probe is initiated.
+# initialDelaySeconds: 60
+# # Time to wait for the probe to complete.
+# timeoutSeconds: 30
+# # How often to perform the probe.
+# periodSeconds: 10
+# # Minimum consecutive successes for the probe to be considered successful.
+# successThreshold: 1
+# # Perform an HTTP GET request to check.
+# httpGet:
+# # The path to use for the HTTP GET request.
+# path: /health
+# # The port on which the HTTP GET request will be made.
+# port: 8888
## Define lifecycle hooks for the application container.
lifecycleHooks: ~
# postStart:
@@ -174,15 +174,15 @@ _internal_default_values_not_set:
# Maximum CPU and memory resources allowed for the container.
limits:
# CPU usage limit.
- cpu: 102m
+ cpu: 512m
# Memory usage limit.
- memory: 100Mi
+ memory: 512Mi
# Initial CPU and memory resource requests for the container.
requests:
# CPU usage request.
- cpu: 102m
+ cpu: 512m
# Memory usage request.
- memory: 100Mi
+ memory: 512Mi
## Define tolerations for the application pods.
tolerations: ~
diff --git a/operator/pkg/helm/helm.go b/operator/pkg/helm/helm.go
index 54437411..16f9b96c 100644
--- a/operator/pkg/helm/helm.go
+++ b/operator/pkg/helm/helm.go
@@ -54,7 +54,6 @@ func Render(namespace string, directory string, dop values.Map) ([]manifest.Mani
		return nil, nil, fmt.Errorf("failed to get values from dop: %v", ok)
}
path := pathJoin("charts", directory)
- // pkgPath := dop.GetPathString("spec.packagePath")
f := manifests.BuiltinDir("")
chrt, err := loadChart(f, path)
output, warnings, err := renderChart(namespace, val, chrt)
diff --git a/operator/pkg/util/pointer/pointer.go b/operator/pkg/util/pointer/pointer.go
index a0291508..13e3e2f7 100644
--- a/operator/pkg/util/pointer/pointer.go
+++ b/operator/pkg/util/pointer/pointer.go
@@ -18,7 +18,7 @@
package pointer
// Of returns a pointer to the input. In most cases, callers should just do &t. However, in some cases
-// Go cannot take a pointer. For example, `ptr.Of(f())`.
+// Go cannot take a pointer. For example, `pointer.Of(f())`.
func Of[T any](t T) *T {
return &t
}
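
The corrected comment points at the motivating case: Go cannot take the address of a function's return value or of a literal expression. A minimal usage sketch (the names are illustrative, not from this repo):

    func replicas() int32 { return 3 }

    // r := &replicas()         // does not compile: cannot take the address of a call
    r := pointer.Of(replicas()) // *int32, via the generic helper
    _ = r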
diff --git a/pkg/kube/client.go b/pkg/kube/client.go
index e0526802..dc4d7fad 100644
--- a/pkg/kube/client.go
+++ b/pkg/kube/client.go
@@ -28,10 +28,9 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeVersion "k8s.io/apimachinery/pkg/version"
- "k8s.io/client-go/discovery"
+ // "k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
- "k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"net/http"
@@ -45,19 +44,18 @@ type client struct {
factory *clientFactory
version laziness.Laziness[*kubeVersion.Info]
informerFactory informerfactory.InformerFactory
- restClient *rest.RESTClient
- discoveryClient discovery.CachedDiscoveryInterface
dynamic dynamic.Interface
kube kubernetes.Interface
- metadata metadata.Interface
mapper meta.ResettableRESTMapper
http *http.Client
}
type Client interface {
+ // Ext returns the API extensions client.
Ext() kubeExtClient.Interface
+
+	// Kube returns the core kube client.
Kube() kubernetes.Interface
- Dynamic() dynamic.Interface
}
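
With Dynamic() dropped from the interface, callers are expected to go through the typed clientset instead; a hedged sketch of a caller (ctx, c, and the metav1 import are assumed in scope):

    pods, err := c.Kube().CoreV1().Pods("dubbo-system").List(ctx, metav1.ListOptions{})
    if err != nil {
        return err
    }
    fmt.Println(len(pods.Items))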
type CLIClient interface {
@@ -82,14 +80,6 @@ func newInternalClient(factory *clientFactory, opts ...ClientOption) (CLIClient,
for _, opt := range opts {
opt(&c)
}
- c.restClient, err = factory.RestClient()
- if err != nil {
- return nil, err
- }
- c.discoveryClient, err = factory.ToDiscoveryClient()
- if err != nil {
- return nil, err
- }
c.mapper, err = factory.mapper.Get()
if err != nil {
return nil, err
@@ -98,10 +88,6 @@ func newInternalClient(factory *clientFactory, opts ...ClientOption) (CLIClient,
if err != nil {
return nil, err
}
- c.metadata, err = metadata.NewForConfig(c.config)
- if err != nil {
- return nil, err
- }
c.dynamic, err = dynamic.NewForConfig(c.config)
if err != nil {
return nil, err
@@ -141,10 +127,6 @@ var (
_ CLIClient = &client{}
)
-func (c *client) Dynamic() dynamic.Interface {
- return c.dynamic
-}
-
func (c *client) Ext() kubeExtClient.Interface {
return c.extSet
}
diff --git a/pkg/kube/client_factory.go b/pkg/kube/client_factory.go
index 1c74d35d..4f1029cb 100644
--- a/pkg/kube/client_factory.go
+++ b/pkg/kube/client_factory.go
@@ -23,7 +23,7 @@ import (
"k8s.io/client-go/discovery"
diskcached "k8s.io/client-go/discovery/cached/disk"
"k8s.io/client-go/discovery/cached/memory"
- "k8s.io/client-go/dynamic"
+ // "k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
@@ -34,6 +34,9 @@ import (
"time"
)
+// clientFactory partially implements the kubectl util.Factory, which provides access to various k8s clients.
+// The full Factory can be built with MakeKubeFactory.
+// This split is to avoid huge dependencies.
type clientFactory struct {
clientConfig clientcmd.ClientConfig
expander laziness.Laziness[meta.RESTMapper]
@@ -41,6 +44,7 @@ type clientFactory struct {
discoveryClient laziness.Laziness[discovery.CachedDiscoveryInterface]
}
+// newClientFactory creates a new util.Factory from the given clientcmd.ClientConfig.
func newClientFactory(clientConfig clientcmd.ClientConfig, diskCache bool) *clientFactory {
cf := &clientFactory{
clientConfig: clientConfig,
@@ -50,6 +54,7 @@ func newClientFactory(clientConfig clientcmd.ClientConfig, diskCache bool) *clie
if err != nil {
return nil, err
}
+		// Setup cached discovery. CLIs use disk cache, controllers use memory cache.
if diskCache {
cacheDir := filepath.Join(homedir.HomeDir(), ".kube", "cache")
httpCacheDir := filepath.Join(cacheDir, "http")
@@ -83,10 +88,6 @@ func newClientFactory(clientConfig clientcmd.ClientConfig, diskCache bool) *clie
return cf
}
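
The new comment distinguishes two cache strategies; a sketch of how that split presumably maps onto client-go's cached discovery implementations (restConfig is assumed in scope, and the TTL is illustrative):

    var cached discovery.CachedDiscoveryInterface
    var err error
    if diskCache {
        // CLI path: persist discovery and HTTP responses under ~/.kube/cache.
        cacheDir := filepath.Join(homedir.HomeDir(), ".kube", "cache")
        cached, err = diskcached.NewCachedDiscoveryClientForConfig(
            restConfig,
            filepath.Join(cacheDir, "discovery"),
            filepath.Join(cacheDir, "http"),
            10*time.Minute, // illustrative TTL
        )
    } else {
        // Controller path: cache discovery in memory for the process lifetime.
        var dc *discovery.DiscoveryClient
        dc, err = discovery.NewDiscoveryClientForConfig(restConfig)
        if err == nil {
            cached = memory.NewMemCacheClient(dc)
        }
    }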
-func (c *clientFactory) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
- return c.discoveryClient.Get()
-}
-
func (c *clientFactory) RestClient() (*rest.RESTClient, error) {
clientConfig, err := c.ToRestConfig()
if err != nil {
@@ -95,6 +96,10 @@ func (c *clientFactory) RestClient() (*rest.RESTClient, error) {
return rest.RESTClientFor(clientConfig)
}
+func (c *clientFactory) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
+ return c.discoveryClient.Get()
+}
+
func (c *clientFactory) ToRestConfig() (*rest.Config, error) {
restConfig, err := c.clientConfig.ClientConfig()
if err != nil {
@@ -103,22 +108,7 @@ func (c *clientFactory) ToRestConfig() (*rest.Config, error) {
return SetRestDefaults(restConfig), nil
}
-func (c *clientFactory) ToRestMapper() (meta.RESTMapper, error) {
- return c.expander.Get()
-}
-
-func (c *clientFactory) DynamicClient() (dynamic.Interface, error) {
- restConfig, err := c.ToRestConfig()
- if err != nil {
- return nil, err
- }
- return dynamic.NewForConfig(restConfig)
-}
-
-func (c *clientFactory) ToRawKubeConfigLoader() clientcmd.ClientConfig {
- return c.clientConfig
-}
-
+// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported.
+// Windows is really restrictive, so this is really restrictive.
var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/.)]`)
func computeDiscoverCacheDir(dir, host string) string {
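
The hunk cuts off before the body. For orientation, kubectl's equivalent helper sanitizes the API server host into a filesystem-safe directory name, and this fork presumably does the same; a sketch of that familiar upstream logic, not necessarily this repo's exact code:

    func computeDiscoverCacheDir(dir, host string) string {
        // Strip the URL scheme, then replace anything the conservative
        // regex flags as potentially illegal in a file name with '_'.
        schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
        safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
        return filepath.Join(dir, safeHost)
    }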
diff --git a/pkg/kube/collection/collection.go b/pkg/kube/collection/collection.go
index 66f8d9ad..cc320234 100644
--- a/pkg/kube/collection/collection.go
+++ b/pkg/kube/collection/collection.go
@@ -23,11 +23,14 @@ import (
"github.com/apache/dubbo-kubernetes/operator/pkg/schema"
)
+// Schemas contains metadata about configuration resources.
type Schemas struct {
byCollection map[config.GroupVersionKind]schema.Schema
byAddOrder []schema.Schema
}
+// FindByGroupVersionAliasesKind searches and returns the first schema with the given GVK.
+// If none is found, it searches the schemas' version aliases for a match.
func (s Schemas) FindByGroupVersionAliasesKind(gvk config.GroupVersionKind) (schema.Schema, bool) {
for _, rs := range s.byAddOrder {
for _, va := range rs.GroupVersionAliasKinds() {
@@ -39,10 +42,12 @@ func (s Schemas) FindByGroupVersionAliasesKind(gvk config.GroupVersionKind) (sch
return nil, false
}
+// SchemasBuilder is a builder for the Schemas type.
type SchemasBuilder struct {
schemas Schemas
}
+// NewSchemasBuilder returns a new instance of SchemasBuilder.
func NewSchemasBuilder() *SchemasBuilder {
s := Schemas{
byCollection: make(map[config.GroupVersionKind]schema.Schema),
@@ -50,6 +55,7 @@ func NewSchemasBuilder() *SchemasBuilder {
return &SchemasBuilder{schemas: s}
}
+// Add a new collection to the schemas.
func (b *SchemasBuilder) Add(s schema.Schema) error {
if _, found := b.schemas.byCollection[s.GroupVersionKind()]; found {
return fmt.Errorf("collection already exists: %v", s.GroupVersionKind())
@@ -59,6 +65,7 @@ func (b *SchemasBuilder) Add(s schema.Schema) error {
return nil
}
+// MustAdd calls Add and panics if it fails.
func (b *SchemasBuilder) MustAdd(s schema.Schema) *SchemasBuilder {
if err := b.Add(s); err != nil {
panic(fmt.Sprintf("SchemasBuilder.MustAdd: %v", err))
@@ -66,6 +73,7 @@ func (b *SchemasBuilder) MustAdd(s schema.Schema) *SchemasBuilder {
return b
}
+// Build a new Schemas from this SchemasBuilder.
func (b *SchemasBuilder) Build() Schemas {
s := b.schemas
b.schemas = Schemas{}
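
Taken together, the builder is meant to be used fluently at init time; a hypothetical usage sketch (the schema values are placeholders, not from this commit):

    schemas := collection.NewSchemasBuilder().
        MustAdd(deploymentSchema). // hypothetical schema.Schema values
        MustAdd(serviceSchema).
        Build()
    if s, ok := schemas.FindByGroupVersionAliasesKind(gvk); ok {
        _ = s // use the matched schema
    }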
diff --git a/pkg/kube/informerfactory/factory.go b/pkg/kube/informerfactory/factory.go
index 9457dadf..0edc7401 100644
--- a/pkg/kube/informerfactory/factory.go
+++ b/pkg/kube/informerfactory/factory.go
@@ -15,6 +15,19 @@
* limitations under the License.
*/
+// Package informerfactory provides a "factory" to generate informers. This allows users to create the
+// same informers in multiple different locations, while still using the same underlying resources.
+// Additionally, aggregate operations like Start, Shutdown, and Wait are available.
+// Kubernetes core has informer factories with very similar logic. However, they have a few problems that
+// spurred a fork:
+// * Factories are per package. That means we have ~6 distinct factories, which makes management a hassle.
+// * Across these, the factories are often inconsistent in functionality. Changes to these take >4 months.
+// * Lack of functionality we want (see below).
+//
+// Added functionality:
+// * Single factory for any type, including dynamic informers, meta informers, typed informers, etc.
+// * Ability to create multiple informers of the same type but with different filters.
+// * Ability to run a single informer rather than all of them.
package informerfactory
import (
@@ -27,6 +40,7 @@ import (
"sync"
)
+// NewInformerFunc returns a SharedIndexInformer.
type NewInformerFunc func() cache.SharedIndexInformer
type StartableInformer struct {
@@ -35,26 +49,46 @@ type StartableInformer struct {
}
type InformerFactory interface {
+	// Start initializes all requested informers. They are handled in goroutines
+	// which run until the stop channel gets closed.
	Start(stopCh <-chan struct{})
+	// InformerFor returns the SharedIndexInformer for the provided type.
	InformerFor(resource schema.GroupVersionResource, opts kubetypes.InformerOptions, newFunc NewInformerFunc) StartableInformer
+	// WaitForCacheSync blocks until all started informers' caches were synced
+	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) bool
+	// Shutdown marks a factory as shutting down. At that point no new
+	// informers can be started anymore and Start will return without
+	// doing anything.
+	//
+	// In addition, Shutdown blocks until all goroutines have terminated. For that
+	// to happen, the close channel(s) that they were started with must be closed,
+	// either before Shutdown gets called or while it is waiting.
+	//
+	// Shutdown may be called multiple times, even concurrently. All such calls will
+	// block until all goroutines have terminated.
Shutdown()
}
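
The interface comments describe a lifecycle worth spelling out end to end; a hedged sketch assuming a factory f, a GVR, and a NewInformerFunc are already in scope:

    stop := make(chan struct{})
    inf := f.InformerFor(gvr, kubetypes.InformerOptions{Namespace: "dubbo-system"}, newFunc)
    f.Start(stop) // safe to call repeatedly; only not-yet-started informers are launched
    if !f.WaitForCacheSync(stop) {
        // caches never synced: the stop channel was closed first
    }
    _ = inf      // use the synced informer(s) here
    close(stop)  // signal the informer goroutines to exit
    f.Shutdown() // blocks until they have all terminated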
+// informerKey represents a unique informer.
type informerKey struct {
gvr schema.GroupVersionResource
labelSelector string
fieldSelector string
- informerType kubetypes.InformerType
namespace string
}
type informerFactory struct {
- lock sync.Mutex
- informers map[informerKey]builtInformer
+ lock sync.Mutex
+ informers map[informerKey]builtInformer
+	// startedInformers is used for tracking which informers have been started.
+	// This allows Start() to be called multiple times safely.
+ // This allows Start() to be called multiple times safely.
startedInformers sets.Set[informerKey]
- wg sync.WaitGroup
- shuttingDown bool
+ // wg tracks how many goroutines were started.
+ wg sync.WaitGroup
+	// shuttingDown is true when Shutdown has been called. It may still be running
+	// because it needs to wait for goroutines.
+ shuttingDown bool
}
type builtInformer struct {
@@ -73,6 +107,7 @@ func (s StartableInformer) Start(stopCh <-chan struct{}) {
s.start(stopCh)
}
+// Start initializes all requested informers.
func (f *informerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
@@ -83,6 +118,9 @@ func (f *informerFactory) Start(stopCh <-chan struct{}) {
for informerType, informer := range f.informers {
if !f.startedInformers.Contains(informerType) {
+ // We need a new variable in each loop iteration,
+ // otherwise the goroutine would use the loop variable
+ // and that keeps changing.
informer := informer
f.wg.Add(1)
go func() {
@@ -94,6 +132,7 @@ func (f *informerFactory) Start(stopCh <-chan struct{}) {
}
}
+// WaitForCacheSync waits until all started informers' caches are synced.
func (f *informerFactory) WaitForCacheSync(stopCh <-chan struct{}) bool {
informers := func() []cache.SharedIndexInformer {
f.lock.Lock()
@@ -116,6 +155,7 @@ func (f *informerFactory) WaitForCacheSync(stopCh <-chan struct{}) bool {
}
func (f *informerFactory) Shutdown() {
+ // Will return immediately if there is nothing to wait for.
defer f.wg.Wait()
f.lock.Lock()
@@ -132,7 +172,6 @@ func (f *informerFactory) InformerFor(resource schema.GroupVersionResource, opts
gvr: resource,
labelSelector: opts.LabelSelector,
fieldSelector: opts.FieldSelector,
- informerType: opts.InformerType,
namespace: opts.Namespace,
}
inf, exists := f.informers[key]
diff --git a/pkg/kube/kubetypes/types.go b/pkg/kube/kubetypes/types.go
index 85771ae9..eecccd3e 100644
--- a/pkg/kube/kubetypes/types.go
+++ b/pkg/kube/kubetypes/types.go
@@ -18,17 +18,13 @@
package kubetypes
type InformerOptions struct {
- LabelSelector string
- FieldSelector string
- Namespace string
+ // A selector to restrict the list of returned objects by their labels.
+ LabelSelector string
+ // A selector to restrict the list of returned objects by their fields.
+ FieldSelector string
+ // Namespace to watch.
+ Namespace string
+	// ObjectTransform allows arbitrarily modifying objects stored in the underlying cache.
+	// If unset, a default transform is provided to remove ManagedFields (high cost, low value).
ObjectTransform func(obj any) (any, error)
- InformerType InformerType
}
-
-type InformerType int
-
-const (
- StandardInformer InformerType = iota
- DynamicInformer
- MetadataInformer
-)
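
The new ObjectTransform field replaces the informer-type enum with a hook; a hedged sketch of a transform that strips ManagedFields, mirroring the documented default (meta is k8s.io/apimachinery/pkg/api/meta; the selector values are illustrative):

    opts := kubetypes.InformerOptions{
        Namespace:     "dubbo-system", // illustrative
        LabelSelector: "app=admin",    // illustrative
        ObjectTransform: func(obj any) (any, error) {
            if acc, err := meta.Accessor(obj); err == nil {
                acc.SetManagedFields(nil) // drop high-cost, low-value metadata before caching
            }
            return obj, nil
        },
    }
    _ = opts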
diff --git a/pkg/kube/util.go b/pkg/kube/util.go
index 60be2fc9..f5d87eb0 100644
--- a/pkg/kube/util.go
+++ b/pkg/kube/util.go
@@ -25,6 +25,7 @@ import (
"os"
)
+// DefaultRestConfig returns the rest.Config for the given kube config file and context.
func DefaultRestConfig(kubeconfig, context string, fns ...func(config *rest.Config)) (*rest.Config, error) {
bcc, err := BuildClientConfig(kubeconfig, context)
if err != nil {
@@ -36,14 +37,11 @@ func DefaultRestConfig(kubeconfig, context string, fns ...func(config *rest.Conf
return bcc, nil
}
-func BuildClientConfig(kubeconfig, context string) (*rest.Config, error) {
- c, err := BuildClientCmd(kubeconfig, context).ClientConfig()
- if err != nil {
- return nil, err
- }
- return c, nil
-}
-
+// BuildClientCmd builds a client cmd config from a kubeconfig filepath and context.
+// It overrides the current context with the one provided (empty to use default).
+//
+// This is a modified version of k8s.io/client-go/tools/clientcmd/BuildConfigFromFlags with the
+// difference that it loads default configs if not running in-cluster.
func BuildClientCmd(kubeconfig, context string, overrides ...func(configOverrides *clientcmd.ConfigOverrides)) clientcmd.ClientConfig {
if kubeconfig != "" {
info, err := os.Stat(kubeconfig)
@@ -64,6 +62,21 @@ func BuildClientCmd(kubeconfig, context string, overrides ...func(configOverride
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
}
+// BuildClientConfig builds a client rest config from a kubeconfig filepath and context.
+// It overrides the current context with the one provided (empty to use default).
+//
+// This is a modified version of k8s.io/client-go/tools/clientcmd/BuildConfigFromFlags with the
+// difference that it loads default configs if not running in-cluster.
+func BuildClientConfig(kubeconfig, context string) (*rest.Config, error) {
+ c, err := BuildClientCmd(kubeconfig, context).ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
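
For orientation, the moved helpers compose as follows; a sketch assuming default kubeconfig resolution (empty kubeconfig and context select the default loading rules):

    cfg, err := kube.DefaultRestConfig("", "")
    if err != nil {
        panic(err) // sketch only
    }
    cs, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    _ = cs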
+// SetRestDefaults is a helper function that sets default values for the given rest.Config.
+// This function is idempotent.
func SetRestDefaults(config *rest.Config) *rest.Config {
if config.GroupVersion == nil || config.GroupVersion.Empty() {
config.GroupVersion = &corev1.SchemeGroupVersion
diff --git a/pkg/laziness/laziness.go b/pkg/laziness/laziness.go
index 54ed54d6..7aa10d6a 100644
--- a/pkg/laziness/laziness.go
+++ b/pkg/laziness/laziness.go
@@ -15,6 +15,7 @@
* limitations under the License.
*/
+// Package laziness exposes lazily computed values.
package laziness
import (
@@ -24,13 +25,16 @@ import (
type lazinessImpl[T any] struct {
getter func() (T, error)
- retry bool
- res T
- err error
- done uint32
- m sync.Mutex
+	// retry, if true, will ensure getter() is called for each Get() until a nil error is returned.
+	retry bool
+	// Cached responses. Note: with retry enabled, these will be unset until a nil error is returned.
+ res T
+ err error
+ done uint32
+ m sync.Mutex
}
+// Laziness represents a value whose computation is deferred until the first access.
type Laziness[T any] interface {
Get() (T, error)
}
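
This mirrors how the client caches its server version above (version laziness.Laziness[*kubeVersion.Info]); a hedged usage sketch, with discoveryClient assumed in scope:

    version := laziness.NewWithRetry(func() (*kubeVersion.Info, error) {
        return discoveryClient.ServerVersion()
    })
    info, err := version.Get() // getter runs now; with retry, it re-runs on later Gets until it succeeds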
@@ -43,6 +47,8 @@ func New[T any](f func() (T, error)) Laziness[T] {
}
}
+// NewWithRetry returns a new lazily computed value. The value will be recomputed on each
+// Get() call until a nil error is returned.
func NewWithRetry[T any](f func() (T, error)) Laziness[T] {
return &lazinessImpl[T]{
getter: f,
@@ -62,6 +68,7 @@ func (l *lazinessImpl[T]) doSlow() (T, error) {
defer l.m.Unlock()
if l.done == 0 {
done := uint32(1)
+ // Defer in case of panic
defer func() {
atomic.StoreUint32(&l.done, done)
}()