This is an automated email from the ASF dual-hosted git repository.
sunnianjun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/shardingsphere-on-cloud.git
The following commit(s) were added to refs/heads/main by this push:
new d08f01a feat: introduce new CRD ComputeNode (#181)
d08f01a is described below
commit d08f01aa8a832350c0ca4acfa1c70a4d2f3d3a7b
Author: liyao <[email protected]>
AuthorDate: Mon Jan 16 09:48:15 2023 +0800
feat: introduce new CRD ComputeNode (#181)
* chore: add clean directive in Makefile
Signed-off-by: mlycore <[email protected]>
* feat: add new CRD ComputeNode
Signed-off-by: mlycore <[email protected]>
* feat: add feature gate option for ComputeNode
Signed-off-by: mlycore <[email protected]>
* chore: remove unused return
Signed-off-by: mlycore <[email protected]>
* feat: introduce ComputeNodeController
Signed-off-by: mlycore <[email protected]>
* refactor: merge resource reconcile with ComputeNode resources
Signed-off-by: mlycore <[email protected]>
* chore: update go mod
Signed-off-by: mlycore <[email protected]>
* chore: remove unused crd functions
Signed-off-by: mlycore <[email protected]>
* chore: update crd manifests
Signed-off-by: mlycore <[email protected]>
* chore: remove unused comments
Signed-off-by: mlycore <[email protected]>
* chore: remove unused log
Signed-off-by: mlycore <[email protected]>
* refactor: introduce consts
Signed-off-by: mlycore <[email protected]>
Signed-off-by: mlycore <[email protected]>
---
.../shardingsphere.apache.org_computenodes.yaml | 1077 ++++++++++++++++++++
...ingsphere.apache.org_shardingsphereproxies.yaml | 10 -
...pache.org_shardingsphereproxyserverconfigs.yaml | 1 -
shardingsphere-operator/Makefile | 6 +-
.../api/v1alpha1/compute_node_types.go | 444 ++++++++
.../api/v1alpha1/zz_generated.deepcopy.go | 657 ++++++++++++
.../cmd/shardingsphere-operator/manager/manager.go | 20 +-
shardingsphere-operator/go.mod | 1 +
shardingsphere-operator/go.sum | 2 +
.../pkg/controllers/compute_node_controller.go | 343 +++++++
.../pkg/controllers/proxy_controller.go | 1 -
shardingsphere-operator/pkg/reconcile/configmap.go | 95 ++
.../pkg/reconcile/deployment.go | 514 +++-------
.../{deployment.go => deployment_proxy.go} | 0
shardingsphere-operator/pkg/reconcile/resource.go | 4 +-
shardingsphere-operator/pkg/reconcile/service.go | 59 ++
16 files changed, 2830 insertions(+), 404 deletions(-)
diff --git
a/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_computenodes.yaml
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_computenodes.yaml
new file mode 100644
index 0000000..fcb7ec2
--- /dev/null
+++
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_computenodes.yaml
@@ -0,0 +1,1077 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.9.0
+ creationTimestamp: null
+ name: computenodes.shardingsphere.apache.org
+spec:
+ group: shardingsphere.apache.org
+ names:
+ kind: ComputeNode
+ listKind: ComputeNodeList
+ plural: computenodes
+ singular: computenode
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.readyInstances
+ name: ReadyInstances
+ type: integer
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.loadBalancer.clusterIP
+ name: ClusterIP
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: ComputeNode is the Schema for the ShardingSphere Proxy API
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this
representation
+ of an object. Servers should convert recognized schemas to the
latest
+ internal value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST
resource this
+ object represents. Servers may infer this from the endpoint the
client
+ submits requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ProxySpec defines the desired state of
ShardingSphereProxy
+ properties:
+ bootstrap:
+ description: BootstrapConfig is used for any ShardingSphere
Proxy
+ startup
+ properties:
+ agentConfig:
+ description: AgentConfig defines the config for
ShardingSphere-Agent,
+ rendered as agent.yaml
+ properties:
+ plugins:
+ description: AgentPlugin defines a set of plugins for
ShardingSphere
+ Agent
+ properties:
+ logging:
+ description: PluginLogging defines the plugin for
logging
+ properties:
+ baseLogging:
+ properties:
+ props:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type: object
+ type: object
+ metrics:
+ description: PluginMetrics defines the plugin for
metrics
+ properties:
+ prometheus:
+ properties:
+ host:
+ type: string
+ port:
+ format: int32
+ type: integer
+ properties:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - host
+ - port
+ type: object
+ type: object
+ tracing:
+ description: PluginTracing defines the plugin for
tracing
+ properties:
+ tracing:
+ properties:
+ jaeger:
+ properties:
+ host:
+ type: string
+ port:
+ format: int32
+ type: integer
+ props:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields:
true
+ required:
+ - host
+ - port
+ type: object
+ openTelemetry:
+ properties:
+ props:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields:
true
+ type: object
+ skyWalking:
+ properties:
+ props:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields:
true
+ type: object
+ zipkin:
+ properties:
+ host:
+ type: string
+ port:
+ format: int32
+ type: integer
+ props:
+ additionalProperties:
+ type: string
+ type: object
+ x-kubernetes-preserve-unknown-fields:
true
+ required:
+ - host
+ - port
+ type: object
+ type: object
+ type: object
+ type: object
+ type: object
+ logbackConfig:
+ description: LogbackConfig contains contents of the
expected logback.xml
+ type: string
+ serverConfig:
+ description: ServerConfig defines the bootstrap config for
a ShardingSphere
+ Proxy
+ properties:
+ authority:
+ description: ComputeNodeAuth is used to set up
initial user
+ to login compute node, and authority data of storage
node.
+ properties:
+ privilege:
+ description: ComputeNodePrivilege for storage
node, the
+ default value is ALL_PERMITTED
+ properties:
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ users:
+ items:
+ description: 'ComputeNodeUser is a slice about
authorized
+ host and password for compute node. Format:
user:<username>@<hostname>,hostname
+ is % or empty string means do not care about
authorized
+ host password:<password>'
+ properties:
+ password:
+ type: string
+ user:
+ type: string
+ required:
+ - password
+ - user
+ type: object
+ type: array
+ required:
+ - users
+ type: object
+ mode:
+ description: ComputeNodeServerMode is the mode for
ShardingSphere
+ Proxy
+ properties:
+ repository:
+ description: Repository is the metadata persistent
store
+ for ShardingSphere
+ properties:
+ props:
+ additionalProperties:
+ type: string
+ description: properties of metadata repository
Props
+ ComputeNodeClusterProps
`json:"props,omitempty"`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ type:
+ description: type of metadata repository
+ enum:
+ - ZooKeeper
+ - Etcd
+ type: string
+ required:
+ - type
+ type: object
+ type:
+ type: string
+ required:
+ - type
+ type: object
+ props:
+ additionalProperties:
+ type: string
+ description: Props *ComputeNodeProps
`json:"props,omitempty"`
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ required:
+ - authority
+ - mode
+ type: object
+ type: object
+ env:
+ description: port is ShardingSphere-Proxy startup port Ports
[]corev1.ContainerPort
+ `json:"ports,omitempty"`
+ items:
+ description: EnvVar represents an environment variable
present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a
C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references $(VAR_NAME) are
expanded using
+ the previously defined environment variables in the
container
+ and any service environment variables. If a variable
cannot
+ be resolved, the reference in the input string will be
unchanged.
+ Double $$ are reduced to a single $, which allows for
escaping
+ the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
produce the
+ string literal "$(VAR_NAME)". Escaped references will
never
+ be expanded, regardless of whether the variable exists
or
+ not. Defaults to "".'
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or
its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ fieldRef:
+ description: 'Selects a field of the pod: supports
metadata.name,
+ metadata.namespace, `metadata.labels[''<KEY>'']`,
`metadata.annotations[''<KEY>'']`,
+ spec.nodeName, spec.serviceAccountName,
status.hostIP,
+ status.podIP, status.podIPs.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the
specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ resourceFieldRef:
+ description: 'Selects a resource of the container:
only
+ resources limits and requests (limits.cpu,
limits.memory,
+ limits.ephemeral-storage, requests.cpu,
requests.memory
+ and requests.ephemeral-storage) are currently
supported.'
+ properties:
+ containerName:
+ description: 'Container name: required for
volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the
exposed
+ resources, defaults to "1"
+ pattern:
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's
namespace
+ properties:
+ key:
+ description: The key of the secret to select
from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion,
kind, uid?'
+ type: string
+ optional:
+ description: Specify whether the Secret or its
key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ imagePullSecrets:
+ items:
+ description: LocalObjectReference contains enough
information to
+ let you locate the referenced object inside the same
namespace.
+ properties:
+ name:
+ description: 'Name of the referent. More info:
https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?'
+ type: string
+ type: object
+ type: array
+ portBindings:
+ description: Service Service `json:"service,omitempty"`
+ items:
+ properties:
+ containerPort:
+ description: Number of port to expose on the pod's IP
address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external port to.
+ type: string
+ name:
+ description: If specified, this must be an IANA_SVC_NAME
and
+ unique within the pod. Each named port in a pod must
have
+ a unique name. Name for the port that can be referred
to by
+ services.
+ type: string
+ nodePort:
+ description: 'The port on each node on which this
service is
+ exposed when type is NodePort or LoadBalancer.
Usually assigned
+ by the system. If a value is specified, in-range, and
not
+ in use it will be used, otherwise the operation will
fail. If
+ not specified, a port will be allocated if this
Service requires
+ one. If this field is specified when creating a
Service which
+ does not need it, creation will fail. This field will
be wiped
+ when updating a Service to no longer need it (e.g.
changing
+ type from NodePort to ClusterIP). More info:
https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport'
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: Protocol for port. Must be UDP, TCP, or
SCTP. Defaults
+ to "TCP".
+ type: string
+ servicePort:
+ description: The port that will be exposed by this
service.
+ format: int32
+ type: integer
+ required:
+ - containerPort
+ - servicePort
+ type: object
+ type: array
+ probes:
+ description: ProxyProbe defines the probe actions for
LivenessProbe,
+ ReadinessProbe and StartupProbe
+ properties:
+ livenessProbe:
+ description: Probes are not allowed for ephemeral
containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to
execute inside
+ the container, the working directory for the
command is
+ root ('/') in the container's filesystem. The
command
+ is simply exec'd, it is not run inside a shell,
so traditional
+ shell instructions ('|', etc) won't work. To use
a shell,
+ you need to explicitly call out to that shell.
Exit
+ status of 0 is treated as live/healthy and
non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the
probe to
+ be considered failed after having succeeded.
Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC
port.
+ This is a beta field and requires enabling
GRPCContainerProbe
+ feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service.
Number must
+ be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service
to place
+ in the gRPC HealthCheckRequest (see
https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+ \n If this is not specified, the default
behavior is
+ defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to
perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
the
+ pod IP. You probably want to set "Host" in
httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the
host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container
has started
+ before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the
probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the
probe to
+ be considered successful after having failed.
Defaults to
+ 1. Must be 1 for liveness and startup. Minimum value
is
+ 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a
TCP
+ port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to,
defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod
needs to
+ terminate gracefully upon probe failure. The grace
period
+ is the duration in seconds after the processes
running in
+ the pod are sent a termination signal and the time
when
+ the processes are forcibly halted with a kill
signal. Set
+ this value longer than the expected cleanup time for
your
+ process. If this value is nil, the pod's
terminationGracePeriodSeconds
+ will be used. Otherwise, this value overrides the
value
+ provided by the pod spec. Value must be non-negative
integer.
+ The value zero indicates stop immediately via the
kill signal
+ (no opportunity to shut down). This is a beta field
and
+ requires enabling ProbeTerminationGracePeriod
feature gate.
+ Minimum value is 1.
spec.terminationGracePeriodSeconds is
+ used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe
times
+ out. Defaults to 1 second. Minimum value is 1. More
info:
+
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ readinessProbe:
+ description: Probes are not allowed for ephemeral
containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to
execute inside
+ the container, the working directory for the
command is
+ root ('/') in the container's filesystem. The
command
+ is simply exec'd, it is not run inside a shell,
so traditional
+ shell instructions ('|', etc) won't work. To use
a shell,
+ you need to explicitly call out to that shell.
Exit
+ status of 0 is treated as live/healthy and
non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the
probe to
+ be considered failed after having succeeded.
Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC
port.
+ This is a beta field and requires enabling
GRPCContainerProbe
+ feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service.
Number must
+ be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service
to place
+ in the gRPC HealthCheckRequest (see
https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+ \n If this is not specified, the default
behavior is
+ defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to
perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
the
+ pod IP. You probably want to set "Host" in
httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the
host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container
has started
+ before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the
probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the
probe to
+ be considered successful after having failed.
Defaults to
+ 1. Must be 1 for liveness and startup. Minimum value
is
+ 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a
TCP
+ port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to,
defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod
needs to
+ terminate gracefully upon probe failure. The grace
period
+ is the duration in seconds after the processes
running in
+ the pod are sent a termination signal and the time
when
+ the processes are forcibly halted with a kill
signal. Set
+ this value longer than the expected cleanup time for
your
+ process. If this value is nil, the pod's
terminationGracePeriodSeconds
+ will be used. Otherwise, this value overrides the
value
+ provided by the pod spec. Value must be non-negative
integer.
+ The value zero indicates stop immediately via the
kill signal
+ (no opportunity to shut down). This is a beta field
and
+ requires enabling ProbeTerminationGracePeriod
feature gate.
+ Minimum value is 1.
spec.terminationGracePeriodSeconds is
+ used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe
times
+ out. Defaults to 1 second. Minimum value is 1. More
info:
+
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ startupProbe:
+ description: Probes are not allowed for ephemeral
containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: Command is the command line to
execute inside
+ the container, the working directory for the
command is
+ root ('/') in the container's filesystem. The
command
+ is simply exec'd, it is not run inside a shell,
so traditional
+ shell instructions ('|', etc) won't work. To use
a shell,
+ you need to explicitly call out to that shell.
Exit
+ status of 0 is treated as live/healthy and
non-zero
+ is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: Minimum consecutive failures for the
probe to
+ be considered failed after having succeeded.
Defaults to
+ 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving a GRPC
port.
+ This is a beta field and requires enabling
GRPCContainerProbe
+ feature gate.
+ properties:
+ port:
+ description: Port number of the gRPC service.
Number must
+ be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: "Service is the name of the service
to place
+ in the gRPC HealthCheckRequest (see
https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+ \n If this is not specified, the default
behavior is
+ defined by gRPC."
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request to
perform.
+ properties:
+ host:
+ description: Host name to connect to, defaults to
the
+ pod IP. You probably want to set "Host" in
httpHeaders
+ instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
HTTP
+ allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
header to
+ be used in HTTP probes
+ properties:
+ name:
+ description: The header field name
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Name or number of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use for connecting to the
host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: 'Number of seconds after the container
has started
+ before liveness probes are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ periodSeconds:
+ description: How often (in seconds) to perform the
probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: Minimum consecutive successes for the
probe to
+ be considered successful after having failed.
Defaults to
+ 1. Must be 1 for liveness and startup. Minimum value
is
+ 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving a
TCP
+ port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect to,
defaults
+ to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number or name of the port to access
on the
+ container. Number must be in the range 1 to
65535. Name
+ must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds the pod
needs to
+ terminate gracefully upon probe failure. The grace
period
+ is the duration in seconds after the processes
running in
+ the pod are sent a termination signal and the time
when
+ the processes are forcibly halted with a kill
signal. Set
+ this value longer than the expected cleanup time for
your
+ process. If this value is nil, the pod's
terminationGracePeriodSeconds
+ will be used. Otherwise, this value overrides the
value
+ provided by the pod spec. Value must be non-negative
integer.
+ The value zero indicates stop immediately via the
kill signal
+ (no opportunity to shut down). This is a beta field
and
+ requires enabling ProbeTerminationGracePeriod
feature gate.
+ Minimum value is 1.
spec.terminationGracePeriodSeconds is
+ used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: 'Number of seconds after which the probe
times
+ out. Defaults to 1 second. Minimum value is 1. More
info:
+
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ format: int32
+ type: integer
+ type: object
+ type: object
+ replicas:
+ description: replicas is the expected number of replicas of
ShardingSphere-Proxy
+ format: int32
+ type: integer
+ resources:
+ description: ResourceRequirements describes the compute
resource requirements.
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern:
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Limits describes the maximum amount of
compute resources
+ allowed. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern:
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: 'Requests describes the minimum amount of
compute
+ resources required. If Requests is omitted for a
container,
+ it defaults to Limits if that is explicitly specified,
otherwise
+ to an implementation-defined value. More info:
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
+ type: object
+ type: object
+ selector:
+ description: selector defines a set of label selectors
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
requirements.
+ The requirements are ANDed.
+ items:
+ description: A label selector requirement is a selector
that
+ contains values, a key, and an operator that relates
the key
+ and values.
+ properties:
+ key:
+ description: key is the label key that the selector
applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's
relationship to
+ a set of values. Valid operators are In, NotIn,
Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If
the
+ operator is In or NotIn, the values array must be
non-empty.
+ If the operator is Exists or DoesNotExist, the
values
+ array must be empty. This array is replaced during
a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A
single
+ {key,value} in the matchLabels map is equivalent to an
element
+ of matchExpressions, whose key field is "key", the
operator
+ is "In", and the values array contains only "value". The
requirements
+ are ANDed.
+ type: object
+ type: object
+ serverVersion:
+ description: version is the version of ShardingSphere-Proxy
+ type: string
+ serviceType:
+ description: Service Type string describes ingress methods for
a service
+ enum:
+ - ClusterIP
+ - NodePort
+ - LoadBalancer
+ - ExternalName
+ type: string
+ storageNodeConnector:
+ description: MySQLDriver Defines the mysql-driven version in
ShardingSphere-proxy
+ properties:
+ type:
+ description: ConnectorType defines the frontend protocol
for ShardingSphere
+ Proxy
+ type: string
+ version:
+ description: mysql-driven version, must be x.y.z
+ pattern: ^([1-9]\d|[1-9])(\.([1-9]\d|\d)){2}$
+ type: string
+ required:
+ - type
+ - version
+ type: object
+ required:
+ - selector
+ type: object
+ status:
+ description: ComputeNodeStatus defines the observed state of
ShardingSphere
+ Proxy
+ properties:
+ conditions:
+ description: Conditions The conditions array, the reason and
message
+ fields
+ items:
+ description: ComputeNodeCondition | **phase** |
**condition** |
+ **descriptions**| | ------------- | ---------- |
----------------------------------------------------
+ | | NotReady | Deployed | pods are deployed but are
not
+ created or currently pending| | NotReady | Started
| pods
+ are started but not satisfy ready requirements| | Ready
|
+ Ready | minimum pods satisfy ready requirements| |
NotReady |
+ Unknown | can not locate the status of pods | |
NotReady |
+ Failed | ShardingSphere-Proxy failed to start
correctly due
+ to some problems|
+ properties:
+ lastUpdateTime:
+ format: date-time
+ type: string
+ message:
+ type: string
+ reason:
+ type: string
+ status:
+ type: string
+ type:
+ type: string
+ required:
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ loadBalancer:
+ description: LoadBalancer contains the current status of the
load-balancer,
+ if one is present.
+ properties:
+ clusterIP:
+ type: string
+ ingress:
+ description: Ingress is a list containing ingress points
for the
+ load-balancer. Traffic intended for the service should
be sent
+ to these ingress points.
+ items:
+ description: 'LoadBalancerIngress represents the status
of a
+ load-balancer ingress point: traffic intended for the
service
+ should be sent to an ingress point.'
+ properties:
+ hostname:
+ description: Hostname is set for load-balancer
ingress points
+ that are DNS based (typically AWS load-balancers)
+ type: string
+ ip:
+ description: IP is set for load-balancer ingress
points
+ that are IP based (typically GCE or OpenStack
load-balancers)
+ type: string
+ ports:
+ description: Ports is a list of records of service
ports
+ If used, every port defined in the service should
have
+ an entry in it
+ items:
+ properties:
+ error:
+ description: 'Error is to record the problem
with
+ the service port The format of the error
shall comply
+ with the following rules: - built-in error
values
+ shall be specified in this file and those
shall
+ use CamelCase names - cloud provider
specific error
+ values must have names that comply with the
format
+ foo.example.com/CamelCase. --- The regex it
matches
+ is (dns1123SubdomainFmt/)?(qualifiedNameFmt)'
+ maxLength: 316
+ pattern:
^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ port:
+ description: Port is the port number of the
service
+ port of which status is recorded here
+ format: int32
+ type: integer
+ protocol:
+ default: TCP
+ description: 'Protocol is the protocol of the
service
+ port of which status is recorded here The
supported
+ values are: "TCP", "UDP", "SCTP"'
+ type: string
+ required:
+ - port
+ - protocol
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ type: object
+ type: array
+ type: object
+ observedGeneration:
+ description: The generation observed by the deployment
controller.
+ format: int64
+ type: integer
+ phase:
+ description: 'ShardingSphere-Proxy phase are a brief summary
of the
+ ShardingSphere-Proxy life cycle There are two possible phase
values:
+ Ready: ShardingSphere-Proxy can already provide external
services
+ NotReady: ShardingSphere-Proxy cannot provide external
services'
+ type: string
+ readyInstances:
+ description: ReadyInstances shows the number of replicas that
ShardingSphere-Proxy
+ is running normally
+ format: int32
+ type: integer
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git
a/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxies.yaml
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxies.yaml
index 01a235e..3dd133a 100644
---
a/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxies.yaml
+++
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxies.yaml
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
@@ -1095,15 +1094,6 @@ spec:
description: Conditions The conditions array, the reason and
message
fields
items:
- description: Condition | **condition** | **status** |
**directions**|
- | ------------- | ---------- |
----------------------------------------------------
- | | Initialized | true | Initialization
successful| |
- Initialized | false | initialization failed| |
Started |
- true | pod started successfully but not ready| |
Started |
- false | pod started failed| | Ready | true
|
- The pod is ready and can provide external services| |
Unknown |
- true | ShardingSphere-Proxy failed to start
correctly due
- to some problems |
properties:
lastUpdateTime:
format: date-time
diff --git
a/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxyserverconfigs.yaml
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxyserverconfigs.yaml
index bebd4b6..7ae13c5 100644
---
a/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxyserverconfigs.yaml
+++
b/charts/apache-shardingsphere-operator-charts/crds/shardingsphere.apache.org_shardingsphereproxyserverconfigs.yaml
@@ -14,7 +14,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
diff --git a/shardingsphere-operator/Makefile b/shardingsphere-operator/Makefile
index 7e005f6..150b4c3 100644
--- a/shardingsphere-operator/Makefile
+++ b/shardingsphere-operator/Makefile
@@ -55,10 +55,14 @@ fmt: ## Run go fmt against code.
test: manifests generate fmt envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p
path)" go test ./... -coverprofile cover.out
+.PHONY: clean
+clean:
+ -rm ./bin/manager
+
##@ Build
.PHONY: build
-build: generate fmt ## Build manager binary.
+build: clean generate fmt ## Build manager binary.
go build -o bin/manager cmd/shardingsphere-operator/main.go
.PHONY: run
diff --git a/shardingsphere-operator/api/v1alpha1/compute_node_types.go
b/shardingsphere-operator/api/v1alpha1/compute_node_types.go
new file mode 100644
index 0000000..0f47a8a
--- /dev/null
+++ b/shardingsphere-operator/api/v1alpha1/compute_node_types.go
@@ -0,0 +1,444 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package v1alpha1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +kubebuilder:object:root=true
+// ComputeNodeList contains a list of ComputeNode
+type ComputeNodeList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ComputeNode `json:"items"`
+}
+
+//
+kubebuilder:printcolumn:JSONPath=".status.readyInstances",name=ReadyInstances,type=integer
+// +kubebuilder:printcolumn:JSONPath=".status.phase",name=Phase,type=string
+//
+kubebuilder:printcolumn:JSONPath=".status.loadBalancer.clusterIP",name="ClusterIP",type=string
+//
+kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name=Age,type=date
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// ComputeNode is the Schema for the ShardingSphere Proxy API
+type ComputeNode struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ComputeNodeSpec `json:"spec,omitempty"`
+ // +optional
+ Status ComputeNodeStatus `json:"status,omitempty"`
+}
+
+type PrivilegeType string
+
+const (
+ AllPermitted PrivilegeType = "ALL_PERMITTED"
+)
+
+// ComputeNodePrivilege for storage node, the default value is ALL_PERMITTED
+type ComputeNodePrivilege struct {
+ Type PrivilegeType `json:"type"`
+}
+
+// ComputeNodeUser specifies the authorized host and password for a compute
node.
+// Format:
+// user:<username>@<hostname>,hostname is % or empty string means do not care
about authorized host
+// password:<password>
+type ComputeNodeUser struct {
+ User string `json:"user"`
+ Password string `json:"password"`
+}
+
+// ComputeNodeAuthority is used to set up the initial user to login compute
node, and authority data of storage node.
+type ComputeNodeAuthority struct {
+ Users []ComputeNodeUser `json:"users"`
+ // +optional
+ Privilege ComputeNodePrivilege `json:"privilege"`
+}
+
+type RepositoryType string
+
+const (
+ RepositoryTypeZookeeper RepositoryType = "ZooKeeper"
+ RepositoryTypeEtcd RepositoryType = "Etcd"
+)
+
+// Repository is the metadata persistent store for ShardingSphere
+type Repository struct {
+ // +kubebuilder:validation:Enum=ZooKeeper;Etcd
+ // type of metadata repository
+ Type RepositoryType `json:"type"`
+ // properties of metadata repository
+ // +optional
+ // Props ComputeNodeClusterProps `json:"props,omitempty"`
+ Props Properties `json:"props,omitempty"`
+}
+
+// ComputeNodeClustersProps is the properties of a ShardingSphere Cluster
+// type ComputeNodeClusterProps struct {
+// // namespace of registry center
+// Namespace string `json:"namespace" yaml:"namespace"`
+// // server lists of registry center
+// ServerLists string `json:"server-lists" yaml:"server-lists"`
+// // retryIntervalMilliseconds Milliseconds of retry interval. default:
500
+// // +optional
+// RetryIntervalMilliseconds int
`json:"retryIntervalMilliseconds,omitempty"
yaml:"retryIntervalMilliseconds,omitempty"`
+// // the max retries of client connection. default: 3
+// // +optional
+// MaxRetries int `json:"maxRetries,omitempty" yaml:"maxRetries,omitempty"`
+// // the seconds of ephemeral data live. default: 60
+// // +optional
+// TimeToLiveSeconds int `json:"timeToLiveSeconds,omitempty"
yaml:"timeToLiveSeconds,omitempty"`
+// // the milliseconds of operation timeout. default: 500
+// // +optional
+// OperationTimeoutMilliseconds int
`json:"operationTimeoutMilliseconds,omitempty"
yaml:"operationTimeoutMilliseconds,omitempty"`
+// // password of login
+// // +optional
+// Digest string `json:"digest,omitempty" yaml:"digest,omitempty"`
+// }
+
+type ModeType string
+
+const (
+ ModeTypeCluster ModeType = "Cluster"
+ ModeTypeStandalone ModeType = "Standalone"
+)
+
+// ComputeNodeProps is which Apache ShardingSphere provides the way of
property configuration to configure system level configuration.
+// type ComputeNodeProps struct {
+// // the max thread size of worker group to execute SQL. One
ShardingSphereDataSource will use a independent thread pool, it does not share
thread pool even different data source in same JVM.
+// // +optional
+// KernelExecutorSize int `json:"kernel-executor-size,omitempty"
yaml:"kernel-executor-size,omitempty"`
+// // whether validate table meta data consistency when application
startup or updated.
+// // +optional
+// CheckTableMetadataEnabled bool
`json:"check-table-metadata-enabled,omitempty"
yaml:"check-table-metadata-enabled,omitempty"`
+// // ShardingSphere Proxy backend query fetch size. A larger value may
increase the memory usage of ShardingSphere ShardingSphereProxy. The default
value is -1, which means set the minimum value for different JDBC drivers.
+// // +optional
+// ProxyBackendQueryFetchSize int
`json:"proxy-backend-query-fetch-size,omitempty"
yaml:"proxy-backend-query-fetch-size,omitempty"`
+// // whether validate duplicate table when application startup or updated.
+// // +optional
+// CheckDuplicateTableEnabled bool
`json:"check-duplicate-table-enabled,omitempty"
yaml:"check-duplicate-table-enabled,omitempty"`
+// // ShardingSphere Proxy frontend Netty executor size. The default value
is 0, which means let Netty decide.
+// // +optional
+// ProxyFrontendExecutorSize int
`json:"proxy-frontend-executor-size,omitempty"
yaml:"proxy-frontend-executor-size,omitempty"`
+// // available options of proxy backend executor suitable: OLAP(default),
OLTP. The OLTP option may reduce time cost of writing packets to client, but it
may increase the latency of SQL execution and block other clients if client
connections are more than proxy-frontend-executor-size, especially executing
slow SQL.
+// // +optional
+// ProxyBackendExecutorSuitable string
`json:"proxy-backend-executor-suitable,omitempty"
yaml:"proxy-backend-executor-suitable,omitempty"`
+// // +optional
+// ProxyBackendDriverType string
`json:"proxy-backend-driver-type,omitempty"
yaml:"proxy-backend-driver-type,omitempty"`
+// // +optional
+// ProxyFrontendDatabaseProtocolType string
`json:"proxy-frontend-database-protocol-type"
yaml:"proxy-frontend-database-protocol-type,omitempty"`
+// }
+
+// ComputeNodeServerMode is the mode for ShardingSphere Proxy
+type ComputeNodeServerMode struct {
+ // +optional
+ Repository Repository `json:"repository"`
+ Type ModeType `json:"type"`
+}
+
+// ServerConfig defines the bootstrap config for a ShardingSphere Proxy
+type ServerConfig struct {
+ Authority ComputeNodeAuthority `json:"authority"`
+ Mode ComputeNodeServerMode `json:"mode"`
+ //+optional
+ // Props *ComputeNodeProps `json:"props,omitempty"`
+ Props Properties `json:"props,omitempty"`
+}
+
+// LogbackConfig contains contents of the expected logback.xml
+type LogbackConfig string
+
+// +kubebuilder:pruning:PreserveUnknownFields
+type Properties map[string]string
+
+type BaseLogging struct {
+ Props Properties `json:"props,omitempty"`
+}
+
+// PluginLogging defines the plugin for logging
+type PluginLogging struct {
+ BaseLogging BaseLogging `json:"baseLogging,omitempty"
yaml:"BaseLogging"`
+}
+
+type Prometheus struct {
+ Host string `json:"host"`
+ Port int32 `json:"port"`
+ Props Properties `json:"properties,omitempty"`
+}
+
+// PluginMetrics defines the plugin for metrics
+type PluginMetrics struct {
+ Prometheus Prometheus `json:"prometheus,omitempty" yaml:"Prometheus"`
+}
+
+type JaegerTracing struct {
+ Host string `json:"host"`
+ Port int32 `json:"port"`
+ Props Properties `json:"props,omitempty"`
+}
+
+type ZipkinTracing struct {
+ Host string `json:"host"`
+ Port int32 `json:"port"`
+ Props Properties `json:"props,omitempty"`
+}
+
+type SkyWalkingTracing struct {
+ Props Properties `json:"props,omitempty"`
+}
+
+type OpenTelemetryTracing struct {
+ Props Properties `json:"props,omitempty"`
+}
+
+type Tracing struct {
+ // +optional
+ Jaeger JaegerTracing `json:"jaeger,omitempty" yaml:"Jaeger"`
+ // +optional
+ Zipkin ZipkinTracing `json:"zipkin,omitempty" yaml:"Zipkin"`
+ // +optional
+ SkyWalking SkyWalkingTracing `json:"skyWalking,omitempty"
yaml:"SkyWalking"`
+ // +optional
+ OpenTelemetry OpenTelemetryTracing `json:"openTelemetry,omitempty"
yaml:"OpenTelemetry"`
+}
+
+// PluginTracing defines the plugin for tracing
+type PluginTracing struct {
+ Tracing Tracing `json:"tracing,omitempty"`
+}
+
+// AgentPlugin defines a set of plugins for ShardingSphere Agent
+type AgentPlugin struct {
+ // +optional
+ Logging PluginLogging `json:"logging,omitempty"`
+ // +optional
+ Metrics PluginMetrics `json:"metrics,omitempty"`
+ // +optional
+ Tracing PluginTracing `json:"tracing,omitempty"`
+}
+
+// AgentConfig defines the config for ShardingSphere-Agent, rendered as
agent.yaml
+type AgentConfig struct {
+ Plugins AgentPlugin `json:"plugins,omitempty"`
+}
+
+// Service defines the Kubernetes Service of ShardingSphere-Proxy
+type Service struct {
+ Ports []corev1.ServicePort `json:"ports,omitempty"`
+ //
+kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer;ExternalName
+ Type corev1.ServiceType `json:"type"`
+}
+
+// ProxyProbe defines the probe actions for LivenessProbe, ReadinessProbe and
StartupProbe
+type ProxyProbe struct {
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty"`
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" `
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ StartupProbe *corev1.Probe `json:"startupProbe,omitempty"`
+}
+
+// ConnectorType defines the frontend protocol for ShardingSphere Proxy
+type ConnectorType string
+
+const (
+ ConnectorTypeMySQL ConnectorType = "mysql"
+ ConnectorTypePostgreSQL ConnectorType = "postgresql"
+)
+
+// StorageNodeConnector defines the storage backend connector (e.g. the mysql driver) used by ShardingSphere-Proxy
+type StorageNodeConnector struct {
+ Type ConnectorType `json:"type"`
+ //
+kubebuilder:validation:Pattern=`^([1-9]\d|[1-9])(\.([1-9]\d|\d)){2}$`
+ // mysql-driven version,must be x.y.z
+ Version string `json:"version"`
+}
+
+// BootstrapConfig is used for any ShardingSphere Proxy startup
+type BootstrapConfig struct {
+ // +optional
+ ServerConfig ServerConfig `json:"serverConfig,omitempty"`
+ // +optional
+ LogbackConfig LogbackConfig `json:"logbackConfig,omitempty"`
+ // +optional
+ AgentConfig AgentConfig `json:"agentConfig,omitempty"`
+}
+
+type PortBinding struct {
+ // If specified, this must be an IANA_SVC_NAME and unique within the
pod. Each
+ // named port in a pod must have a unique name. Name for the port that
can be
+ // referred to by services.
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // Number of port to expose on the pod's IP address.
+ // This must be a valid port number, 0 < x < 65536.
+ ContainerPort int32 `json:"containerPort" yaml:"containerPort"`
+ // Protocol for port. Must be UDP, TCP, or SCTP.
+ // Defaults to "TCP".
+ // +optional
+ // +default="TCP"
+ Protocol corev1.Protocol `json:"protocol,omitempty"`
+ // What host IP to bind the external port to.
+ // +optional
+ HostIP string `json:"hostIP,omitempty" yaml:"hostIP"`
+
+ // The port that will be exposed by this service.
+ ServicePort int32 `json:"servicePort" yaml:"servicePort"`
+
+ // The port on each node on which this service is exposed when type is
+ // NodePort or LoadBalancer. Usually assigned by the system. If a
value is
+ // specified, in-range, and not in use it will be used, otherwise the
+ // operation will fail. If not specified, a port will be allocated if
this
+ // Service requires one. If this field is specified when creating a
+ // Service which does not need it, creation will fail. This field will
be
+ // wiped when updating a Service to no longer need it (e.g. changing
type
+ // from NodePort to ClusterIP).
+ // More info:
https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+ // +optional
+ NodePort int32 `json:"nodePort,omitempty" yaml:"nodePort"`
+}
+
+// ComputeNodeSpec defines the desired state of a ComputeNode (ShardingSphere-Proxy)
+type ComputeNodeSpec struct {
+ // +optional
+ StorageNodeConnector *StorageNodeConnector
`json:"storageNodeConnector,omitempty"`
+ // version is the version of ShardingSphere-Proxy
+ ServerVersion string `json:"serverVersion,omitempty"
yaml:"serverVersion"`
+
+ // replicas is the expected number of replicas of ShardingSphere-Proxy
+ // +optional
+ Replicas int32 `json:"replicas,omitempty"`
+ // selector defines a set of label selectors
+ Selector *metav1.LabelSelector `json:"selector"`
+
+ // +optional
+ Probes *ProxyProbe `json:"probes,omitempty"`
+ // +optional
+ ImagePullSecrets []corev1.LocalObjectReference
`json:"imagePullSecrets,omitempty"`
+ // port is ShardingSphere-Proxy startup port
+ // +optional
+ // Ports []corev1.ContainerPort `json:"ports,omitempty"`
+ // +optional
+ Env []corev1.EnvVar `json:"env,omitempty"`
+ // +optional
+ Resources v1.ResourceRequirements `json:"resources,omitempty"`
+ // Service Service `json:"service,omitempty"`
+ // +optional
+ PortBindings []PortBinding `json:"portBindings,omitempty"
yaml:"portBinding"`
+
+ //
+kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer;ExternalName
+ // +optional
+ ServiceType corev1.ServiceType `json:"serviceType,omitempty"
yaml:"serviceType"`
+
+ // +optional
+ Bootstrap BootstrapConfig `json:"bootstrap,omitempty"`
+}
+
+// ComputeNodeStatus defines the observed state of ShardingSphere Proxy
+type ComputeNodeStatus struct {
+ // The generation observed by the deployment controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // ShardingSphere-Proxy phase are a brief summary of the
ShardingSphere-Proxy life cycle
+ // There are two possible phase values:
+ // Ready: ShardingSphere-Proxy can already provide external services
+ // NotReady: ShardingSphere-Proxy cannot provide external services
+ // +optional
+ Phase ComputeNodePhaseStatus `json:"phase"`
+
+ // Conditions The conditions array, the reason and message fields
+ // +optional
+ Conditions ComputeNodeConditions `json:"conditions"`
+ // ReadyInstances shows the number of replicas that
ShardingSphere-Proxy is running normally
+ // +optional
+ ReadyInstances int32 `json:"readyInstances"`
+
+ // LoadBalancer contains the current status of the load-balancer,
+ // if one is present.
+ // +optional
+ LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty"`
+}
+
+type LoadBalancerStatus struct {
+ // +optional
+ ClusterIP string `json:"clusterIP,omitempty"`
+
+ // Ingress is a list containing ingress points for the load-balancer.
+ // Traffic intended for the service should be sent to these ingress
points.
+ // +optional
+ Ingress []corev1.LoadBalancerIngress `json:"ingress,omitempty"`
+}
+
+type ComputeNodePhaseStatus string
+
+const (
+ ComputeNodeStatusReady ComputeNodePhaseStatus = "Ready"
+ ComputeNodeStatusNotReady ComputeNodePhaseStatus = "NotReady"
+)
+
+type ComputeNodeConditionType string
+
+// ComputeNodeConditionType shows some states during the startup process of
ShardingSphere-Proxy
+const (
+ ComputeNodeConditionInitialized ComputeNodeConditionType = "Initialized"
+ ComputeNodeConditionStarted ComputeNodeConditionType = "Started"
+ ComputeNodeConditionReady ComputeNodeConditionType = "Ready"
+ ComputeNodeConditionUnknown ComputeNodeConditionType = "Unknown"
+ ComputeNodeConditionDeployed ComputeNodeConditionType = "Deployed"
+ ComputeNodeConditionFailed ComputeNodeConditionType = "Failed"
+)
+
+type ComputeNodeConditions []ComputeNodeCondition
+
+type ConditionStatus string
+
+const (
+ ConditionStatusTrue = "True"
+ ConditionStatusFalse = "False"
+ ConditionStatusUnknown = "Unknown"
+)
+
+// ComputeNodeCondition
+// | **phase** | **condition** | **descriptions**|
+// | ------------- | ---------- |
---------------------------------------------------- |
+// | NotReady | Deployed | pods are deployed but are not created or
currently pending|
+// | NotReady | Started | pods are started but not satisfy ready
requirements|
+// | Ready | Ready | minimum pods satisfy ready requirements|
+// | NotReady | Unknown | can not locate the status of pods |
+// | NotReady | Failed | ShardingSphere-Proxy failed to start
correctly due to some problems|
+type ComputeNodeCondition struct {
+ Type ComputeNodeConditionType `json:"type"`
+ Status ConditionStatus `json:"status"`
+ LastUpdateTime metav1.Time
`json:"lastUpdateTime,omitempty"`
+ Reason string `json:"reason"`
+ Message string `json:"message"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ComputeNode{}, &ComputeNodeList{})
+}
diff --git a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
index a7d0009..0a8a0e2 100644
--- a/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/shardingsphere-operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -24,9 +24,44 @@ package v1alpha1
import (
"k8s.io/api/autoscaling/v2beta2"
"k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *AgentConfig) DeepCopyInto(out *AgentConfig) {
+ *out = *in
+ in.Plugins.DeepCopyInto(&out.Plugins)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new AgentConfig.
+func (in *AgentConfig) DeepCopy() *AgentConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *AgentPlugin) DeepCopyInto(out *AgentPlugin) {
+ *out = *in
+ in.Logging.DeepCopyInto(&out.Logging)
+ in.Metrics.DeepCopyInto(&out.Metrics)
+ in.Tracing.DeepCopyInto(&out.Tracing)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new AgentPlugin.
+func (in *AgentPlugin) DeepCopy() *AgentPlugin {
+ if in == nil {
+ return nil
+ }
+ out := new(AgentPlugin)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *Auth) DeepCopyInto(out *Auth) {
*out = *in
@@ -74,6 +109,45 @@ func (in *AutomaticScaling) DeepCopy() *AutomaticScaling {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *BaseLogging) DeepCopyInto(out *BaseLogging) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new BaseLogging.
+func (in *BaseLogging) DeepCopy() *BaseLogging {
+ if in == nil {
+ return nil
+ }
+ out := new(BaseLogging)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *BootstrapConfig) DeepCopyInto(out *BootstrapConfig) {
+ *out = *in
+ in.ServerConfig.DeepCopyInto(&out.ServerConfig)
+ in.AgentConfig.DeepCopyInto(&out.AgentConfig)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new BootstrapConfig.
+func (in *BootstrapConfig) DeepCopy() *BootstrapConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(BootstrapConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *ClusterConfig) DeepCopyInto(out *ClusterConfig) {
*out = *in
@@ -105,6 +179,241 @@ func (in *ClusterProps) DeepCopy() *ClusterProps {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNode) DeepCopyInto(out *ComputeNode) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNode.
+func (in *ComputeNode) DeepCopy() *ComputeNode {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver,
creating a new runtime.Object.
+func (in *ComputeNode) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeAuthority) DeepCopyInto(out *ComputeNodeAuthority) {
+ *out = *in
+ if in.Users != nil {
+ in, out := &in.Users, &out.Users
+ *out = make([]ComputeNodeUser, len(*in))
+ copy(*out, *in)
+ }
+ out.Privilege = in.Privilege
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeAuthority.
+func (in *ComputeNodeAuthority) DeepCopy() *ComputeNodeAuthority {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeAuthority)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeCondition) DeepCopyInto(out *ComputeNodeCondition) {
+ *out = *in
+ in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeCondition.
+func (in *ComputeNodeCondition) DeepCopy() *ComputeNodeCondition {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeCondition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in ComputeNodeConditions) DeepCopyInto(out *ComputeNodeConditions) {
+ {
+ in := &in
+ *out = make(ComputeNodeConditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeConditions.
+func (in ComputeNodeConditions) DeepCopy() ComputeNodeConditions {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeConditions)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeList) DeepCopyInto(out *ComputeNodeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ComputeNode, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeList.
+func (in *ComputeNodeList) DeepCopy() *ComputeNodeList {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver,
creating a new runtime.Object.
+func (in *ComputeNodeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodePrivilege) DeepCopyInto(out *ComputeNodePrivilege) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodePrivilege.
+func (in *ComputeNodePrivilege) DeepCopy() *ComputeNodePrivilege {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodePrivilege)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeServerMode) DeepCopyInto(out *ComputeNodeServerMode) {
+ *out = *in
+ in.Repository.DeepCopyInto(&out.Repository)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeServerMode.
+func (in *ComputeNodeServerMode) DeepCopy() *ComputeNodeServerMode {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeServerMode)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeSpec) DeepCopyInto(out *ComputeNodeSpec) {
+ *out = *in
+ if in.StorageNodeConnector != nil {
+ in, out := &in.StorageNodeConnector, &out.StorageNodeConnector
+ *out = new(StorageNodeConnector)
+ **out = **in
+ }
+ if in.Selector != nil {
+ in, out := &in.Selector, &out.Selector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Probes != nil {
+ in, out := &in.Probes, &out.Probes
+ *out = new(ProxyProbe)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ImagePullSecrets != nil {
+ in, out := &in.ImagePullSecrets, &out.ImagePullSecrets
+ *out = make([]v1.LocalObjectReference, len(*in))
+ copy(*out, *in)
+ }
+ if in.Env != nil {
+ in, out := &in.Env, &out.Env
+ *out = make([]v1.EnvVar, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Resources.DeepCopyInto(&out.Resources)
+ if in.PortBindings != nil {
+ in, out := &in.PortBindings, &out.PortBindings
+ *out = make([]PortBinding, len(*in))
+ copy(*out, *in)
+ }
+ in.Bootstrap.DeepCopyInto(&out.Bootstrap)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeSpec.
+func (in *ComputeNodeSpec) DeepCopy() *ComputeNodeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeStatus) DeepCopyInto(out *ComputeNodeStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make(ComputeNodeConditions, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.LoadBalancer.DeepCopyInto(&out.LoadBalancer)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeStatus.
+func (in *ComputeNodeStatus) DeepCopy() *ComputeNodeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ComputeNodeUser) DeepCopyInto(out *ComputeNodeUser) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ComputeNodeUser.
+func (in *ComputeNodeUser) DeepCopy() *ComputeNodeUser {
+ if in == nil {
+ return nil
+ }
+ out := new(ComputeNodeUser)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
@@ -142,6 +451,50 @@ func (in Conditions) DeepCopy() Conditions {
return *out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *JaegerTracing) DeepCopyInto(out *JaegerTracing) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new JaegerTracing.
+func (in *JaegerTracing) DeepCopy() *JaegerTracing {
+ if in == nil {
+ return nil
+ }
+ out := new(JaegerTracing)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
+ *out = *in
+ if in.Ingress != nil {
+ in, out := &in.Ingress, &out.Ingress
+ *out = make([]v1.LoadBalancerIngress, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new LoadBalancerStatus.
+func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(LoadBalancerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *MySQLDriver) DeepCopyInto(out *MySQLDriver) {
*out = *in
@@ -157,6 +510,91 @@ func (in *MySQLDriver) DeepCopy() *MySQLDriver {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *OpenTelemetryTracing) DeepCopyInto(out *OpenTelemetryTracing) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new OpenTelemetryTracing.
+func (in *OpenTelemetryTracing) DeepCopy() *OpenTelemetryTracing {
+ if in == nil {
+ return nil
+ }
+ out := new(OpenTelemetryTracing)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *PluginLogging) DeepCopyInto(out *PluginLogging) {
+ *out = *in
+ in.BaseLogging.DeepCopyInto(&out.BaseLogging)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new PluginLogging.
+func (in *PluginLogging) DeepCopy() *PluginLogging {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginLogging)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *PluginMetrics) DeepCopyInto(out *PluginMetrics) {
+ *out = *in
+ in.Prometheus.DeepCopyInto(&out.Prometheus)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new PluginMetrics.
+func (in *PluginMetrics) DeepCopy() *PluginMetrics {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginMetrics)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *PluginTracing) DeepCopyInto(out *PluginTracing) {
+ *out = *in
+ in.Tracing.DeepCopyInto(&out.Tracing)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new PluginTracing.
+func (in *PluginTracing) DeepCopy() *PluginTracing {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginTracing)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *PortBinding) DeepCopyInto(out *PortBinding) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new PortBinding.
+func (in *PortBinding) DeepCopy() *PortBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(PortBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *Privilege) DeepCopyInto(out *Privilege) {
*out = *in
@@ -172,6 +610,49 @@ func (in *Privilege) DeepCopy() *Privilege {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *Prometheus) DeepCopyInto(out *Prometheus) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new Prometheus.
+func (in *Prometheus) DeepCopy() *Prometheus {
+ if in == nil {
+ return nil
+ }
+ out := new(Prometheus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in Properties) DeepCopyInto(out *Properties) {
+ {
+ in := &in
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new Properties.
+func (in Properties) DeepCopy() Properties {
+ if in == nil {
+ return nil
+ }
+ out := new(Properties)
+ in.DeepCopyInto(out)
+ return *out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *Props) DeepCopyInto(out *Props) {
*out = *in
@@ -224,6 +705,36 @@ func (in *ProxyConfigStatus) DeepCopy() *ProxyConfigStatus
{
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ProxyProbe) DeepCopyInto(out *ProxyProbe) {
+ *out = *in
+ if in.LivenessProbe != nil {
+ in, out := &in.LivenessProbe, &out.LivenessProbe
+ *out = new(v1.Probe)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ReadinessProbe != nil {
+ in, out := &in.ReadinessProbe, &out.ReadinessProbe
+ *out = new(v1.Probe)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.StartupProbe != nil {
+ in, out := &in.StartupProbe, &out.StartupProbe
+ *out = new(v1.Probe)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ProxyProbe.
+func (in *ProxyProbe) DeepCopy() *ProxyProbe {
+ if in == nil {
+ return nil
+ }
+ out := new(ProxyProbe)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
*out = *in
@@ -293,6 +804,28 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *Repository) DeepCopyInto(out *Repository) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new Repository.
+func (in *Repository) DeepCopy() *Repository {
+ if in == nil {
+ return nil
+ }
+ out := new(Repository)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *RepositoryConfig) DeepCopyInto(out *RepositoryConfig) {
*out = *in
@@ -309,6 +842,52 @@ func (in *RepositoryConfig) DeepCopy() *RepositoryConfig {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ServerConfig) DeepCopyInto(out *ServerConfig) {
+ *out = *in
+ in.Authority.DeepCopyInto(&out.Authority)
+ in.Mode.DeepCopyInto(&out.Mode)
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ServerConfig.
+func (in *ServerConfig) DeepCopy() *ServerConfig {
+ if in == nil {
+ return nil
+ }
+ out := new(ServerConfig)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *Service) DeepCopyInto(out *Service) {
+ *out = *in
+ if in.Ports != nil {
+ in, out := &in.Ports, &out.Ports
+ *out = make([]v1.ServicePort, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new Service.
+func (in *Service) DeepCopy() *Service {
+ if in == nil {
+ return nil
+ }
+ out := new(Service)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *ServiceType) DeepCopyInto(out *ServiceType) {
*out = *in
@@ -442,6 +1021,62 @@ func (in *ShardingSphereProxyServerConfigList)
DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *SkyWalkingTracing) DeepCopyInto(out *SkyWalkingTracing) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new SkyWalkingTracing.
+func (in *SkyWalkingTracing) DeepCopy() *SkyWalkingTracing {
+ if in == nil {
+ return nil
+ }
+ out := new(SkyWalkingTracing)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *StorageNodeConnector) DeepCopyInto(out *StorageNodeConnector) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new StorageNodeConnector.
+func (in *StorageNodeConnector) DeepCopy() *StorageNodeConnector {
+ if in == nil {
+ return nil
+ }
+ out := new(StorageNodeConnector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *Tracing) DeepCopyInto(out *Tracing) {
+ *out = *in
+ in.Jaeger.DeepCopyInto(&out.Jaeger)
+ in.Zipkin.DeepCopyInto(&out.Zipkin)
+ in.SkyWalking.DeepCopyInto(&out.SkyWalking)
+ in.OpenTelemetry.DeepCopyInto(&out.OpenTelemetry)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new Tracing.
+func (in *Tracing) DeepCopy() *Tracing {
+ if in == nil {
+ return nil
+ }
+ out := new(Tracing)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
func (in *User) DeepCopyInto(out *User) {
*out = *in
@@ -456,3 +1091,25 @@ func (in *User) DeepCopy() *User {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver,
writing into out. in must be non-nil.
+func (in *ZipkinTracing) DeepCopyInto(out *ZipkinTracing) {
+ *out = *in
+ if in.Props != nil {
+ in, out := &in.Props, &out.Props
+ *out = make(Properties, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver,
creating a new ZipkinTracing.
+func (in *ZipkinTracing) DeepCopy() *ZipkinTracing {
+ if in == nil {
+ return nil
+ }
+ out := new(ZipkinTracing)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git
a/shardingsphere-operator/cmd/shardingsphere-operator/manager/manager.go
b/shardingsphere-operator/cmd/shardingsphere-operator/manager/manager.go
index 8be01e6..275deb3 100644
--- a/shardingsphere-operator/cmd/shardingsphere-operator/manager/manager.go
+++ b/shardingsphere-operator/cmd/shardingsphere-operator/manager/manager.go
@@ -20,9 +20,10 @@ package manager
import (
"context"
"flag"
-
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/metrics"
"os"
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/metrics"
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/controllers"
"go.uber.org/zap/zapcore"
@@ -47,6 +48,11 @@ func init() {
// Options aggregates the standard controller-runtime manager options with
// the operator's own feature-gate switches.
type Options struct {
	ctrl.Options
	FeatureGateOptions
}

// FeatureGateOptions toggles optional operator features.
type FeatureGateOptions struct {
	// ComputeNode enables reconciliation of the ComputeNode CRD
	// (set by the -feature-gate-compute-node flag).
	ComputeNode bool
}
func ParseOptionsFromFlags() *Options {
@@ -56,6 +62,7 @@ func ParseOptionsFromFlags() *Options {
flag.BoolVar(&opt.LeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active
controller manager.")
+ flag.BoolVar(&opt.ComputeNode, "feature-gate-compute-node", false,
"Enable support for CustomResourceDefinition ComputeNode.")
opts := zap.Options{
Development: true,
@@ -97,6 +104,17 @@ func New(opts *Options) *Manager {
setupLog.Error(err, "unable to create controller",
"controller", "ShardingSphereProxyServerConfig")
os.Exit(1)
}
+
+ if opts.ComputeNode {
+ if err = (&controllers.ComputeNodeReconciler{
+ Client: mgr.GetClient(),
+ Scheme: mgr.GetScheme(),
+ }).SetupWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create controller",
"controller", "ComputeNode")
+ os.Exit(1)
+ }
+ }
+
return &Manager{
Manager: mgr,
}
diff --git a/shardingsphere-operator/go.mod b/shardingsphere-operator/go.mod
index 050d842..fa31a21 100644
--- a/shardingsphere-operator/go.mod
+++ b/shardingsphere-operator/go.mod
@@ -47,6 +47,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions
v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/mlycore/log v0.0.0-20230112100542-3bb540211ad0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd //
indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 //
indirect
diff --git a/shardingsphere-operator/go.sum b/shardingsphere-operator/go.sum
index 30b57c2..41171e0 100644
--- a/shardingsphere-operator/go.sum
+++ b/shardingsphere-operator/go.sum
@@ -341,6 +341,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod
h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mlycore/log v0.0.0-20230112100542-3bb540211ad0
h1:aEguBSnj+u0mIuKBk7VSjC5vOLPj6yktPw2og/5vBUA=
+github.com/mlycore/log v0.0.0-20230112100542-3bb540211ad0/go.mod
h1:o0me6JuyA3eshxtKC6ElXStaf0VMbUESW8BnruY53f0=
github.com/moby/spdystream v0.2.0/go.mod
h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod
h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
diff --git a/shardingsphere-operator/pkg/controllers/compute_node_controller.go
b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
new file mode 100644
index 0000000..4b7cea3
--- /dev/null
+++ b/shardingsphere-operator/pkg/controllers/compute_node_controller.go
@@ -0,0 +1,343 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package controllers
+
+import (
+ "context"
+ "time"
+
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/pkg/reconcile"
+ appsv1 "k8s.io/api/apps/v1"
+ v1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ logger "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
// defaultRequeueTime is the interval used to periodically re-reconcile a
// ComputeNode even when no watch event fires.
const defaultRequeueTime = 10 * time.Second

// ComputeNodeReconciler reconciles ComputeNode objects and the Deployment,
// Service and ConfigMap resources derived from them.
type ComputeNodeReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}
+
+// SetupWithManager sets up the controller with the Manager.
+func (r *ComputeNodeReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&v1alpha1.ComputeNode{}).
+ Owns(&appsv1.Deployment{}).
+ Owns(&v1.Pod{}).
+ Owns(&v1.Service{}).
+ Owns(&v1.ConfigMap{}).
+ Complete(r)
+}
+
+func (r *ComputeNodeReconciler) Reconcile(ctx context.Context, req
ctrl.Request) (ctrl.Result, error) {
+ log := logger.FromContext(ctx)
+
+ cn := &v1alpha1.ComputeNode{}
+ if err := r.Get(ctx, req.NamespacedName, cn); err != nil {
+ if apierrors.IsNotFound(err) {
+ return ctrl.Result{RequeueAfter: defaultRequeueTime},
nil
+ } else {
+ log.Error(err, "get computenode")
+ return ctrl.Result{Requeue: true}, err
+ }
+ }
+
+ errors := []error{}
+ if err := r.reconcileDeployment(ctx, cn); err != nil {
+ log.Error(err, "Reconcile Deployment Error")
+ errors = append(errors, err)
+ }
+ if err := r.reconcileService(ctx, cn); err != nil {
+ log.Error(err, "Reconcile Service Error")
+ errors = append(errors, err)
+ }
+ if err := r.reconcileConfigMap(ctx, cn); err != nil {
+ log.Error(err, "Reconcile ConfigMap Error")
+ errors = append(errors, err)
+ }
+ if err := r.reconcileStatus(ctx, cn.Namespace, cn.Name, cn.Labels); err
!= nil {
+ log.Error(err, "Reconcile PodList Error")
+ errors = append(errors, err)
+ }
+
+ if len(errors) != 0 {
+ return ctrl.Result{Requeue: true}, errors[0]
+ }
+
+ return ctrl.Result{RequeueAfter: defaultRequeueTime}, nil
+}
+
+func (r *ComputeNodeReconciler) reconcileDeployment(ctx context.Context, cn
*v1alpha1.ComputeNode) error {
+ cur := &appsv1.Deployment{}
+ if err := r.Get(ctx, types.NamespacedName{
+ Namespace: cn.Namespace,
+ Name: cn.Name,
+ }, cur); err != nil {
+ if apierrors.IsNotFound(err) {
+ // create
+ exp := reconcile.ComputeNodeNewDeployment(cn)
+ if err := r.Create(ctx, exp); err != nil {
+ return err
+ }
+ return nil
+ } else {
+ return err
+ }
+ }
+ // update
+ exp := reconcile.ComputeNodeUpdateDeployment(cn, cur)
+ if err := r.Update(ctx, exp); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ComputeNodeReconciler) reconcileService(ctx context.Context, cn
*v1alpha1.ComputeNode) error {
+ cur := &v1.Service{}
+ if err := r.Get(ctx, types.NamespacedName{
+ Namespace: cn.Namespace,
+ Name: cn.Name,
+ }, cur); err != nil {
+ if apierrors.IsNotFound(err) {
+ // create
+ exp := reconcile.ComputeNodeNewService(cn)
+ if err := r.Create(ctx, exp); err != nil {
+ return err
+ }
+ return nil
+ } else {
+ return err
+ }
+ }
+ // update
+ exp := reconcile.ComputeNodeUpdateService(cn, cur)
+ if err := r.Update(ctx, exp); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ComputeNodeReconciler) reconcileConfigMap(ctx context.Context, cn
*v1alpha1.ComputeNode) error {
+ cur := &v1.ConfigMap{}
+ if err := r.Get(ctx, types.NamespacedName{
+ Namespace: cn.Namespace,
+ Name: cn.Name,
+ }, cur); err != nil {
+ if apierrors.IsNotFound(err) {
+ // create
+ exp := reconcile.ComputeNodeNewConfigMap(cn)
+ if err := r.Create(ctx, exp); err != nil {
+ return err
+ }
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ // update
+ //FIXME: need to rolling update Deployment if ConfigMap indeed updated
+ exp := reconcile.ComputeNodeUpdateConfigMap(cn, cur)
+ if err := r.Update(ctx, exp); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ComputeNodeReconciler) reconcileStatus(ctx context.Context,
namespace, name string, labels map[string]string) error {
+ podList := &v1.PodList{}
+ if err := r.List(ctx, podList, client.InNamespace(namespace),
client.MatchingLabels(labels)); err != nil {
+ return err
+ }
+
+ service := &v1.Service{}
+ if err := r.Get(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ }, service); err != nil {
+ return err
+ }
+
+ rt, err := r.getRuntimeComputeNode(ctx, types.NamespacedName{
+ Namespace: namespace,
+ Name: name,
+ })
+ if err != nil {
+ return err
+ }
+
+ rt.Status = ReconcileComputeNodeStatus(*podList, *service, *rt)
+
+ // TODO: Compare Status with or without modification
+ if err := r.Status().Update(ctx, rt); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func getReadyNodes(podlist v1.PodList) int32 {
+ var cnt int32
+ for _, p := range podlist.Items {
+ if p.Status.Phase == v1.PodRunning {
+ for _, c := range p.Status.Conditions {
+ if c.Type == v1.PodReady && c.Status ==
v1.ConditionTrue {
+ for _, con := range
p.Status.ContainerStatuses {
+ if con.Name ==
"shardingsphere-proxy" && con.Ready {
+ cnt++
+ }
+ }
+ }
+ }
+ }
+ }
+ return cnt
+}
+
+func newConditions(conditions []v1alpha1.ComputeNodeCondition, cond
v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
+ if conditions == nil {
+ conditions = []v1alpha1.ComputeNodeCondition{}
+ }
+ if cond.Type == "" {
+ return conditions
+ }
+
+ found := false
+ for idx, _ := range conditions {
+ if conditions[idx].Type == cond.Type {
+ conditions[idx].LastUpdateTime = cond.LastUpdateTime
+ conditions[idx].Status = cond.Status
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ conditions = append(conditions, cond)
+ }
+
+ return conditions
+}
+
// updateReadyConditions records cond (typically the Ready condition) in the
// condition list, updating an existing entry of the same type or appending
// a new one. Thin wrapper over newConditions kept for call-site symmetry
// with updateNotReadyConditions.
func updateReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
	return newConditions(conditions, cond)
}
+
+func updateNotReadyConditions(conditions []v1alpha1.ComputeNodeCondition, cond
v1alpha1.ComputeNodeCondition) []v1alpha1.ComputeNodeCondition {
+ cur := newConditions(conditions, cond)
+
+ for idx, _ := range cur {
+ if cur[idx].Type == v1alpha1.ComputeNodeConditionReady {
+ cur[idx].LastUpdateTime = metav1.Now()
+ cur[idx].Status = v1alpha1.ConditionStatusFalse
+ }
+ }
+
+ return cur
+}
+
+func clusterCondition(podlist v1.PodList) v1alpha1.ComputeNodeCondition {
+ cond := v1alpha1.ComputeNodeCondition{}
+ if len(podlist.Items) == 0 {
+ return cond
+ }
+
+ condStarted := v1alpha1.ComputeNodeCondition{
+ Type: v1alpha1.ComputeNodeConditionStarted,
+ Status: v1alpha1.ConditionStatusTrue,
+ LastUpdateTime: metav1.Now(),
+ }
+ condUnknown := v1alpha1.ComputeNodeCondition{
+ Type: v1alpha1.ComputeNodeConditionUnknown,
+ Status: v1alpha1.ConditionStatusTrue,
+ LastUpdateTime: metav1.Now(),
+ }
+ condDeployed := v1alpha1.ComputeNodeCondition{
+ Type: v1alpha1.ComputeNodeConditionDeployed,
+ Status: v1alpha1.ConditionStatusTrue,
+ LastUpdateTime: metav1.Now(),
+ }
+ condFailed := v1alpha1.ComputeNodeCondition{
+ Type: v1alpha1.ComputeNodeConditionFailed,
+ Status: v1alpha1.ConditionStatusTrue,
+ LastUpdateTime: metav1.Now(),
+ }
+
+ //FIXME: do not capture ConditionStarted in some cases
+ for _, p := range podlist.Items {
+ switch p.Status.Phase {
+ case v1.PodRunning:
+ return condStarted
+ case v1.PodUnknown:
+ return condUnknown
+ case v1.PodPending:
+ return condDeployed
+ case v1.PodFailed:
+ return condFailed
+ }
+ }
+ return cond
+}
+
+func ReconcileComputeNodeStatus(podlist v1.PodList, svc v1.Service, rt
v1alpha1.ComputeNode) v1alpha1.ComputeNodeStatus {
+ readyNodes := getReadyNodes(podlist)
+
+ rt.Status.ReadyInstances = readyNodes
+ if rt.Spec.Replicas == 0 {
+ rt.Status.Phase = v1alpha1.ComputeNodeStatusNotReady
+ } else {
+ if readyNodes < miniReadyCount {
+ rt.Status.Phase = v1alpha1.ComputeNodeStatusNotReady
+ } else {
+ rt.Status.Phase = v1alpha1.ComputeNodeStatusReady
+ }
+ }
+
+ if rt.Status.Phase == v1alpha1.ComputeNodeStatusReady {
+ rt.Status.Conditions =
updateReadyConditions(rt.Status.Conditions, v1alpha1.ComputeNodeCondition{
+ Type: v1alpha1.ComputeNodeConditionReady,
+ Status: v1alpha1.ConditionStatusTrue,
+ LastUpdateTime: metav1.Now(),
+ })
+ } else {
+ cond := clusterCondition(podlist)
+ rt.Status.Conditions =
updateNotReadyConditions(rt.Status.Conditions, cond)
+ }
+
+ rt.Status.LoadBalancer.ClusterIP = svc.Spec.ClusterIP
+ rt.Status.LoadBalancer.Ingress = svc.Status.LoadBalancer.Ingress
+
+ return rt.Status
+}
+
+func (r *ComputeNodeReconciler) getRuntimeComputeNode(ctx context.Context,
namespacedName types.NamespacedName) (*v1alpha1.ComputeNode, error) {
+ rt := &v1alpha1.ComputeNode{}
+ err := r.Get(ctx, namespacedName, rt)
+ return rt, err
+}
diff --git a/shardingsphere-operator/pkg/controllers/proxy_controller.go
b/shardingsphere-operator/pkg/controllers/proxy_controller.go
index ad5ab81..0498adf 100644
--- a/shardingsphere-operator/pkg/controllers/proxy_controller.go
+++ b/shardingsphere-operator/pkg/controllers/proxy_controller.go
@@ -189,7 +189,6 @@ func (r *ProxyReconciler) reconcileService(ctx
context.Context, namespacedName t
if err := r.Create(ctx, exp); err != nil {
return ctrl.Result{}, err
}
- return ctrl.Result{}, nil
}
} else {
act := service.DeepCopy()
diff --git a/shardingsphere-operator/pkg/reconcile/configmap.go
b/shardingsphere-operator/pkg/reconcile/configmap.go
new file mode 100644
index 0000000..aea4dac
--- /dev/null
+++ b/shardingsphere-operator/pkg/reconcile/configmap.go
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package reconcile
+
+import (
+ "encoding/json"
+ "reflect"
+
+
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+ "gopkg.in/yaml.v2"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
const (
	// File names rendered as keys of the proxy ConfigMap.
	ConfigForLogback = "logback.xml"
	ConfigForServer  = "server.yaml"

	// Annotations through which users supply raw cluster-repository
	// (JSON) and logback configuration for the ComputeNode.
	AnnoClusterRepoConfig = "computenode.shardingsphere.org/server-config-mode-cluster"
	AnnoLogbackConfig     = "computenode.shardingsphere.org/logback"
)
+
+func ComputeNodeNewConfigMap(cn *v1alpha1.ComputeNode) *v1.ConfigMap {
+ cluster := cn.Annotations[AnnoClusterRepoConfig]
+ logback := cn.Annotations[AnnoLogbackConfig]
+
+ cm := ComputeNodeDefaultConfigMap(cn.GetObjectMeta(),
cn.GroupVersionKind())
+ cm.Name = cn.Name
+ cm.Namespace = cn.Namespace
+ cm.Labels = cn.Labels
+
+ if len(logback) > 0 {
+ cm.Data[ConfigForLogback] = logback
+ } else {
+ cm.Data[ConfigForLogback] = string(defaultLogback)
+ }
+
+ // NOTE: ShardingSphere Proxy 5.3.0 needs a server.yaml no matter if it
is empty
+ if !reflect.DeepEqual(cn.Spec.Bootstrap.ServerConfig,
v1alpha1.ServerConfig{}) {
+ servconf := cn.Spec.Bootstrap.ServerConfig.DeepCopy()
+ if cn.Spec.Bootstrap.ServerConfig.Mode.Type ==
v1alpha1.ModeTypeCluster {
+ if len(cluster) > 0 {
+ json.Unmarshal([]byte(cluster),
&servconf.Mode.Repository)
+ }
+ }
+ if y, err := yaml.Marshal(servconf); err == nil {
+ cm.Data[ConfigForServer] = string(y)
+ }
+ } else {
+ cm.Data[ConfigForServer] = "# Empty file is needed"
+ }
+
+ return cm
+}
+
+func ComputeNodeDefaultConfigMap(meta metav1.Object, gvk
schema.GroupVersionKind) *v1.ConfigMap {
+ return &v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "shardingsphere-proxy",
+ Namespace: "default",
+ Labels: map[string]string{},
+ OwnerReferences: []metav1.OwnerReference{
+ *metav1.NewControllerRef(meta, gvk),
+ },
+ },
+ Data: map[string]string{},
+ }
+}
+
+// FIXME: check if changed first, then decide if need to respawn the Pods
+func ComputeNodeUpdateConfigMap(cn *v1alpha1.ComputeNode, cur *v1.ConfigMap)
*v1.ConfigMap {
+ exp := &v1.ConfigMap{}
+ exp.ObjectMeta = cur.ObjectMeta
+ exp.ObjectMeta.ResourceVersion = ""
+ exp.Labels = cur.Labels
+ exp.Annotations = cur.Annotations
+ exp.Data = ComputeNodeNewConfigMap(cn).Data
+ return exp
+}
diff --git a/shardingsphere-operator/pkg/reconcile/deployment.go
b/shardingsphere-operator/pkg/reconcile/deployment.go
index 29a2ea0..8a7e90d 100644
--- a/shardingsphere-operator/pkg/reconcile/deployment.go
+++ b/shardingsphere-operator/pkg/reconcile/deployment.go
@@ -19,101 +19,166 @@ package reconcile
import (
"fmt"
- "reflect"
- "strconv"
- "strings"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
-
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
)
-func NewDeployment(ssproxy *v1alpha1.ShardingSphereProxy) *v1.Deployment {
- return ConstructCascadingDeployment(ssproxy)
-}
-
const (
- AnnoRollingUpdateMaxSurge =
"shardingsphereproxy.shardingsphere.org/rolling-update-max-surge"
- AnnoRollingUpdateMaxUnavailable =
"shardingsphereproxy.shardingsphere.org/rolling-update-max-unavailable"
-
- //miniReadyCount Minimum number of replicas that can be served
- miniReadyCount = 1
+ DefaultExtlibPath = "/opt/shardingsphere-proxy/ext-lib"
)
-func ConstructCascadingDeployment(proxy *v1alpha1.ShardingSphereProxy)
*v1.Deployment {
- if proxy == nil || reflect.DeepEqual(proxy,
&v1alpha1.ShardingSphereProxy{}) {
- return &v1.Deployment{}
+func ComputeNodeNewDeployment(cn *v1alpha1.ComputeNode) *v1.Deployment {
+ deploy := ComputeNodeDefaultDeployment(cn.GetObjectMeta(),
cn.GroupVersionKind())
+
+ // basic information
+ deploy.Name = cn.Name
+ deploy.Namespace = cn.Namespace
+ deploy.Labels = cn.Labels
+ // deploy.Spec.Selector.MatchLabels = cn.Labels
+ deploy.Spec.Selector = cn.Spec.Selector
+ deploy.Spec.Replicas = &cn.Spec.Replicas
+ deploy.Spec.Template.Labels = cn.Labels
+ deploy.Spec.Template.Spec.Containers[0].Image = fmt.Sprintf("%s:%s",
imageName, cn.Spec.ServerVersion)
+ // TODO: don't use v1.Port directly
+ // deploy.Spec.Template.Spec.Containers[0].Ports = cn.Spec.Ports
+ if deploy.Spec.Template.Spec.Containers[0].Ports == nil {
+ deploy.Spec.Template.Spec.Containers[0].Ports =
[]corev1.ContainerPort{}
+ }
+ for _, pb := range cn.Spec.PortBindings {
+ deploy.Spec.Template.Spec.Containers[0].Ports =
append(deploy.Spec.Template.Spec.Containers[0].Ports, corev1.ContainerPort{
+ Name: pb.Name,
+ HostIP: pb.HostIP,
+ ContainerPort: pb.ContainerPort,
+ Protocol: pb.Protocol,
+ })
}
- var (
- maxUnavailable intstr.IntOrString
- maxSurge intstr.IntOrString
- )
+ // additional information
+ deploy.Spec.Template.Spec.Containers[0].Resources = cn.Spec.Resources
+ for _, v := range deploy.Spec.Template.Spec.Volumes {
+ if v.Name == "shardingsphere-proxy-config" {
+ v.ConfigMap.LocalObjectReference.Name = cn.Name
+ }
+ }
- if proxy.Annotations[AnnoRollingUpdateMaxUnavailable] != "" {
- n, _ :=
strconv.Atoi(proxy.Annotations[AnnoRollingUpdateMaxUnavailable])
- maxUnavailable = intstr.FromInt(n)
- } else {
- maxUnavailable = intstr.FromInt(0)
+ if cn.Spec.Probes != nil {
+ if cn.Spec.Probes.StartupProbe != nil {
+ deploy.Spec.Template.Spec.Containers[0].StartupProbe =
cn.Spec.Probes.StartupProbe.DeepCopy()
+ }
+ if cn.Spec.Probes.LivenessProbe != nil {
+ deploy.Spec.Template.Spec.Containers[0].LivenessProbe =
cn.Spec.Probes.LivenessProbe.DeepCopy()
+ }
+ if cn.Spec.Probes.ReadinessProbe != nil {
+ deploy.Spec.Template.Spec.Containers[0].ReadinessProbe
= cn.Spec.Probes.ReadinessProbe.DeepCopy()
+ }
+ }
+ if len(cn.Spec.ImagePullSecrets) > 0 {
+ deploy.Spec.Template.Spec.ImagePullSecrets =
cn.Spec.ImagePullSecrets
}
+ if cn.Spec.StorageNodeConnector != nil {
+ if cn.Spec.StorageNodeConnector.Type ==
v1alpha1.ConnectorTypeMySQL {
+ // add or update initContainer
+ if len(deploy.Spec.Template.Spec.InitContainers) > 0 {
+ for idx, v := range
deploy.Spec.Template.Spec.InitContainers[0].Env {
+ if v.Name == "MYSQL_CONNECTOR_VERSION" {
+
deploy.Spec.Template.Spec.InitContainers[0].Env[idx].Value =
cn.Spec.StorageNodeConnector.Version
+ }
+ }
+ } else {
+ deploy.Spec.Template.Spec.InitContainers =
[]corev1.Container{
+ {
+ Name: "boostrap",
+ Image: "busybox:1.35.0",
+ Command: []string{"/bin/sh",
"-c", download_script},
+ Env: []corev1.EnvVar{
+ {
+ Name:
"MYSQL_CONNECTOR_VERSION",
+ Value:
cn.Spec.StorageNodeConnector.Version,
+ },
+ },
+ VolumeMounts:
[]corev1.VolumeMount{
+ {
+ Name:
"mysql-connector-java",
+ MountPath:
DefaultExtlibPath,
+ },
+ },
+ },
+ }
- if proxy.Annotations[AnnoRollingUpdateMaxSurge] != "" {
- n, _ :=
strconv.Atoi(proxy.Annotations[AnnoRollingUpdateMaxSurge])
- maxSurge = intstr.FromInt(n)
- } else {
- maxSurge = intstr.FromInt(1)
+
deploy.Spec.Template.Spec.Containers[0].VolumeMounts =
append(deploy.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
+ Name: "mysql-connector-java",
+ SubPath:
fmt.Sprintf("mysql-connector-java-%s.jar",
cn.Spec.StorageNodeConnector.Version),
+ MountPath:
fmt.Sprintf("%s/mysql-connector-java-%s.jar", DefaultExtlibPath,
cn.Spec.StorageNodeConnector.Version),
+ })
+
+ deploy.Spec.Template.Spec.Volumes =
append(deploy.Spec.Template.Spec.Volumes, corev1.Volume{
+ Name: "mysql-connector-java",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir:
&corev1.EmptyDirVolumeSource{},
+ },
+ })
+ }
+ }
}
- dp := &v1.Deployment{
+ return deploy
+}
+
// download_script fetches the MySQL connector jar matching
// MYSQL_CONNECTOR_VERSION from Maven Central, verifies its md5 checksum, and
// moves it into the proxy ext-lib directory.
// NOTE(review): the md5sum/mv steps use absolute /mysql-connector-java-* paths,
// which assumes the init container's working directory is "/" when wget saves
// the files — TODO confirm for the busybox image used.
const download_script = `wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_CONNECTOR_VERSION}/mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar;
wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_CONNECTOR_VERSION}/mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar.md5;
if [ $(md5sum /mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar | cut -d ' ' -f1) = $(cat /mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar.md5) ];
then echo success;
else echo failed;exit 1;fi;mv /mysql-connector-java-${MYSQL_CONNECTOR_VERSION}.jar /opt/shardingsphere-proxy/ext-lib`
+
+func ComputeNodeDefaultDeployment(meta metav1.Object, gvk
schema.GroupVersionKind) *v1.Deployment {
+ defaultMaxUnavailable := intstr.FromInt(0)
+ defaultMaxSurge := intstr.FromInt(3)
+ defaultImage := "apache/shardingsphere-proxy:5.3.0"
+
+ return &v1.Deployment{
ObjectMeta: metav1.ObjectMeta{
- Name: proxy.Name,
- Namespace: proxy.Namespace,
+ Name: "shardingsphere-proxy",
+ Namespace: "default",
+ Labels: map[string]string{},
OwnerReferences: []metav1.OwnerReference{
- *metav1.NewControllerRef(proxy.GetObjectMeta(),
proxy.GroupVersionKind()),
+ *metav1.NewControllerRef(meta, gvk),
},
},
Spec: v1.DeploymentSpec{
Strategy: v1.DeploymentStrategy{
Type: v1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &v1.RollingUpdateDeployment{
- MaxUnavailable: &maxUnavailable,
- MaxSurge: &maxSurge,
+ MaxUnavailable: &defaultMaxUnavailable,
+ MaxSurge: &defaultMaxSurge,
},
},
Selector: &metav1.LabelSelector{
- MatchLabels: map[string]string{
- "apps": proxy.Name,
- },
+ MatchLabels: map[string]string{},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
- Labels: map[string]string{
- "apps": proxy.Name,
- },
+ Labels: map[string]string{},
},
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
- Name:
"proxy",
- Image:
fmt.Sprintf("%s:%s", imageName, proxy.Spec.Version),
+ Name:
"shardingsphere-proxy",
+ Image:
defaultImage,
ImagePullPolicy:
corev1.PullIfNotPresent,
Ports:
[]corev1.ContainerPort{
{
-
ContainerPort: proxy.Spec.Port,
- },
- },
- Env: []corev1.EnvVar{
- {
- Name:
"PORT",
- Value:
strconv.FormatInt(int64(proxy.Spec.Port), 10),
+ Name:
"proxy",
+
ContainerPort: 3307,
},
},
VolumeMounts:
[]corev1.VolumeMount{
{
- Name:
"config",
+ Name:
"shardingsphere-proxy-config",
MountPath: "/opt/shardingsphere-proxy/conf",
},
},
@@ -121,11 +186,11 @@ func ConstructCascadingDeployment(proxy
*v1alpha1.ShardingSphereProxy) *v1.Deplo
},
Volumes: []corev1.Volume{
{
- Name: "config",
+ Name:
"shardingsphere-proxy-config",
VolumeSource:
corev1.VolumeSource{
ConfigMap:
&corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
-
Name: proxy.Spec.ProxyConfigName,
+
Name: "shardingsphere-proxy-config",
},
},
},
@@ -135,341 +200,14 @@ func ConstructCascadingDeployment(proxy
*v1alpha1.ShardingSphereProxy) *v1.Deplo
},
},
}
- if proxy.Spec.AutomaticScaling == nil {
- dp.Spec.Replicas = &proxy.Spec.Replicas
- }
-
- dp.Spec.Template.Spec.Containers[0].Resources = proxy.Spec.Resources
-
- if proxy.Spec.LivenessProbe != nil {
- dp.Spec.Template.Spec.Containers[0].LivenessProbe =
proxy.Spec.LivenessProbe
- }
- if proxy.Spec.ReadinessProbe != nil {
- dp.Spec.Template.Spec.Containers[0].ReadinessProbe =
proxy.Spec.ReadinessProbe
- }
- if proxy.Spec.StartupProbe != nil {
- dp.Spec.Template.Spec.Containers[0].StartupProbe =
proxy.Spec.StartupProbe
- }
- if len(proxy.Spec.ImagePullSecrets) > 0 {
- dp.Spec.Template.Spec.ImagePullSecrets =
proxy.Spec.ImagePullSecrets
- }
- return processOptionalParameter(proxy, dp)
}
-func processOptionalParameter(proxy *v1alpha1.ShardingSphereProxy, dp
*v1.Deployment) *v1.Deployment {
- if proxy.Spec.MySQLDriver != nil {
- addInitContainer(dp, proxy.Spec.MySQLDriver)
- }
- return dp
-}
-
-const script = `wget
https://repo1.maven.org/maven2/mysql/mysql-connector-java/${VERSION}/mysql-connector-java-${VERSION}.jar;
-wget
https://repo1.maven.org/maven2/mysql/mysql-connector-java/${VERSION}/mysql-connector-java-${VERSION}.jar.md5;
-if [ $(md5sum /mysql-connector-java-${VERSION}.jar | cut -d ' ' -f1) = $(cat
/mysql-connector-java-${VERSION}.jar.md5) ];
-then echo success;
-else echo failed;exit 1;fi;mv /mysql-connector-java-${VERSION}.jar
/opt/shardingsphere-proxy/ext-lib`
-
-func addInitContainer(dp *v1.Deployment, mysql *v1alpha1.MySQLDriver) {
- if len(dp.Spec.Template.Spec.InitContainers) == 0 {
- dp.Spec.Template.Spec.Containers[0].VolumeMounts =
append(dp.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
- Name: "mysql-connect-jar",
- MountPath: "/opt/shardingsphere-proxy/ext-lib",
- })
-
- dp.Spec.Template.Spec.Volumes =
append(dp.Spec.Template.Spec.Volumes, corev1.Volume{
- Name: "mysql-connect-jar",
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- })
- }
-
- dp.Spec.Template.Spec.InitContainers = []corev1.Container{
- {
- Name: "download-mysql-connect",
- Image: "busybox:1.35.0",
- Command: []string{"/bin/sh", "-c", script},
- Env: []corev1.EnvVar{
- {
- Name: "VERSION",
- Value: mysql.Version,
- },
- },
- VolumeMounts: []corev1.VolumeMount{
- {
- Name: "mysql-connect-jar",
- MountPath:
"/opt/shardingsphere-proxy/ext-lib",
- },
- },
- },
- }
-
-}
-
-// UpdateDeployment FIXME:merge UpdateDeployment and
ConstructCascadingDeployment
-func UpdateDeployment(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment)
*v1.Deployment {
- exp := act.DeepCopy()
-
- var (
- maxUnavailable intstr.IntOrString
- maxSurge intstr.IntOrString
- )
-
- if proxy.Annotations[AnnoRollingUpdateMaxUnavailable] != "" {
- n, _ :=
strconv.Atoi(proxy.Annotations[AnnoRollingUpdateMaxUnavailable])
- maxUnavailable = intstr.FromInt(n)
- } else {
- maxUnavailable = intstr.FromInt(0)
- }
-
- if proxy.Annotations[AnnoRollingUpdateMaxSurge] != "" {
- n, _ :=
strconv.Atoi(proxy.Annotations[AnnoRollingUpdateMaxSurge])
- maxSurge = intstr.FromInt(n)
- } else {
- maxSurge = intstr.FromInt(1)
- }
-
- exp.Spec.Strategy.Type = v1.RollingUpdateDeploymentStrategyType
- if exp.Spec.Strategy.RollingUpdate == nil {
- exp.Spec.Strategy.RollingUpdate = &v1.RollingUpdateDeployment{}
- }
-
- exp.Spec.Strategy.RollingUpdate.MaxSurge = &maxSurge
- exp.Spec.Strategy.RollingUpdate.MaxUnavailable = &maxUnavailable
-
- if proxy.Spec.AutomaticScaling == nil ||
!proxy.Spec.AutomaticScaling.Enable {
- exp.Spec.Replicas = updateReplicas(proxy, act)
- }
- exp.Spec.Template = updatePodTemplateSpec(proxy, act)
+func ComputeNodeUpdateDeployment(cn *v1alpha1.ComputeNode, cur *v1.Deployment)
*v1.Deployment {
+ exp := &v1.Deployment{}
+ exp.ObjectMeta = cur.ObjectMeta
+ exp.ObjectMeta.ResourceVersion = ""
+ exp.Labels = cur.Labels
+ exp.Annotations = cur.Annotations
+ exp.Spec = ComputeNodeNewDeployment(cn).Spec
return exp
}
-
-func updateReplicas(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment)
*int32 {
- if *act.Spec.Replicas != proxy.Spec.Replicas {
- return &proxy.Spec.Replicas
- }
- return act.Spec.Replicas
-}
-
-func updatePodTemplateSpec(proxy *v1alpha1.ShardingSphereProxy, act
*v1.Deployment) corev1.PodTemplateSpec {
- exp := act.Spec.Template.DeepCopy()
-
- SSProxyContainer := updateSSProxyContainer(proxy, act)
- for i, _ := range exp.Spec.Containers {
- if exp.Spec.Containers[i].Name == "proxy" {
- exp.Spec.Containers[i] = *SSProxyContainer
- }
- }
-
- if proxy.Spec.MySQLDriver != nil {
- initContainer := updateInitContainer(proxy, act)
- for i, _ := range exp.Spec.InitContainers {
- if exp.Spec.InitContainers[i].Name ==
"download-mysql-connect" {
- exp.Spec.InitContainers[i] = *initContainer
- }
- }
- }
-
- configName := updateConfigName(proxy, act)
- exp.Spec.Volumes[0].ConfigMap.Name = configName
-
- return *exp
-}
-
-func updateConfigName(proxy *v1alpha1.ShardingSphereProxy, act *v1.Deployment)
string {
- if act.Spec.Template.Spec.Volumes[0].ConfigMap.Name !=
proxy.Spec.ProxyConfigName {
- return proxy.Spec.ProxyConfigName
- }
- return act.Spec.Template.Spec.Volumes[0].ConfigMap.Name
-}
-
-func updateInitContainer(proxy *v1alpha1.ShardingSphereProxy, act
*v1.Deployment) *corev1.Container {
- var exp *corev1.Container
-
- for _, c := range act.Spec.Template.Spec.InitContainers {
- if c.Name == "download-mysql-connect" {
- for i, _ := range c.Env {
- if c.Env[i].Name == "VERSION" {
- if c.Env[i].Value !=
proxy.Spec.MySQLDriver.Version {
- c.Env[i].Value =
proxy.Spec.MySQLDriver.Version
- }
- }
- }
- exp = c.DeepCopy()
- }
- }
-
- return exp
-}
-
-func updateSSProxyContainer(proxy *v1alpha1.ShardingSphereProxy, act
*v1.Deployment) *corev1.Container {
- var exp *corev1.Container
-
- for _, c := range act.Spec.Template.Spec.Containers {
- if c.Name == "proxy" {
- exp = c.DeepCopy()
-
- tag := strings.Split(c.Image, ":")[1]
- if tag != proxy.Spec.Version {
- exp.Image = fmt.Sprintf("%s:%s", imageName,
proxy.Spec.Version)
- }
-
- exp.Resources = proxy.Spec.Resources
-
- if proxy.Spec.LivenessProbe != nil &&
!reflect.DeepEqual(c.LivenessProbe, *proxy.Spec.LivenessProbe) {
- exp.LivenessProbe = proxy.Spec.LivenessProbe
- }
-
- if proxy.Spec.ReadinessProbe != nil &&
!reflect.DeepEqual(c.ReadinessProbe, *proxy.Spec.ReadinessProbe) {
- exp.ReadinessProbe = proxy.Spec.ReadinessProbe
- }
-
- if proxy.Spec.StartupProbe != nil &&
!reflect.DeepEqual(c.StartupProbe, *proxy.Spec.StartupProbe) {
- exp.StartupProbe = proxy.Spec.StartupProbe
- }
-
- for i, _ := range c.Env {
- if c.Env[i].Name == "PORT" {
- proxyPort :=
strconv.FormatInt(int64(proxy.Spec.Port), 10)
- if c.Env[i].Value != proxyPort {
- c.Env[i].Value = proxyPort
- exp.Ports[0].ContainerPort =
proxy.Spec.Port
- }
- }
- }
- exp.Env = c.Env
- }
- }
- return exp
-}
-
-func getReadyNodes(podlist corev1.PodList) int32 {
- var cnt int32
- for _, p := range podlist.Items {
- if p.Status.Phase == corev1.PodRunning {
- for _, c := range p.Status.Conditions {
- if c.Type == corev1.PodReady && c.Status ==
corev1.ConditionTrue {
- for _, con := range
p.Status.ContainerStatuses {
- if con.Name == "proxy" &&
con.Ready {
- cnt++
- }
- }
- }
- }
- }
- }
- return cnt
-}
-
-func ReconcileStatus(podlist corev1.PodList, rt v1alpha1.ShardingSphereProxy)
v1alpha1.ProxyStatus {
- readyNodes := getReadyNodes(podlist)
-
- rt.Status.ReadyNodes = readyNodes
- if rt.Spec.Replicas == 0 {
- rt.Status.Phase = v1alpha1.StatusNotReady
- } else {
- if readyNodes < miniReadyCount {
- rt.Status.Phase = v1alpha1.StatusNotReady
- } else {
- rt.Status.Phase = v1alpha1.StatusReady
- }
- }
-
- if rt.Status.Phase == v1alpha1.StatusReady {
- rt.Status.Conditions =
updateReadyConditions(rt.Status.Conditions, v1alpha1.Condition{
- Type: v1alpha1.ConditionReady,
- Status: metav1.ConditionTrue,
- LastUpdateTime: metav1.Now(),
- })
- } else {
- cond := clusterCondition(podlist)
- rt.Status.Conditions =
updateNotReadyConditions(rt.Status.Conditions, cond)
- }
-
- return rt.Status
-}
-
-func newConditions(conditions []v1alpha1.Condition, cond v1alpha1.Condition)
[]v1alpha1.Condition {
- if conditions == nil {
- conditions = []v1alpha1.Condition{}
- }
- if cond.Type == "" {
- return conditions
- }
-
- found := false
- for idx, _ := range conditions {
- if conditions[idx].Type == cond.Type {
- conditions[idx].LastUpdateTime = cond.LastUpdateTime
- conditions[idx].Status = cond.Status
- found = true
- break
- }
- }
-
- if !found {
- conditions = append(conditions, cond)
- }
-
- return conditions
-}
-
-func updateReadyConditions(conditions []v1alpha1.Condition, cond
v1alpha1.Condition) []v1alpha1.Condition {
- return newConditions(conditions, cond)
-}
-
-func updateNotReadyConditions(conditions []v1alpha1.Condition, cond
v1alpha1.Condition) []v1alpha1.Condition {
- cur := newConditions(conditions, cond)
-
- for idx, _ := range cur {
- if cur[idx].Type == v1alpha1.ConditionReady {
- cur[idx].LastUpdateTime = metav1.Now()
- cur[idx].Status = metav1.ConditionFalse
- }
- }
-
- return cur
-}
-
-func clusterCondition(podlist corev1.PodList) v1alpha1.Condition {
- cond := v1alpha1.Condition{}
- if len(podlist.Items) == 0 {
- return cond
- }
-
- condStarted := v1alpha1.Condition{
- Type: v1alpha1.ConditionStarted,
- Status: metav1.ConditionTrue,
- LastUpdateTime: metav1.Now(),
- }
- condUnknown := v1alpha1.Condition{
- Type: v1alpha1.ConditionUnknown,
- Status: metav1.ConditionTrue,
- LastUpdateTime: metav1.Now(),
- }
- condDeployed := v1alpha1.Condition{
- Type: v1alpha1.ConditionDeployed,
- Status: metav1.ConditionTrue,
- LastUpdateTime: metav1.Now(),
- }
- condFailed := v1alpha1.Condition{
- Type: v1alpha1.ConditionFailed,
- Status: metav1.ConditionTrue,
- LastUpdateTime: metav1.Now(),
- }
-
- //FIXME: do not capture ConditionStarted in some cases
- for _, p := range podlist.Items {
- switch p.Status.Phase {
- case corev1.PodRunning:
- return condStarted
- case corev1.PodUnknown:
- return condUnknown
- case corev1.PodPending:
- return condDeployed
- case corev1.PodFailed:
- return condFailed
- }
- }
- return cond
-}
diff --git a/shardingsphere-operator/pkg/reconcile/deployment.go
b/shardingsphere-operator/pkg/reconcile/deployment_proxy.go
similarity index 100%
copy from shardingsphere-operator/pkg/reconcile/deployment.go
copy to shardingsphere-operator/pkg/reconcile/deployment_proxy.go
diff --git a/shardingsphere-operator/pkg/reconcile/resource.go
b/shardingsphere-operator/pkg/reconcile/resource.go
index e90f0d8..d6bd6e5 100644
--- a/shardingsphere-operator/pkg/reconcile/resource.go
+++ b/shardingsphere-operator/pkg/reconcile/resource.go
@@ -27,7 +27,7 @@ import (
const imageName = "apache/shardingsphere-proxy"
-const logback = `<?xml version="1.0"?>
+const defaultLogback = `<?xml version="1.0"?>
<configuration>
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
@@ -73,7 +73,7 @@ func ConstructCascadingConfigmap(proxyConfig
*v1alpha1.ShardingSphereProxyServer
},
Data: map[string]string{
"server.yaml": y,
- "logback.xml": logback,
+ "logback.xml": defaultLogback,
},
}
}
diff --git a/shardingsphere-operator/pkg/reconcile/service.go
b/shardingsphere-operator/pkg/reconcile/service.go
index 65cefc2..3ddcf51 100644
--- a/shardingsphere-operator/pkg/reconcile/service.go
+++ b/shardingsphere-operator/pkg/reconcile/service.go
@@ -21,10 +21,69 @@ import (
"reflect"
"github.com/apache/shardingsphere-on-cloud/shardingsphere-operator/api/v1alpha1"
+ corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
+func ComputeNodeNewService(cn *v1alpha1.ComputeNode) *v1.Service {
+ svc := ComputeNodeDefaultService(cn.GetObjectMeta(),
cn.GroupVersionKind())
+ svc.Name = cn.Name
+ svc.Namespace = cn.Namespace
+ svc.Labels = cn.Labels
+ // svc.Spec.Selector = cn.Labels
+ svc.Spec.Selector = cn.Spec.Selector.MatchLabels
+ // svc.Spec.Type = cn.Spec.Service.Type
+ svc.Spec.Type = cn.Spec.ServiceType
+ // svc.Spec.Ports = cn.Spec.Service.Ports
+
+ if svc.Spec.Ports == nil {
+ svc.Spec.Ports = []corev1.ServicePort{}
+ }
+ for _, pb := range cn.Spec.PortBindings {
+ svc.Spec.Ports = append(svc.Spec.Ports, corev1.ServicePort{
+ Name: pb.Name,
+ TargetPort: intstr.FromInt(int(pb.ContainerPort)),
+ Port: pb.ServicePort,
+ NodePort: pb.NodePort,
+ Protocol: pb.Protocol,
+ })
+ }
+
+ return svc
+}
+
+func ComputeNodeDefaultService(meta metav1.Object, gvk
schema.GroupVersionKind) *v1.Service {
+ return &v1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "shardingsphere-proxy",
+ Namespace: "default",
+ Labels: map[string]string{},
+ OwnerReferences: []metav1.OwnerReference{
+ *metav1.NewControllerRef(meta, gvk),
+ },
+ },
+ Spec: v1.ServiceSpec{
+ Selector: map[string]string{},
+ Type: v1.ServiceTypeClusterIP,
+ Ports: []v1.ServicePort{},
+ },
+ }
+}
+
+func ComputeNodeUpdateService(cn *v1alpha1.ComputeNode, cur *v1.Service)
*v1.Service {
+ exp := &v1.Service{}
+ exp.ObjectMeta = cur.ObjectMeta
+ exp.Labels = cur.Labels
+ exp.Annotations = cur.Annotations
+ exp.Spec = ComputeNodeNewService(cn).Spec
+ exp.Spec.ClusterIP = cur.Spec.ClusterIP
+ exp.Spec.ClusterIPs = cur.Spec.ClusterIPs
+ return exp
+}
+
func NewService(ssproxy *v1alpha1.ShardingSphereProxy) *v1.Service {
return ConstructCascadingService(ssproxy)
}