This is an automated email from the ASF dual-hosted git repository.

wusheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/skywalking-banyandb-helm.git


The following commit(s) were added to refs/heads/master by this push:
     new 47477a6  Support lifecycle (#28)
47477a6 is described below

commit 47477a6ea253cb86ff609ebf3f6d3517417fbf70
Author: Gao Hongtao <hanahm...@gmail.com>
AuthorDate: Mon Apr 21 11:07:11 2025 +0800

    Support lifecycle (#28)
---
 CHANGES.md                                         |   8 +
 README.md                                          |   4 +
 chart/Chart.lock                                   |   6 +-
 chart/Chart.yaml                                   |  10 +-
 chart/templates/cluster_data_statefulset.yaml      | 453 ++++++++++++++
 ...oyment.yaml => cluster_liaison_deployment.yaml} |   0
 ..._deployment.yaml => cluster_ui_deployment.yaml} |   0
 chart/templates/standalone_statefulset.yaml        | 272 +++++++++
 chart/templates/statefulset.yaml                   | 649 ---------------------
 chart/{values.yaml => values-lifecycle.yaml}       | 353 +++++++----
 chart/values.yaml                                  | 305 ++++++----
 doc/backup.md                                      |  19 +-
 doc/lifecycle.md                                   |  86 +++
 doc/parameters.md                                  | 171 +++---
 test/e2e/values.cluster.yaml                       | 209 +++----
 15 files changed, 1459 insertions(+), 1086 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index a69fc74..65af26a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,6 +2,14 @@ Changes by Version
 ==================
 Release Notes.
 
+0.5.0
+------------------
+
+#### Features
+
+- Support Lifecycle Sidecar
+- Introduce the data node template to support different node roles
+
 0.4.0
 ------------------
 
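For reference, the role-based layout introduced here merges `cluster.data.nodeTemplate` into each entry under `cluster.data.roles`, and every role renders its own StatefulSet. A minimal values sketch, with field names taken from the new cluster_data_statefulset.yaml in this commit; the role names, replica counts, and schedule are illustrative, not shipped defaults:

    cluster:
      enabled: true
      data:
        nodeTemplate:          # shared defaults, merged into every role
          replicas: 1
          env: []
        roles:
          hot:                 # key becomes the app.kubernetes.io/role label
            replicas: 3
            lifecycleSidecar:
              enabled: true
              schedule: "@hourly"   # illustrative; passed to /lifecycle as --schedule
          cold:
            replicas: 1
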
diff --git a/README.md b/README.md
index 8cf190a..e43e4f5 100644
--- a/README.md
+++ b/README.md
@@ -123,6 +123,10 @@ spec:
 
 The backup and restore functionalities are supported in the BanyanDB Helm Chart. The detailed configuration can be found in [backup.md](./doc/backup.md).
 
+## Lifecycle Management
+
+The lifecycle management functionalities are supported in the BanyanDB Helm Chart. The detailed configuration can be found in [lifecycle.md](./doc/lifecycle.md).
+
 # Install the development version of BanyanDB using the master branch
 
 This is needed **only** when you want to install [BanyanDB](https://github.com/apache/skywalking-banyandb/tree/main) from the master branch.
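The backup and lifecycle sidecars, plus the restore init container, are all driven from the same per-role values. A hedged sketch of the fields the new template reads (the destination URL and schedules are illustrative, not shipped defaults):

    cluster:
      data:
        nodeTemplate:
          backupSidecar:
            enabled: true
            dest: "file:///backups"   # illustrative; passed to /backup as --dest
            timeStyle: "daily"        # passed as --time-style
            schedule: "@hourly"       # passed as --schedule
          restoreInitContainer:
            enabled: true             # runs /restore run --source=<backupSidecar.dest>
          lifecycleSidecar:
            enabled: true
            schedule: "@hourly"       # passed to /lifecycle as --schedule
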
diff --git a/chart/Chart.lock b/chart/Chart.lock
index e132a72..3dbcbbc 100644
--- a/chart/Chart.lock
+++ b/chart/Chart.lock
@@ -1,6 +1,6 @@
 dependencies:
 - name: etcd
   repository: oci://registry-1.docker.io/bitnamicharts
-  version: 9.14.3
-digest: sha256:31aacd1ae011febfa82522a1777fd2f36a52529bea2e343bacb0060c51068906
-generated: "2024-09-14T06:34:19.861488669Z"
+  version: 11.2.4
+digest: sha256:d0428d056259aaf870a742343c696f9ef5e291d0736c6b09a651fb7ed2f3bdd1
+generated: "2025-04-18T04:39:16.012618336Z"
diff --git a/chart/Chart.yaml b/chart/Chart.yaml
index 36dd6b9..e1a97a4 100644
--- a/chart/Chart.yaml
+++ b/chart/Chart.yaml
@@ -20,9 +20,9 @@ version: 0.4.0
 description: Helm Chart for Apache SkyWalking BanyanDB
 icon: https://raw.githubusercontent.com/apache/skywalking-kubernetes/master/logo/sw-logo-for-chart.jpg
 sources:
-  - https://github.com/apache/skywalking-banyandb-helm
+- https://github.com/apache/skywalking-banyandb-helm
 dependencies:
-  - name: etcd
-    version: 9.14.3
-    repository: oci://registry-1.docker.io/bitnamicharts
-    condition: etcd.enabled
+- name: etcd
+  version: 11.2.4
+  repository: oci://registry-1.docker.io/bitnamicharts
+  condition: etcd.enabled
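The etcd dependency bump keeps the auth wiring unchanged: the templates read the Bitnami subchart's standard auth values. For reference, these are the keys the data template checks (the password shown is a placeholder):

    etcd:
      enabled: true
      auth:
        rbac:
          create: true
          allowNoneAuthentication: false
          rootPassword: "changeme"     # surfaces as BYDB_ETCD_PASSWORD
        client:
          secureTransport: true        # mounts ca.crt from tls.etcdSecretName
          enableAuthentication: true   # adds BYDB_ETCD_TLS_CERT_FILE/KEY_FILE
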
diff --git a/chart/templates/cluster_data_statefulset.yaml b/chart/templates/cluster_data_statefulset.yaml
new file mode 100644
index 0000000..6b67364
--- /dev/null
+++ b/chart/templates/cluster_data_statefulset.yaml
@@ -0,0 +1,453 @@
+{{/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if and .Values.cluster.enabled .Values.cluster.data }}
+{{- $nodeTemplate := .Values.cluster.data.nodeTemplate }}
+{{- range $roleName, $roleConfig := .Values.cluster.data.roles }}
+{{- /* Merge nodeTemplate with role-specific config */}}
+{{- $roleConfig := merge $roleConfig $nodeTemplate }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels: {{ include "banyandb.labels" $ | nindent 4 }}
+    app.kubernetes.io/component: data
+    app.kubernetes.io/role: {{ $roleName }}
+  name: {{ template "banyandb.fullname" $ }}-data-{{ $roleName }}
+spec:
+  serviceName: banyandb
+  replicas: {{ $roleConfig.replicas }}
+  selector:
+    matchLabels: {{ include "banyandb.selectorLabels" $ | nindent 6 }}
+      app.kubernetes.io/component: data
+      app.kubernetes.io/role: {{ $roleName }}
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels: {{ include "banyandb.labels" $ | nindent 8 }}
+        app.kubernetes.io/component: data
+        app.kubernetes.io/role: {{ $roleName }}
+      {{- $mergedAnnotations := merge $.Values.cluster.data.nodeTemplate.podAnnotations $roleConfig.podAnnotations }}
+      {{- if $mergedAnnotations }}
+      annotations:
+{{ toYaml $mergedAnnotations | indent 8 }}
+      {{- end }}
+    spec:
+      serviceAccountName: {{ template "banyandb.serviceAccountName" $ }}
+      {{- $mergedSecurity := merge $.Values.cluster.data.nodeTemplate.securityContext $roleConfig.securityContext }}
+      {{- with $mergedSecurity }}
+      securityContext:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      priorityClassName: {{ $roleConfig.priorityClassName }}
+      initContainers:
+        {{- if $roleConfig.restoreInitContainer.enabled }}
+        - name: restore-init
+          image: {{ $.Values.image.repository }}:{{ required "banyandb.image.tag is required" $.Values.image.tag }}-slim
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          command:
+            - "/restore"
+            - "run"
+            - "--source={{ $roleConfig.backupSidecar.dest }}"
+          {{- if $.Values.storage.enabled }}
+          volumeMounts:
+            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- with $roleConfig.restoreInitContainer.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+        {{- end }}
+      containers:
+        - name: data
+          image: {{ $.Values.image.repository }}:{{ required "banyandb.image.tag is required" $.Values.image.tag }}-slim
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          env:
+            {{- $mergedEnv := concat $.Values.cluster.data.nodeTemplate.env $roleConfig.env }}
+            {{- range $env := $mergedEnv }}
+            - name: {{ $env.name }}
+              value: {{ $env.value }}
+            {{- end }}
+            - name: BYDB_DATA_NODE_ROLE
+              value: {{ $roleName }}
+            {{- if $roleConfig.tls }}
+            {{- if $roleConfig.tls.grpcSecretName }}
+            - name: BYDB_TLS
+              value: "true"
+            - name: BYDB_CERT_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.grpcSecretName }}/tls.crt"
+            - name: BYDB_KEY_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.grpcSecretName }}/tls.key"
+            - name: BYDB_HTTP_GRPC_CERT_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.grpcSecretName }}/tls.crt"
+            {{- end }}
+            {{- end }}
+            {{- if and $.Values.etcd.auth.rbac.create (not $.Values.etcd.auth.rbac.allowNoneAuthentication) }}
+            - name: BYDB_ETCD_USERNAME
+              value: "root"
+            - name: BYDB_ETCD_PASSWORD
+              value: {{ $.Values.etcd.auth.rbac.rootPassword }}
+            {{- end }}
+            {{- if $.Values.etcd.auth.client.secureTransport }}
+            - name: BYDB_ETCD_TLS_CA_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.etcdSecretName }}/ca.crt"
+            {{- end }}
+            {{- if $.Values.etcd.auth.client.enableAuthentication }}
+            - name: BYDB_ETCD_TLS_CERT_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.etcdSecretName }}/tls.crt"
+            - name: BYDB_ETCD_TLS_KEY_FILE
+              value: "/etc/tls/{{ $roleConfig.tls.etcdSecretName }}/tls.key"
+            {{- end }}
+            {{- if and (not $.Values.etcd.enabled) $.Values.cluster.etcdEndpoints }}
+            - name: BYDB_ETCD_ENDPOINTS
+              value: "{{- $.Values.cluster.etcdEndpoints | join "," -}}"
+            {{- else }}
+            {{- include "banyandb.etcdEndpoints" $ | nindent 12 }}
+            {{- end }}
+            - name: BYDB_NODE_HOST_PROVIDER
+              value: "ip"
+            - name: BYDB_NODE_HOST
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+          args:
+            - data 
+          ports:
+            - containerPort: 17912
+              name: grpc
+            - containerPort: 17913
+              name: http-healthz
+            - containerPort: 6060
+              name: pprof
+            - containerPort: 2121
+              name: observability
+          readinessProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              scheme: HTTP
+            initialDelaySeconds: {{ $roleConfig.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ $roleConfig.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ $roleConfig.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ $roleConfig.readinessProbe.successThreshold }}
+            failureThreshold: {{ $roleConfig.readinessProbe.failureThreshold }}
+          livenessProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              scheme: HTTP
+            initialDelaySeconds: {{ $roleConfig.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ $roleConfig.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ $roleConfig.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ $roleConfig.livenessProbe.successThreshold }}
+            failureThreshold: {{ $roleConfig.livenessProbe.failureThreshold }}
+          startupProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              scheme: HTTP
+            initialDelaySeconds: {{ $roleConfig.startupProbe.initialDelaySeconds }}
+            periodSeconds: {{ $roleConfig.startupProbe.periodSeconds }}
+            timeoutSeconds: {{ $roleConfig.startupProbe.timeoutSeconds }}
+            successThreshold: {{ $roleConfig.startupProbe.successThreshold }}
+            failureThreshold: {{ $roleConfig.startupProbe.failureThreshold }}
+          {{- if $roleConfig.resources }}
+          resources:
+            {{- if $roleConfig.resources.requests }}
+            requests:
+              {{- range $request := $roleConfig.resources.requests }}
+              {{ $request.key }}: {{ $request.value }}
+              {{- end }}
+            {{- end }}
+            {{- if $roleConfig.resources.limits }}
+            limits:
+              {{- range $limit := $roleConfig.resources.limits }}
+              {{ $limit.key }}: {{ $limit.value }}
+              {{- end }}
+            {{- end }}
+          {{- end }}
+
+          {{- if or $.Values.storage.enabled $roleConfig.tls }}
+          volumeMounts:
+            {{- if $.Values.storage.enabled }}
+            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+
+            {{- if $roleConfig.tls }}
+            {{- if $roleConfig.tls.grpcSecretName }}
+            - mountPath: /etc/tls/{{ $roleConfig.tls.grpcSecretName }}
+              name: {{ $roleConfig.tls.grpcSecretName }}-volume
+            {{- end }}
+            {{- if $roleConfig.tls.etcdSecretName }}
+            - mountPath: /etc/tls/{{ $roleConfig.tls.etcdSecretName }}
+              name: {{ $roleConfig.tls.etcdSecretName }}-volume
+            {{- end }}
+            {{- end }}
+          {{- end }}
+        {{- if $roleConfig.backupSidecar.enabled }}
+        - name: backup
+          image: {{ $.Values.image.repository }}:{{ required "banyandb.image.tag is required" $.Values.image.tag }}-slim
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          command:
+            - "/backup"
+            - "--dest={{ $roleConfig.backupSidecar.dest }}"
+            - "--time-style={{ $roleConfig.backupSidecar.timeStyle }}"
+            - "--schedule={{ $roleConfig.backupSidecar.schedule }}"
+            - "--grpc-addr=127.0.0.1:17912"
+          env:
+            - name: ORDINAL_NUMBER
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
+          {{- if $.Values.storage.enabled }}
+          volumeMounts:
+            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- with $roleConfig.backupSidecar.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+        {{- end }}
+        {{- if $roleConfig.lifecycleSidecar.enabled }}
+        - name: lifecycle
+          image: {{ $.Values.image.repository }}:{{ required "banyandb.image.tag is required" $.Values.image.tag }}-slim
+          imagePullPolicy: {{ $.Values.image.pullPolicy }}
+          command:
+            - "/lifecycle"
+            - "--schedule={{ $roleConfig.lifecycleSidecar.schedule }}"
+          {{- if $.Values.storage.enabled }}
+          volumeMounts:
+            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+          {{- with $roleConfig.lifecycleSidecar.resources }}
+          resources:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+        {{- end }}
+        {{- range $sidecar := $roleConfig.sidecar }}
+        - name: {{ $sidecar.name }}
+          image: {{ $sidecar.image }}
+          imagePullPolicy: {{ $sidecar.imagePullPolicy }}
+          command:
+            {{- range $sidecar.commands.normal }}
+            - {{ . | quote }}
+            {{- end }}
+          lifecycle:
+            preStop:
+              exec:
+                command:
+                  {{- range $sidecar.commands.preStop }}
+                  - {{ . | quote }}
+                  {{- end }}
+          {{- if $.Values.storage.enabled }}
+          volumeMounts:
+            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+            {{- if eq $claim.nodeRole $roleName }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+          {{- end }}
+        {{- end }}
+
+      {{- if $roleConfig.tls }}
+      volumes:
+        {{- if $roleConfig.tls.grpcSecretName }}
+        - name: {{ $roleConfig.tls.grpcSecretName }}-volume
+          secret:
+            secretName: {{ $roleConfig.tls.grpcSecretName }}
+        {{- end }}
+        {{- if $roleConfig.tls.etcdSecretName }}
+        - name: {{ $roleConfig.tls.etcdSecretName }}-volume
+          secret:
+            secretName: {{ $roleConfig.tls.etcdSecretName }}
+        {{- end }}
+      {{- end }}
+
+      {{- if $roleConfig.tolerations }}
+      tolerations:
+        {{- range $toleration := $roleConfig.tolerations }}
+        - key: {{ $toleration.key }}
+          operator: {{ $toleration.operator }}
+          value: {{ $toleration.value }}
+          effect: {{ $toleration.effect }}
+        {{- end }}
+      {{- end }}
+
+      {{- if $roleConfig.nodeSelector }}
+      nodeSelector:
+        {{- range $selector := $roleConfig.nodeSelector }}
+        {{ $selector.key }}: {{ $selector.value }}
+        {{- end }}
+      {{- end }}
+
+      {{- if $roleConfig.affinity }}
+      affinity: {{ toYaml $roleConfig.affinity | nindent 8 }}
+      {{- else if or $roleConfig.podAffinityPreset $roleConfig.podAntiAffinityPreset }}
+      affinity:
+        {{- if and $roleConfig.podAffinityPreset (not (empty $roleConfig.podAffinityPreset)) }}
+        podAffinity:
+          {{- if eq $roleConfig.podAffinityPreset "soft" }}
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              podAffinityTerm:
+                labelSelector:
+                  matchLabels: {{ include "banyandb.selectorLabels" $ | nindent 20 }}
+                    app.kubernetes.io/component: data
+                    app.kubernetes.io/role: {{ $roleName }}
+                topologyKey: kubernetes.io/hostname
+          {{- else if eq $roleConfig.podAffinityPreset "hard" }}
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchLabels: {{ include "banyandb.selectorLabels" $ | nindent 18 }}
+                  app.kubernetes.io/component: data
+                  app.kubernetes.io/role: {{ $roleName }}
+              topologyKey: kubernetes.io/hostname
+          {{- end }}
+        {{- end }}
+        {{- if and $roleConfig.podAntiAffinityPreset (not (empty $roleConfig.podAntiAffinityPreset)) }}
+        podAntiAffinity:
+          {{- if eq $roleConfig.podAntiAffinityPreset "soft" }}
+          preferredDuringSchedulingIgnoredDuringExecution:
+            - weight: 100
+              podAffinityTerm:
+                labelSelector:
+                  matchLabels: {{ include "banyandb.selectorLabels" $ | nindent 20 }}
+                    app.kubernetes.io/component: data
+                    app.kubernetes.io/role: {{ $roleName }}
+                topologyKey: kubernetes.io/hostname
+          {{- else if eq $roleConfig.podAntiAffinityPreset "hard" }}
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchLabels: {{ include "banyandb.selectorLabels" $ | nindent 18 }}
+                  app.kubernetes.io/component: data
+                  app.kubernetes.io/role: {{ $roleName }}
+              topologyKey: kubernetes.io/hostname
+          {{- end }}
+        {{- end }}
+      {{- end }}
+  {{- if $.Values.storage.enabled }}
+  volumeClaimTemplates:
+    {{- range $claim := $.Values.storage.persistentVolumeClaims }}
+    {{- if eq $claim.nodeRole $roleName }}
+    - metadata:
+        name: {{ $claim.claimName }}
+      spec:
+        {{- if $claim.accessModes }}
+        accessModes:
+          {{- range $claim.accessModes }}
+          - {{ . }}
+          {{- end }}
+        {{- end }}
+
+        {{- if $claim.size }}
+        resources:
+          requests:
+            storage: {{ $claim.size }}
+        {{- end }}
+
+        {{- if $claim.storageClass }}
+        storageClassName: {{ $claim.storageClass }}
+        {{- end }}
+
+        {{- if $claim.volumeMode }}
+        volumeMode: {{ $claim.volumeMode }}
+        {{- end }}
+    {{- end }}
+    {{- end }}
+  {{- end }}
+---
+{{- end }}
+{{- end }}
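Storage claims are matched to a role through the claim's nodeRole field: only claims whose nodeRole equals the role name are mounted into, and templated for, that role's StatefulSet. A sketch under that reading of the template above (names and sizes illustrative):

    storage:
      enabled: true
      persistentVolumeClaims:
        - claimName: hot-data
          nodeRole: hot                         # must match a key under cluster.data.roles
          mountTargets: ["stream", "measure"]   # each target is mounted at /tmp/<target>
          size: 50Gi
          accessModes: ["ReadWriteOnce"]
          storageClass: standard
          volumeMode: Filesystem
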
diff --git a/chart/templates/liaison_deployment.yaml b/chart/templates/cluster_liaison_deployment.yaml
similarity index 100%
rename from chart/templates/liaison_deployment.yaml
rename to chart/templates/cluster_liaison_deployment.yaml
diff --git a/chart/templates/ui_deployment.yaml b/chart/templates/cluster_ui_deployment.yaml
similarity index 100%
rename from chart/templates/ui_deployment.yaml
rename to chart/templates/cluster_ui_deployment.yaml
diff --git a/chart/templates/standalone_statefulset.yaml b/chart/templates/standalone_statefulset.yaml
new file mode 100644
index 0000000..ef60bd7
--- /dev/null
+++ b/chart/templates/standalone_statefulset.yaml
@@ -0,0 +1,272 @@
+{{/*
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements.  See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/}}
+
+{{- if .Values.standalone.enabled }}
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  labels: {{ include "banyandb.labels" . | nindent 4 }}
+    app.kubernetes.io/component: standalone
+  name: {{ template "banyandb.fullname" . }}
+spec:
+  serviceName: banyandb
+  replicas: 1
+  selector:
+    matchLabels: {{ include "banyandb.selectorLabels" . | nindent 6 }}
+      app.kubernetes.io/component: standalone
+  template:
+    metadata:
+      labels: {{ include "banyandb.labels" . | nindent 8 }}
+        app.kubernetes.io/component: standalone
+      {{- if .Values.standalone.podAnnotations }}
+      annotations:
+{{ toYaml .Values.standalone.podAnnotations | indent 8 }}
+      {{- end }}
+    spec:
+      serviceAccountName: {{ template "banyandb.serviceAccountName" . }}
+      {{- with .Values.standalone.securityContext }}
+      securityContext:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      priorityClassName: {{ .Values.standalone.priorityClassName }}
+      containers:
+        - name: standalone
+          image: {{ .Values.image.repository }}:{{ required "banyandb.image.tag is required" .Values.image.tag }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          env:
+            {{- range $env := .Values.standalone.env }}
+            - name: {{ $env.name }}
+              value: {{ $env.value }}
+            {{- end }}
+            {{- if .Values.standalone.tls }}
+            {{- if .Values.standalone.tls.grpcSecretName }}
+            - name: BYDB_TLS
+              value: "true"
+            - name: BYDB_CERT_FILE
+              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.crt"
+            - name: BYDB_KEY_FILE
+              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.key"
+            - name: BYDB_HTTP_GRPC_CERT_FILE
+              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.crt"
+            {{- end }}
+            {{- if .Values.standalone.tls.httpSecretName }}
+            - name: BYDB_HTTP_TLS
+              value: "true"
+            - name: BYDB_HTTP_CERT_FILE
+              value: "/etc/tls/{{ .Values.standalone.tls.httpSecretName 
}}/tls.crt"
+            - name: BYDB_HTTP_KEY_FILE
+              value: "/etc/tls/{{ .Values.standalone.tls.httpSecretName 
}}/tls.key"
+            {{- end }}
+            {{- end }}
+          args:
+            - standalone
+          ports:
+            - containerPort: 17912
+              name: grpc
+            - containerPort: 17913
+              name: http
+            - containerPort: 6060
+              name: pprof
+            - containerPort: 2121
+              name: observability
+          readinessProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              {{- if .Values.standalone.tls }}
+              scheme: HTTPS
+              {{- else }}
+              scheme: HTTP
+              {{- end }}
+            initialDelaySeconds: {{ .Values.standalone.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.standalone.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.standalone.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.standalone.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.standalone.readinessProbe.failureThreshold }}
+          livenessProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              {{- if .Values.standalone.tls }}
+              scheme: HTTPS
+              {{- else }}
+              scheme: HTTP
+              {{- end }}
+            initialDelaySeconds: {{ .Values.standalone.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.standalone.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.standalone.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.standalone.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.standalone.livenessProbe.failureThreshold }}
+          startupProbe:
+            httpGet:
+              path: /api/healthz
+              port: 17913
+              {{- if .Values.standalone.tls }}
+              scheme: HTTPS
+              {{- else }}
+              scheme: HTTP
+              {{- end }}
+            initialDelaySeconds: {{ .Values.standalone.startupProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.standalone.startupProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.standalone.startupProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.standalone.startupProbe.successThreshold }}
+            failureThreshold: {{ .Values.standalone.startupProbe.failureThreshold }}
+          {{- if .Values.standalone.resources }}
+          resources:
+            {{- if .Values.standalone.resources.requests }}
+            requests:
+              {{- range $request := .Values.standalone.resources.requests }}
+              {{ $request.key }}: {{ $request.value }}
+              {{- end }}
+            {{- end }}
+            {{- if .Values.standalone.resources.limits }}
+            limits:
+              {{- range $limit := .Values.standalone.resources.limits }}
+              {{ $limit.key }}: {{ $limit.value }}
+              {{- end }}
+            {{- end }}
+          {{- end }}
+
+          {{- if or .Values.storage.enabled .Values.standalone.tls }}
+          volumeMounts:
+            {{- if .Values.storage.enabled }}
+            {{- range $claim := .Values.storage.persistentVolumeClaims }}
+            {{- if $claim.existingClaimName }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.existingClaimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- else }}
+            {{- range $claim.mountTargets }}
+            - mountPath: /tmp/{{ . }}
+              name: {{ $claim.claimName }}
+              subPath: {{ . }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+
+            {{- if .Values.standalone.tls -}}
+            {{- if .Values.standalone.tls.grpcSecretName }}
+            - mountPath: /etc/tls/{{ .Values.standalone.tls.grpcSecretName }}
+              name: {{ .Values.standalone.tls.grpcSecretName }}-volume
+            {{- end }}
+            {{- if and .Values.standalone.tls.httpSecretName (ne .Values.standalone.tls.httpSecretName .Values.standalone.tls.grpcSecretName) }}
+            - mountPath: /etc/tls/{{ .Values.standalone.tls.httpSecretName }}
+              name: {{ .Values.standalone.tls.httpSecretName }}-volume
+            {{- end }}
+            {{- end -}}
+          {{- end }}
+
+      {{- if .Values.standalone.tls }}
+      volumes:
+        {{- if .Values.standalone.tls.grpcSecretName }}
+        - name: {{ .Values.standalone.tls.grpcSecretName }}-volume
+          secret:
+            secretName: {{ .Values.standalone.tls.grpcSecretName }}
+        {{- end }}
+        {{- if and .Values.standalone.tls.httpSecretName (ne .Values.standalone.tls.httpSecretName .Values.standalone.tls.grpcSecretName) }}
+        - name: {{ .Values.standalone.tls.httpSecretName }}-volume
+          secret:
+            secretName: {{ .Values.standalone.tls.httpSecretName }}
+        {{- end }}
+      {{- end }}
+
+      {{- if .Values.standalone.tolerations }}
+      tolerations:
+        {{- range $toleration := .Values.standalone.tolerations }}
+        - key: {{ $toleration.key }}
+          operator: {{ $toleration.operator }}
+          value: {{ $toleration.value }}
+          effect: {{ $toleration.effect }}
+        {{- end }}
+      {{- end }}
+
+      {{- if .Values.standalone.nodeSelector }}
+      nodeSelector:
+        {{- range $selector := .Values.standalone.nodeSelector }}
+        {{ $selector.key }}: {{ $selector.value }}
+        {{- end }}
+      {{- end }}
+
+      {{- if .Values.standalone.affinity }}
+      {{- $affinity := .Values.standalone.affinity }}
+      affinity:
+        {{- if $affinity.nodeAffinity }}
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            {{- range $requirement := $affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms }}
+              {{- range $matchExpression := $requirement.matchExpressions }}
+                - matchExpressions:
+                    - key: {{ $matchExpression.key }}
+                      operator: {{ $matchExpression.operator }}
+                      values:
+                        {{- range $v := $matchExpression.values }}
+                        - {{ $v }}
+                        {{- end }}
+              {{- end }}
+            {{- end }}
+        {{- end }}
+
+        {{- if $affinity.podAffinity }}
+        podAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            {{- range $term := $affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution }}
+            - topologyKey: {{ $term.topologyKey }}
+              namespaces:
+                {{- range $ns := $term.namespaces }}
+                - {{ $ns }}
+                {{- end }}
+              labelSelector:
+                matchLabels:
+                  {{- range $label := $term.labelSelector.matchLabels }}
+                  {{ $label.key }}: {{ $label.value }}
+                  {{- end }}
+            {{- end }}
+        {{- end }}
+      {{- end }}
+  {{- if .Values.storage.enabled }}
+  volumeClaimTemplates:
+    {{- range $claim := .Values.storage.persistentVolumeClaims }}
+    - metadata:
+        name: {{ $claim.claimName }}
+      spec:
+        {{- if $claim.accessModes }}
+        accessModes:
+          {{- range $claim.accessModes }}
+          - {{ . }}
+          {{- end }}
+        {{- end }}
+
+        {{- if $claim.size }}
+        resources:
+          requests:
+            storage: {{ $claim.size }}
+        {{- end }}
+
+        {{- if $claim.storageClass }}
+        storageClassName: {{ $claim.storageClass }}
+        {{- end }}
+
+        {{- if $claim.volumeMode }}
+        volumeMode: {{ $claim.volumeMode }}
+        {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
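In the standalone profile, TLS hangs off two secret names: setting standalone.tls switches the health probes to HTTPS, and each named secret is mounted under /etc/tls/<name>. A sketch (secret names illustrative; both must be existing kubernetes.io/tls Secrets in the release namespace):

    standalone:
      enabled: true
      tls:
        grpcSecretName: banyandb-grpc-tls   # sets BYDB_TLS plus cert/key paths
        httpSecretName: banyandb-http-tls   # sets BYDB_HTTP_TLS plus cert/key paths
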
diff --git a/chart/templates/statefulset.yaml b/chart/templates/statefulset.yaml
deleted file mode 100644
index 2f3280e..0000000
--- a/chart/templates/statefulset.yaml
+++ /dev/null
@@ -1,649 +0,0 @@
-{{/*
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements.  See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/}}
-
-{{- if .Values.standalone.enabled }}
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  labels: {{ include "banyandb.labels" . | nindent 4 }}
-    app.kubernetes.io/component: standalone
-  name: {{ template "banyandb.fullname" . }}
-spec:
-  serviceName: banyandb
-  replicas: 1
-  selector:
-    matchLabels: {{ include "banyandb.selectorLabels" . | nindent 6 }}
-      app.kubernetes.io/component: standalone
-  template:
-    metadata:
-      labels: {{ include "banyandb.labels" . | nindent 8 }}
-        app.kubernetes.io/component: standalone
-      {{- if .Values.standalone.podAnnotations }}
-      annotations:
-{{ toYaml .Values.standalone.podAnnotations | indent 8 }}
-      {{- end }}
-    spec:
-      serviceAccountName: {{ template "banyandb.serviceAccountName" . }}
-      {{- with .Values.standalone.securityContext }}
-      securityContext:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      priorityClassName: {{ .Values.standalone.priorityClassName }}
-      containers:
-        - name: standalone
-          image: {{ .Values.image.repository }}:{{ required "banyandb.image.tag is required" .Values.image.tag }}
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          env:
-            {{- range $env := .Values.standalone.env }}
-            - name: {{ $env.name }}
-              value: {{ $env.value }}
-            {{- end }}
-            {{- if .Values.standalone.tls}}
-            {{- if .Values.standalone.tls.grpcSecretName }}
-            - name: BYDB_TLS
-              value: "true"
-            - name: BYDB_CERT_FILE
-              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.crt"
-            - name: BYDB_KEY_FILE
-              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.key"
-            - name: BYDB_HTTP_GRPC_CERT_FILE
-              value: "/etc/tls/{{ .Values.standalone.tls.grpcSecretName 
}}/tls.crt"
-            {{- end }}
-            {{- if .Values.standalone.tls.httpSecretName }}
-            - name: BYDB_HTTP_TLS
-              value: "true"
-            - name: BYDB_HTTP_CERT_FILE
-              value: "/etc/tls/{{ .Values.standalone.tls.httpSecretName 
}}/tls.crt"
-            - name: BYDB_HTTP_KEY_FILE
-              value: "/etc/tls/{{ .Values.standalone.tls.httpSecretName 
}}/tls.key"
-            {{- end }}
-            {{- end }}
-          args:
-            - standalone
-          ports:
-            - containerPort: 17912
-              name: grpc
-            - containerPort: 17913
-              name: http
-            - containerPort: 6060
-              name: pprof
-            - containerPort: 2121
-              name: observebility
-          readinessProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              {{- if .Values.standalone.tls }}
-              scheme: HTTPS
-              {{- else }}
-              scheme: HTTP
-              {{- end }}
-            initialDelaySeconds: {{ .Values.standalone.readinessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.standalone.readinessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.standalone.readinessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.standalone.readinessProbe.successThreshold }}
-            failureThreshold: {{ .Values.standalone.readinessProbe.failureThreshold }}
-          livenessProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              {{- if .Values.standalone.tls }}
-              scheme: HTTPS
-              {{- else }}
-              scheme: HTTP
-              {{- end }}
-            initialDelaySeconds: {{ .Values.standalone.livenessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.standalone.livenessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.standalone.livenessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.standalone.livenessProbe.successThreshold }}
-            failureThreshold: {{ .Values.standalone.livenessProbe.failureThreshold }}
-          startupProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              {{- if .Values.standalone.tls }}
-              scheme: HTTPS
-              {{- else }}
-              scheme: HTTP
-              {{- end }}
-            initialDelaySeconds: {{ .Values.standalone.startupProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.standalone.startupProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.standalone.startupProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.standalone.startupProbe.successThreshold }}
-            failureThreshold: {{ .Values.standalone.startupProbe.failureThreshold }}
-          {{- if.Values.standalone.resources }}
-          resources:
-            {{- if.Values.standalone.resources.requests }}
-            requests:
-              {{- range $request := .Values.standalone.resources.requests }}
-              {{ $request.key }}: {{ $request.value }}
-              {{- end }}
-            {{- end }}
-            {{- if.Values.standalone.resources.limits }}
-            limits:
-              {{- range $limit := .Values.standalone.resources.limits }}
-              {{ $limit.key }}: {{ $limit.value }}
-              {{- end }}
-            {{- end }}
-          {{- end }}
-
-          {{- if or .Values.storage.enabled .Values.standalone.tls }}
-          volumeMounts:
-            {{- if .Values.storage.enabled }}
-            {{- range $claim := .Values.storage.persistentVolumeClaims }}
-            {{- if $claim.existingClaimName }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.existingClaimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- else }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.claimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-
-            {{- if .Values.standalone.tls -}}
-            {{- if .Values.standalone.tls.grpcSecretName }}
-            - mountPath: /etc/tls/{{ .Values.standalone.tls.grpcSecretName }}
-              name: {{ .Values.standalone.tls.grpcSecretName }}-volume
-            {{- end }}
-            {{- if and .Values.standalone.tls.httpSecretName (ne .Values.standalone.tls.httpSecretName .Values.standalone.tls.grpcSecretName) }}
-            - mountPath: /etc/tls/{{ .Values.standalone.tls.httpSecretName }}
-              name: {{ .Values.standalone.tls.httpSecretName }}-volume
-            {{- end }}
-            {{- end -}}
-          {{- end }}
-
-      {{- if .Values.standalone.tls }}
-      volumes:
-        {{- if .Values.standalone.tls.grpcSecretName }}
-        - name: {{ .Values.standalone.tls.grpcSecretName }}-volume
-          secret:
-            secretName: {{ .Values.standalone.tls.grpcSecretName }}
-        {{- end }}
-        {{- if and .Values.standalone.tls.httpSecretName (ne .Values.standalone.tls.httpSecretName .Values.standalone.tls.grpcSecretName) }}
-        - name: {{ .Values.standalone.tls.httpSecretName }}-volume
-          secret:
-            secretName: {{ .Values.standalone.tls.httpSecretName }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.standalone.tolerations }}
-      tolerations:
-        {{- range $toleration := .Values.standalone.tolerations }}
-        - key: {{ $toleration.key }}
-          operator: {{ $toleration.operator }}
-          value: {{ $toleration.value }}
-          effect: {{ $toleration.effect }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.standalone.nodeSelector }}
-      nodeSelector:
-        {{- range $selector := .Values.standalone.nodeSelector }}
-        {{ $selector.key }}: {{ $selector.value }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.standalone.affinity }}
-      {{- $affinity := .Values.standalone.affinity }}
-      affinity:
-        {{- if $affinity.nodeAffinity }}
-        nodeAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            nodeSelectorTerms:
-            {{- range $requirement := $affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms }}
-              {{- range $matchExpression := $requirement.matchExpressions }}
-                - matchExpressions:
-                    - key: {{ $matchExpression.key }}
-                      operator: {{ $matchExpression.operator }}
-                      values:
-                        {{- range $v := $matchExpression.values }}
-                        - {{ $v }}
-                        {{- end }}
-              {{- end }}
-            {{- end }}
-        {{- end }}
-
-        {{- if $affinity.podAffinity }}
-        podAffinity:
-          requiredDuringSchedulingIgnoredDuringExecution:
-            {{- range $term := $affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution }}
-            - topologyKey: {{ $term.topologyKey }}
-              namespaces:
-                {{- range $ns := $term.namespaces }}
-                - {{ $ns }}
-                {{- end }}
-              labelSelector:
-                matchLabels:
-                  {{- range $label := $term.labelSelector.matchLabels }}
-                  {{ $label.key }}: {{ $label.value }}
-                  {{- end }}
-            {{- end }}
-        {{- end }}
-      {{- end }}
-  {{- if .Values.storage.enabled }}
-  volumeClaimTemplates:
-    {{- range $claim := .Values.storage.persistentVolumeClaims }}
-    - metadata:
-        name: {{ $claim.claimName }}
-      spec:
-        {{- if $claim.accessModes }}
-        accessModes:
-          {{- range $claim.accessModes }}
-          - {{ . }}
-          {{- end }}
-        {{- end }}
-
-        {{- if $claim.size }}
-        resources:
-          requests:
-            storage: {{ $claim.size }}
-        {{- end }}
-
-        {{- if $claim.storageClass }}
-        storageClassName: {{ $claim.storageClass }}
-        {{- end }}
-
-        {{- if $claim.volumeMode }}
-        volumeMode: {{ $claim.volumeMode }}
-        {{- end }}
-    {{- end }}
-  {{- end }}
-{{- end }}
-
-{{- if and .Values.cluster.enabled .Values.cluster.data }}
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  labels: {{ include "banyandb.labels" . | nindent 4 }}
-    app.kubernetes.io/component: data
-  name: {{ template "banyandb.fullname" . }}-data
-spec:
-  serviceName: banyandb
-  replicas: {{ .Values.cluster.data.replicas }}
-  selector:
-    matchLabels: {{ include "banyandb.selectorLabels" . | nindent 6 }}
-      app.kubernetes.io/component: data
-  updateStrategy:
-    type: RollingUpdate
-  podManagementPolicy: Parallel
-  template:
-    metadata:
-      labels: {{ include "banyandb.labels" . | nindent 8 }}
-        app.kubernetes.io/component: data
-      {{- if .Values.cluster.data.podAnnotations }}
-      annotations:
-{{ toYaml .Values.cluster.data.podAnnotations | indent 8 }}
-      {{- end }}
-    spec:
-      serviceAccountName: {{ template "banyandb.serviceAccountName" . }}
-      {{- with .Values.cluster.data.securityContext }}
-      securityContext:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      priorityClassName: {{ .Values.cluster.data.priorityClassName }}
-      initContainers:
-        {{- if .Values.cluster.data.restoreInitContainer.enabled }}
-        - name: restore-init
-          image: {{ .Values.image.repository }}:{{ required "banyandb.image.tag is required" .Values.image.tag }}-slim
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          command:
-            - "/restore"
-            - "run"
-            - "--source={{ .Values.cluster.data.backupSidecar.dest }}"
-          {{- if $.Values.storage.enabled }}
-          volumeMounts:
-            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
-            {{- if $claim.existingClaimName }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.existingClaimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- else }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.claimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-          {{- end }}
-          {{- with .Values.cluster.data.restoreInitContainer.resources }}
-          resources:
-            {{- toYaml . | nindent 12 }}
-          {{- end }}
-        {{- end }}
-      containers:
-        - name: data
-          image: {{ .Values.image.repository }}:{{ required "banyandb.image.tag is required" .Values.image.tag }}-slim
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          env:
-            {{- range $env := .Values.cluster.data.env }}
-            - name: {{ $env.name }}
-              value: {{ $env.value }}
-            {{- end }}
-            {{- if .Values.cluster.data.tls}}
-            {{- if .Values.cluster.data.tls.grpcSecretName }}
-            - name: BYDB_TLS
-              value: "true"
-            - name: BYDB_CERT_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.grpcSecretName 
}}/tls.crt"
-            - name: BYDB_KEY_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.grpcSecretName 
}}/tls.key"
-            - name: BYDB_HTTP_GRPC_CERT_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.grpcSecretName 
}}/tls.crt"
-            {{- end }}
-            {{- end }}
-            {{- if and .Values.etcd.auth.rbac.create (not .Values.etcd.auth.rbac.allowNoneAuthentication) }}
-            - name: BYDB_ETCD_USERNAME
-              value: "root"
-            - name: BYDB_ETCD_PASSWORD
-              value: {{ .Values.etcd.auth.rbac.rootPassword }}
-            {{- end }}
-            {{- if .Values.etcd.auth.client.secureTransport }}
-            - name: BYDB_ETCD_TLS_CA_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.etcdSecretName 
}}/ca.crt"
-            {{- end }}
-            {{- if .Values.etcd.auth.client.enableAuthentication }}
-            - name: BYDB_ETCD_TLS_CERT_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.etcdSecretName 
}}/tls.crt"
-            - name: BYDB_ETCD_TLS_KEY_FILE
-              value: "/etc/tls/{{ .Values.cluster.data.tls.etcdSecretName 
}}/tls.key"
-            {{- end }}
-            {{- if and (not .Values.etcd.enabled) .Values.cluster.etcdEndpoints }}
-            - name: BYDB_ETCD_ENDPOINTS
-              value: "{{- .Values.cluster.etcdEndpoints | join "," -}}"
-            {{- else }}
-            {{- include "banyandb.etcdEndpoints" . | nindent 12 }}
-            {{- end }}
-            - name: BYDB_NODE_HOST_PROVIDER
-              value: "ip"
-            - name: BYDB_NODE_HOST
-              valueFrom:
-                fieldRef:
-                  fieldPath: status.podIP
-          args:
-            - data 
-          ports:
-            - containerPort: 17912
-              name: grpc
-            - containerPort: 17913
-              name: http-healthz
-            - containerPort: 6060
-              name: pprof
-            - containerPort: 2121
-              name: observebility
-          readinessProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              scheme: HTTP
-            initialDelaySeconds: {{ .Values.cluster.data.readinessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.cluster.data.readinessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.cluster.data.readinessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.cluster.data.readinessProbe.successThreshold }}
-            failureThreshold: {{ .Values.cluster.data.readinessProbe.failureThreshold }}
-          livenessProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              scheme: HTTP
-            initialDelaySeconds: {{ .Values.cluster.data.livenessProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.cluster.data.livenessProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.cluster.data.livenessProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.cluster.data.livenessProbe.successThreshold }}
-            failureThreshold: {{ .Values.cluster.data.livenessProbe.failureThreshold }}
-          startupProbe:
-            httpGet:
-              path: /api/healthz
-              port: 17913
-              scheme: HTTP
-            initialDelaySeconds: {{ .Values.cluster.data.startupProbe.initialDelaySeconds }}
-            periodSeconds: {{ .Values.cluster.data.startupProbe.periodSeconds }}
-            timeoutSeconds: {{ .Values.cluster.data.startupProbe.timeoutSeconds }}
-            successThreshold: {{ .Values.cluster.data.startupProbe.successThreshold }}
-            failureThreshold: {{ .Values.cluster.data.startupProbe.failureThreshold }}
-          {{- if.Values.cluster.data.resources }}
-          resources:
-            {{- if.Values.cluster.data.resources.requests }}
-            requests:
-              {{- range $request := .Values.cluster.data.resources.requests }}
-              {{ $request.key }}: {{ $request.value }}
-              {{- end }}
-            {{- end }}
-            {{- if.Values.cluster.data.resources.limits }}
-            limits:
-              {{- range $limit := .Values.cluster.data.resources.limits }}
-              {{ $limit.key }}: {{ $limit.value }}
-              {{- end }}
-            {{- end }}
-          {{- end }}
-
-          {{- if or .Values.storage.enabled .Values.cluster.data.tls }}
-          volumeMounts:
-            {{- if .Values.storage.enabled }}
-            {{- range $claim := .Values.storage.persistentVolumeClaims }}
-            {{- if $claim.existingClaimName }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.existingClaimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- else }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.claimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-
-            {{- if .Values.cluster.data.tls }}
-            {{- if .Values.cluster.data.tls.grpcSecretName }}
-            - mountPath: /etc/tls/{{ .Values.cluster.data.tls.grpcSecretName }}
-              name: {{ .Values.cluster.data.tls.grpcSecretName }}-volume
-            {{- end }}
-            {{- if .Values.cluster.data.tls.etcdSecretName }}
-            - mountPath: /etc/tls/{{ .Values.cluster.data.tls.etcdSecretName }}
-              name: {{ .Values.cluster.data.tls.etcdSecretName }}-volume
-            {{- end }}
-            {{- end }}
-          {{- end }}
-        {{- if .Values.cluster.data.backupSidecar.enabled }}
-        - name: backup
-          image: {{ .Values.image.repository }}:{{ required "banyandb.image.tag is required" .Values.image.tag }}-slim
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          command:
-            - "/backup"
-            - "--dest={{ .Values.cluster.data.backupSidecar.dest }}"
-            - "--time-style={{ .Values.cluster.data.backupSidecar.timeStyle }}"
-            - "--schedule={{ .Values.cluster.data.backupSidecar.schedule }}"
-            - "--grpc-addr=127.0.0.1:17912"
-          env:
-            - name: ORDINAL_NUMBER
-              valueFrom:
-                fieldRef:
-                  fieldPath: metadata.labels['apps.kubernetes.io/pod-index']
-          {{- if $.Values.storage.enabled }}
-          volumeMounts:
-            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
-            {{- if $claim.existingClaimName }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.existingClaimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- else }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.claimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-          {{- end }}
-          {{- with .Values.cluster.data.backupSidecar.resources }}
-          resources:
-            {{- toYaml . | nindent 4 }}
-          {{- end }}
-        {{- end }}
-        {{- range $sidecar := .Values.cluster.data.sidecar }}
-        - name: {{ $sidecar.name }}
-          image: {{ $sidecar.image }}
-          imagePullPolicy: {{ $sidecar.imagePullPolicy }}
-          command:
-            {{- range $sidecar.commands.normal }}
-            - {{ . | quote }}
-            {{- end }}
-          lifecycle:
-            preStop:
-              exec:
-                command:
-                  {{- range $sidecar.commands.preStop }}
-                  - {{ . | quote }}
-                  {{- end }}
-          {{- if $.Values.storage.enabled }}
-          volumeMounts:
-            {{- range $claim := $.Values.storage.persistentVolumeClaims }}
-            {{- if $claim.existingClaimName }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.existingClaimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- else }}
-            {{- range $claim.mountTargets }}
-            - mountPath: /tmp/{{ . }}
-              name: {{ $claim.claimName }}
-              subPath: {{ . }}
-            {{- end }}
-            {{- end }}
-            {{- end }}
-          {{- end }}
-        {{- end }}
-
-      {{- if .Values.cluster.data.tls }}
-      volumes:
-        {{- if .Values.cluster.data.tls.grpcSecretName }}
-        - name: {{ .Values.cluster.data.tls.grpcSecretName }}-volume
-          secret:
-            secretName: {{ .Values.cluster.data.tls.grpcSecretName }}
-        {{- end }}
-        {{- if .Values.cluster.data.tls.etcdSecretName }}
-        - name: {{ .Values.cluster.data.tls.etcdSecretName }}-volume
-          secret:
-            secretName: {{ .Values.cluster.data.tls.etcdSecretName }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.cluster.data.tolerations }}
-      tolerations:
-        {{- range $toleration := .Values.cluster.data.tolerations }}
-        - key: {{ $toleration.key }}
-          operator: {{ $toleration.operator }}
-          value: {{ $toleration.value }}
-          effect: {{ $toleration.effect }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.cluster.data.nodeSelector }}
-      nodeSelector:
-        {{- range $selector := .Values.cluster.data.nodeSelector }}
-        {{ $selector.key }}: {{ $selector.value }}
-        {{- end }}
-      {{- end }}
-
-      {{- if .Values.cluster.data.affinity }}
-      affinity: {{ toYaml .Values.cluster.data.affinity | nindent 8 }}
-      {{- else if or .Values.cluster.data.podAffinityPreset .Values.cluster.data.podAntiAffinityPreset }}
-      affinity:
-        {{- if and .Values.cluster.data.podAffinityPreset (not (empty .Values.cluster.data.podAffinityPreset)) }}
-        podAffinity:
-          {{- if eq .Values.cluster.data.podAffinityPreset "soft" }}
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              podAffinityTerm:
-                labelSelector:
-                  matchLabels: {{ include "banyandb.selectorLabels" . | nindent 20 }}
-                    app.kubernetes.io/component: data
-                topologyKey: kubernetes.io/hostname
-          {{- else if eq .Values.cluster.data.podAffinityPreset "hard" }}
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                labelSelector:
-                  matchLabels: {{ include "banyandb.selectorLabels" . | nindent 20 }}
-                    app.kubernetes.io/component: data
-              topologyKey: kubernetes.io/hostname
-          {{- end }}
-        {{- end }}
-        {{- if and .Values.cluster.data.podAntiAffinityPreset (not (empty .Values.cluster.data.podAntiAffinityPreset)) }}
-        podAntiAffinity:
-          {{- if eq .Values.cluster.data.podAntiAffinityPreset "soft" }}
-          preferredDuringSchedulingIgnoredDuringExecution:
-            - weight: 100
-              podAffinityTerm:
-                labelSelector:
-                  matchLabels: {{ include "banyandb.selectorLabels" . | nindent 20 }}
-                    app.kubernetes.io/component: data
-                topologyKey: kubernetes.io/hostname
-          {{- else if eq .Values.cluster.data.podAntiAffinityPreset "hard" }}
-          requiredDuringSchedulingIgnoredDuringExecution:
-            - labelSelector:
-                labelSelector:
-                  matchLabels: {{ include "banyandb.selectorLabels" . | nindent 20 }}
-                    app.kubernetes.io/component: data
-              topologyKey: kubernetes.io/hostname
-          {{- end }}
-        {{- end }}
-      {{- end }}
-  {{- if .Values.storage.enabled }}
-  volumeClaimTemplates:
-    {{- range $claim := .Values.storage.persistentVolumeClaims }}
-    - metadata:
-        name: {{ $claim.claimName }}
-      spec:
-        {{- if $claim.accessModes }}
-        accessModes:
-          {{- range $claim.accessModes }}
-          - {{ . }}
-          {{- end }}
-        {{- end }}
-
-        {{- if $claim.size }}
-        resources:
-          requests:
-            storage: {{ $claim.size }}
-        {{- end }}
-
-        {{- if $claim.storageClass }}
-        storageClassName: {{ $claim.storageClass }}
-        {{- end }}
-
-        {{- if $claim.volumeMode }}
-        volumeMode: {{ $claim.volumeMode }}
-        {{- end }}
-    {{- end }}
-  {{- end }}
-{{- end }}
diff --git a/chart/values.yaml b/chart/values-lifecycle.yaml
similarity index 69%
copy from chart/values.yaml
copy to chart/values-lifecycle.yaml
index 79b6700..29b3e74 100644
--- a/chart/values.yaml
+++ b/chart/values-lifecycle.yaml
@@ -338,141 +338,184 @@ cluster:
   ## @section Configuration for data component
   ##
   data:
-    ## @param cluster.data.name Name of the data component
+    ## Add nodeTemplate with common defaults
     ##
-    name: banyandb
-    ## @param cluster.data.replicas Number of data replicas
-    ##
-    replicas: 3
-    ## @param cluster.data.podAnnotations Pod annotations for data component
-    ##
-    podAnnotations: {}
-    ## @param cluster.data.securityContext Security context for data pods
-    ##
-    securityContext: {}
-    ## @param cluster.data.env Environment variables for data pods
-    ##
-    env: []
-    ## @param cluster.data.priorityClassName Priority class name for data pods
-    ##
-    priorityClassName: ""
-    ## Pod disruption budget for data
-    ##
-    podDisruptionBudget:
-      ## @param cluster.data.podDisruptionBudget.maxUnavailable Maximum unavailable pods for data component
-      maxUnavailable: 1
-    ## @param cluster.data.tolerations Tolerations for data pods
-    ##
-    tolerations: []
-    ## @param cluster.data.nodeSelector Node selector for data pods
-    ##
-    nodeSelector: []
-    ## @param cluster.data.affinity Affinity rules for data pods
-    ##
-    affinity: {}
-    ## @param cluster.data.podAffinityPreset Pod affinity preset for data
-    ##
-    podAffinityPreset: ""
-    ## @param cluster.data.podAntiAffinityPreset Pod anti-affinity preset for data
-    ##
-    podAntiAffinityPreset: soft
-    ## Resource requests/limits for data
-    ##
-    resources:
-      ## @param cluster.data.resources.requests Resource requests for data pods
-      requests: []
-      ## @param cluster.data.resources.limits Resource limits for data pods
-      limits: []
-    ## GRPC service settings for data
-    ##
-    grpcSvc:
-      ## @param cluster.data.grpcSvc.labels Labels for GRPC service for data
-      labels: {}
-      ## @param cluster.data.grpcSvc.annotations Annotations for GRPC service for data
-      annotations: {}
-      ## @param cluster.data.grpcSvc.port Port number for GRPC service for data
-      port: 17912
-    ## @param cluster.data.sidecar Sidecar containers for data
-    ##
-    sidecar: []
-    ## Backup sidecar configuration
-    ##
-    backupSidecar:
-      ## @param cluster.data.backupSidecar.enabled Enable backup sidecar (boolean)
+    nodeTemplate:
+      ## @param cluster.data.nodeTemplate.replicas Number of data replicas by default
       ##
-      enabled: false
-      ## @param cluster.data.backupSidecar.dest Backup destination path
+      replicas: 2
+      ## @param cluster.data.nodeTemplate.podAnnotations Pod annotations for data pods
       ##
-      dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
-      ## @param cluster.data.backupSidecar.timeStyle Backup time style (e.g., daily)
+      podAnnotations: {}
+      ## @param cluster.data.nodeTemplate.securityContext Security context for data pods
       ##
-      timeStyle: "daily"
-      ## @param cluster.data.backupSidecar.schedule Backup schedule (cron format)
+      securityContext: {}
+      ## @param cluster.data.nodeTemplate.env Environment variables for data pods
       ##
-      schedule: "@hourly"
-      ## @param cluster.data.backupSidecar.resources Resources for backup sidecar
+      env: []
+      ## @param cluster.data.nodeTemplate.priorityClassName Priority class name for data pods
       ##
-      resources: {}
-    ## Restore init container configuration
-    ##
-    restoreInitContainer:
-      ## @param cluster.data.restoreInitContainer.enabled Enable restore init container (boolean)
+      priorityClassName: ""
+      ## Pod disruption budget for data pods
       ##
-      enabled: false
-      ## @param cluster.data.restoreInitContainer.resources Resources for restore init container
+      podDisruptionBudget:
+        ## @param cluster.data.nodeTemplate.podDisruptionBudget.maxUnavailable Maximum unavailable data pods
+        ##
+        maxUnavailable: 1
+      ## @param cluster.data.nodeTemplate.tolerations Tolerations for data pods
       ##
-      resources: {}
-    ## Liveness probe for data
-    ##
-    livenessProbe:
-      ## @param cluster.data.livenessProbe.initialDelaySeconds Initial delay for data liveness probe
-      initialDelaySeconds: 20
-      ## @param cluster.data.livenessProbe.periodSeconds Probe period for data liveness probe
-      periodSeconds: 30
-      ## @param cluster.data.livenessProbe.timeoutSeconds Timeout in seconds for data liveness probe
-      timeoutSeconds: 5
-      ## @param cluster.data.livenessProbe.successThreshold Success threshold for data liveness probe
-      successThreshold: 1
-      ## @param cluster.data.livenessProbe.failureThreshold Failure threshold for data liveness probe
-      failureThreshold: 5
-    ## Readiness probe for data
-    ##
-    readinessProbe:
-      ## @param cluster.data.readinessProbe.initialDelaySeconds Initial delay for data readiness probe
+      tolerations: []
+      ## @param cluster.data.nodeTemplate.nodeSelector Node selector for data pods
       ##
-      initialDelaySeconds: 20
-      ## @param cluster.data.readinessProbe.periodSeconds Probe period for data readiness probe
+      nodeSelector: []
+      ## @param cluster.data.nodeTemplate.affinity Affinity rules for data pods
       ##
-      periodSeconds: 30
-      ## @param cluster.data.readinessProbe.timeoutSeconds Timeout in seconds for data readiness probe
+      affinity: {}
+      ## @param cluster.data.nodeTemplate.podAffinityPreset Pod affinity preset for data pods
       ##
-      timeoutSeconds: 5
-      ## @param cluster.data.readinessProbe.successThreshold Success threshold for data readiness probe
+      podAffinityPreset: ""
+      ## @param cluster.data.nodeTemplate.podAntiAffinityPreset Pod anti-affinity preset for data pods
       ##
-      successThreshold: 1
-      ## @param cluster.data.readinessProbe.failureThreshold Failure threshold for data readiness probe
+      podAntiAffinityPreset: soft
+      ## Resource requests/limits for data pods
       ##
-      failureThreshold: 5
-    ## Startup probe for data
-    ##
-    startupProbe:
-      ## @param cluster.data.startupProbe.initialDelaySeconds Initial delay for data startup probe
+      resources:
+        ## @param cluster.data.nodeTemplate.resources.requests Resource requests for data pods
+        ##
+        requests: []
+        ## @param cluster.data.nodeTemplate.resources.limits Resource limits for data pods
+        ##
+        limits: []
+      ## GRPC service settings for data pods
       ##
-      initialDelaySeconds: 0
-      ## @param cluster.data.startupProbe.periodSeconds Probe period for data startup probe
+      grpcSvc:
+        ## @param cluster.data.nodeTemplate.grpcSvc.labels Labels for GRPC service for data pods
+        ##
+        labels: {}
+        ## @param cluster.data.nodeTemplate.grpcSvc.annotations Annotations for GRPC service for data pods
+        ##
+        annotations: {}
+        ## @param cluster.data.nodeTemplate.grpcSvc.port Port number for GRPC service for data pods
+        ##
+        port: 17912
+      ## @param cluster.data.nodeTemplate.sidecar Sidecar containers for data pods
       ##
-      periodSeconds: 10
-      ## @param cluster.data.startupProbe.timeoutSeconds Timeout in seconds for data startup probe
+      sidecar: []
+      ## Backup sidecar configuration for data pods
       ##
-      timeoutSeconds: 5
-      ## @param cluster.data.startupProbe.successThreshold Success threshold for data startup probe
+      backupSidecar:
+        ## @param cluster.data.nodeTemplate.backupSidecar.enabled Enable backup sidecar for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.backupSidecar.dest Backup destination path for data pods
+        ##
+        dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
+        ## @param cluster.data.nodeTemplate.backupSidecar.timeStyle Backup time style for data pods (e.g., daily)
+        ##
+        timeStyle: "daily"
+        ## @param cluster.data.nodeTemplate.backupSidecar.schedule Backup schedule for data pods (cron format)
+        ##
+        schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.backupSidecar.resources Resources for backup sidecar for data pods
+        ##
+        resources: {}
+      ## Lifecycle sidecar configuration for data pods
       ##
-      successThreshold: 1
-      ## @param cluster.data.startupProbe.failureThreshold Failure threshold for data startup probe
+      lifecycleSidecar:
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.enabled Enable lifecycle sidecar for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.schedule Schedule for lifecycle sidecar (cron format)
+        ##
+        schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.resources Resources for lifecycle sidecar for data pods
+        ##
+        resources: {}
+      ## Restore init container configuration for data pods
+      ##
+      restoreInitContainer:
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.enabled Enable restore init container for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.resources Resources for restore init container for data pods
+        ##
+        resources: {}
+      ## Liveness probe for data pods
+      ##
+      livenessProbe:
+        ## @param cluster.data.nodeTemplate.livenessProbe.initialDelaySeconds Initial delay for data liveness probe
+        ##
+        initialDelaySeconds: 20
+        ## @param cluster.data.nodeTemplate.livenessProbe.periodSeconds Probe period for data liveness probe
+        ##
+        periodSeconds: 30
+        ## @param cluster.data.nodeTemplate.livenessProbe.timeoutSeconds Timeout in seconds for data liveness probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.livenessProbe.successThreshold Success threshold for data liveness probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.livenessProbe.failureThreshold Failure threshold for data liveness probe
+        ##
+        failureThreshold: 5
+      ## Readiness probe for data pods
+      ##
+      readinessProbe:
+        ## @param cluster.data.nodeTemplate.readinessProbe.initialDelaySeconds Initial delay for data readiness probe
+        ##
+        initialDelaySeconds: 20
+        ## @param cluster.data.nodeTemplate.readinessProbe.periodSeconds Probe period for data readiness probe
+        ##
+        periodSeconds: 30
+        ## @param cluster.data.nodeTemplate.readinessProbe.timeoutSeconds Timeout in seconds for data readiness probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.readinessProbe.successThreshold Success threshold for data readiness probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.readinessProbe.failureThreshold Failure threshold for data readiness probe
+        ##
+        failureThreshold: 5
+      ## Startup probe for data pods
+      ##
+      startupProbe:
+        ## @param cluster.data.nodeTemplate.startupProbe.initialDelaySeconds Initial delay for data startup probe
+        ##
+        initialDelaySeconds: 0
+        ## @param cluster.data.nodeTemplate.startupProbe.periodSeconds Probe period for data startup probe
+        ##
+        periodSeconds: 10
+        ## @param cluster.data.nodeTemplate.startupProbe.timeoutSeconds Timeout in seconds for data startup probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.startupProbe.successThreshold Success threshold for data startup probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.startupProbe.failureThreshold Failure threshold for data startup probe
+        ##
+        failureThreshold: 60
+
+    ## @extra cluster.data.roles List of data roles (hot, warm, cold)
+    ##
+    roles:
+      hot:
+        ## Override lifecycle sidecar settings for hot nodes
+        lifecycleSidecar:
+          ## @param cluster.data.roles.hot.lifecycleSidecar.schedule Schedule for lifecycle sidecar for hot data pods
+          schedule: "@daily"
+          enabled: true
+      warm:
+        ## Override lifecycle sidecar settings for warm nodes
+        lifecycleSidecar:
+          ## @param cluster.data.roles.warm.lifecycleSidecar.schedule Schedule for lifecycle sidecar for warm data pods
+          schedule: "@daily"
+          enabled: true
+      cold:
+        ## @param cluster.data.roles.cold.replicas Override number of cold data replicas
+        replicas: 1
+
+      ## @section Configuration for UI component
       ##
-      failureThreshold: 60
-  ## @section Configuration for UI component
-  ##
   ui:
     ## @param cluster.ui.type UI deployment type (None, Standalone, Embedded)
     ##
@@ -622,10 +665,13 @@ storage:
  ## @param storage.persistentVolumeClaims[0].mountTargets Mount targets for the PVC
  ##
  - mountTargets: [ "measure" ]
+    ## @param storage.persistentVolumeClaims[0].nodeRole Node role this PVC is bound to (hot, warm, cold)
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[0].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[0].claimName Name of the PVC
-    claimName: measure-data
+    claimName: hot-measure-data
    ## @param storage.persistentVolumeClaims[0].size Size of the PVC
    size: 50Gi
    ## @param storage.persistentVolumeClaims[0].accessModes Access modes for the PVC
@@ -637,10 +683,13 @@ storage:
     volumeMode: Filesystem
  ## @param storage.persistentVolumeClaims[1].mountTargets Mount targets for the PVC
  - mountTargets: [ "stream" ]
+    ## @param storage.persistentVolumeClaims[1].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[1].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[1].claimName Name of the PVC
-    claimName: stream-data
+    claimName: hot-stream-data
    ## @param storage.persistentVolumeClaims[1].size Size of the PVC
    size: 50Gi
    ## @param storage.persistentVolumeClaims[1].accessModes Access modes for the PVC
@@ -652,10 +701,13 @@ storage:
     volumeMode: Filesystem
  ## @param storage.persistentVolumeClaims[2].mountTargets Mount targets for the PVC
  - mountTargets: [ "property" ]
+    ## @param storage.persistentVolumeClaims[2].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[2].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[2].claimName Name of the PVC
-    claimName: property-data
+    claimName: hot-property-data
    ## @param storage.persistentVolumeClaims[2].size Size of the PVC
    size: 5Gi
    ## @param storage.persistentVolumeClaims[2].accessModes Access modes for the PVC
@@ -665,9 +717,37 @@ storage:
     storageClass: null
    ## @param storage.persistentVolumeClaims[2].volumeMode Volume mode for the PVC
     volumeMode: Filesystem
+  ## Warm storage configuration
+  - mountTargets: [ "measure", "stream" ]
+    ## @param storage.persistentVolumeClaims[3].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: warm
+    ## @param storage.persistentVolumeClaims[3].claimName Name of the PVC
+    claimName: warm-data
+    ## @param storage.persistentVolumeClaims[3].size Size of the PVC
+    size: 100Gi
+    ## @param storage.persistentVolumeClaims[3].accessModes Access modes for the PVC
+    accessModes:
+    - ReadWriteOnce
+    ## @param storage.persistentVolumeClaims[3].storageClass Storage class for the PVC
+    storageClass: null
+  ## Cold storage configuration
+  - mountTargets: [ "measure", "stream" ]
+    ## @param storage.persistentVolumeClaims[4].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: cold
+    ## @param storage.persistentVolumeClaims[4].claimName Name of the PVC
+    claimName: cold-data
+    ## @param storage.persistentVolumeClaims[4].size Size of the PVC
+    size: 500Gi
+    ## @param storage.persistentVolumeClaims[4].accessModes Access modes for the PVC
+    accessModes:
+    - ReadWriteOnce
+    ## @param storage.persistentVolumeClaims[4].storageClass Storage class for the PVC
+    storageClass: null
 
-## @section Service account configuration
-##
+  ## @section Service account configuration
+  ##
 serviceAccount:
   ## @param serviceAccount.create Create a service account (boolean)
   ##
@@ -724,6 +804,17 @@ etcd:
       ## @param etcd.auth.client.caFilename CA certificate filename for TLS
       ##
       caFilename: ""
+    ## Authentication token
+    ## ref: https://etcd.io/docs/latest/learning/design-auth-v3/#two-types-of-tokens-simple-and-jwt
+    ##
+    token:
+      ## @param etcd.auth.token.enabled Enables token authentication
+      ##
+      enabled: true
+      ## @param etcd.auth.token.type Authentication token type. Allowed values: 'simple' or 'jwt'
+      ## ref: https://etcd.io/docs/latest/op-guide/configuration/#--auth-token
+      ##
+      type: simple
   ## @section Liveness probe configuration for etcd
   ##
   livenessProbe:
@@ -736,3 +827,21 @@ etcd:
    ## @param etcd.readinessProbe.initialDelaySeconds Initial delay for readiness probe
     ##
     initialDelaySeconds: 10
+  ## @param etcd.autoCompactionMode Auto-compaction mode (periodic, revision)
+  ##
+  autoCompactionMode: periodic
+  ## @param etcd.autoCompactionRetention Auto-compaction retention period
+  ##
+  autoCompactionRetention: "1"
+  ## @extra etcd.defrag Configuration for defragmentation
+  ##
+  defrag:
+    ## @param etcd.defrag.enabled Enable defragmentation (boolean)
+    ##
+    enabled: true
+    ## @extra etcd.defrag.cronjob Cron job configuration for defragmentation
+    ##
+    cronjob:
+      ## @param etcd.defrag.cronjob.schedule Cron schedule for defragmentation
+      ##
+      schedule: "0 0 * * *"
diff --git a/chart/values.yaml b/chart/values.yaml
index 79b6700..435b6bf 100644
--- a/chart/values.yaml
+++ b/chart/values.yaml
@@ -338,139 +338,170 @@ cluster:
   ## @section Configuration for data component
   ##
   data:
-    ## @param cluster.data.name Name of the data component
+    ## Add nodeTemplate with common defaults
     ##
-    name: banyandb
-    ## @param cluster.data.replicas Number of data replicas
-    ##
-    replicas: 3
-    ## @param cluster.data.podAnnotations Pod annotations for data component
-    ##
-    podAnnotations: {}
-    ## @param cluster.data.securityContext Security context for data pods
-    ##
-    securityContext: {}
-    ## @param cluster.data.env Environment variables for data pods
-    ##
-    env: []
-    ## @param cluster.data.priorityClassName Priority class name for data pods
-    ##
-    priorityClassName: ""
-    ## Pod disruption budget for data
-    ##
-    podDisruptionBudget:
-      ## @param cluster.data.podDisruptionBudget.maxUnavailable Maximum unavailable pods for data component
-      maxUnavailable: 1
-    ## @param cluster.data.tolerations Tolerations for data pods
-    ##
-    tolerations: []
-    ## @param cluster.data.nodeSelector Node selector for data pods
-    ##
-    nodeSelector: []
-    ## @param cluster.data.affinity Affinity rules for data pods
-    ##
-    affinity: {}
-    ## @param cluster.data.podAffinityPreset Pod affinity preset for data
-    ##
-    podAffinityPreset: ""
-    ## @param cluster.data.podAntiAffinityPreset Pod anti-affinity preset for data
-    ##
-    podAntiAffinityPreset: soft
-    ## Resource requests/limits for data
-    ##
-    resources:
-      ## @param cluster.data.resources.requests Resource requests for data pods
-      requests: []
-      ## @param cluster.data.resources.limits Resource limits for data pods
-      limits: []
-    ## GRPC service settings for data
-    ##
-    grpcSvc:
-      ## @param cluster.data.grpcSvc.labels Labels for GRPC service for data
-      labels: {}
-      ## @param cluster.data.grpcSvc.annotations Annotations for GRPC service for data
-      annotations: {}
-      ## @param cluster.data.grpcSvc.port Port number for GRPC service for data
-      port: 17912
-    ## @param cluster.data.sidecar Sidecar containers for data
-    ##
-    sidecar: []
-    ## Backup sidecar configuration
-    ##
-    backupSidecar:
-      ## @param cluster.data.backupSidecar.enabled Enable backup sidecar (boolean)
+    nodeTemplate:
+      ## @param cluster.data.nodeTemplate.replicas Number of data replicas by default
       ##
-      enabled: false
-      ## @param cluster.data.backupSidecar.dest Backup destination path
+      replicas: 2
+      ## @param cluster.data.nodeTemplate.podAnnotations Pod annotations for data pods
       ##
-      dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
-      ## @param cluster.data.backupSidecar.timeStyle Backup time style (e.g., daily)
+      podAnnotations: {}
+      ## @param cluster.data.nodeTemplate.securityContext Security context for data pods
       ##
-      timeStyle: "daily"
-      ## @param cluster.data.backupSidecar.schedule Backup schedule (cron format)
+      securityContext: {}
+      ## @param cluster.data.nodeTemplate.env Environment variables for data pods
       ##
-      schedule: "@hourly"
-      ## @param cluster.data.backupSidecar.resources Resources for backup sidecar
+      env: []
+      ## @param cluster.data.nodeTemplate.priorityClassName Priority class name for data pods
       ##
-      resources: {}
-    ## Restore init container configuration
-    ##
-    restoreInitContainer:
-      ## @param cluster.data.restoreInitContainer.enabled Enable restore init container (boolean)
+      priorityClassName: ""
+      ## Pod disruption budget for data pods
       ##
-      enabled: false
-      ## @param cluster.data.restoreInitContainer.resources Resources for restore init container
+      podDisruptionBudget:
+        ## @param cluster.data.nodeTemplate.podDisruptionBudget.maxUnavailable Maximum unavailable data pods
+        ##
+        maxUnavailable: 1
+      ## @param cluster.data.nodeTemplate.tolerations Tolerations for data pods
       ##
-      resources: {}
-    ## Liveness probe for data
-    ##
-    livenessProbe:
-      ## @param cluster.data.livenessProbe.initialDelaySeconds Initial delay for data liveness probe
-      initialDelaySeconds: 20
-      ## @param cluster.data.livenessProbe.periodSeconds Probe period for data liveness probe
-      periodSeconds: 30
-      ## @param cluster.data.livenessProbe.timeoutSeconds Timeout in seconds for data liveness probe
-      timeoutSeconds: 5
-      ## @param cluster.data.livenessProbe.successThreshold Success threshold for data liveness probe
-      successThreshold: 1
-      ## @param cluster.data.livenessProbe.failureThreshold Failure threshold for data liveness probe
-      failureThreshold: 5
-    ## Readiness probe for data
-    ##
-    readinessProbe:
-      ## @param cluster.data.readinessProbe.initialDelaySeconds Initial delay for data readiness probe
+      tolerations: []
+      ## @param cluster.data.nodeTemplate.nodeSelector Node selector for data pods
       ##
-      initialDelaySeconds: 20
-      ## @param cluster.data.readinessProbe.periodSeconds Probe period for data readiness probe
+      nodeSelector: []
+      ## @param cluster.data.nodeTemplate.affinity Affinity rules for data pods
       ##
-      periodSeconds: 30
-      ## @param cluster.data.readinessProbe.timeoutSeconds Timeout in seconds for data readiness probe
+      affinity: {}
+      ## @param cluster.data.nodeTemplate.podAffinityPreset Pod affinity preset for data pods
       ##
-      timeoutSeconds: 5
-      ## @param cluster.data.readinessProbe.successThreshold Success threshold for data readiness probe
+      podAffinityPreset: ""
+      ## @param cluster.data.nodeTemplate.podAntiAffinityPreset Pod anti-affinity preset for data pods
       ##
-      successThreshold: 1
-      ## @param cluster.data.readinessProbe.failureThreshold Failure threshold for data readiness probe
+      podAntiAffinityPreset: soft
+      ## Resource requests/limits for data pods
       ##
-      failureThreshold: 5
-    ## Startup probe for data
-    ##
-    startupProbe:
-      ## @param cluster.data.startupProbe.initialDelaySeconds Initial delay for data startup probe
+      resources:
+        ## @param cluster.data.nodeTemplate.resources.requests Resource requests for data pods
+        ##
+        requests: []
+        ## @param cluster.data.nodeTemplate.resources.limits Resource limits for data pods
+        ##
+        limits: []
+      ## GRPC service settings for data pods
       ##
-      initialDelaySeconds: 0
-      ## @param cluster.data.startupProbe.periodSeconds Probe period for data startup probe
+      grpcSvc:
+        ## @param cluster.data.nodeTemplate.grpcSvc.labels Labels for GRPC service for data pods
+        ##
+        labels: {}
+        ## @param cluster.data.nodeTemplate.grpcSvc.annotations Annotations for GRPC service for data pods
+        ##
+        annotations: {}
+        ## @param cluster.data.nodeTemplate.grpcSvc.port Port number for GRPC service for data pods
+        ##
+        port: 17912
+      ## @param cluster.data.nodeTemplate.sidecar Sidecar containers for data pods
       ##
-      periodSeconds: 10
-      ## @param cluster.data.startupProbe.timeoutSeconds Timeout in seconds for data startup probe
+      sidecar: []
+      ## Backup sidecar configuration for data pods
       ##
-      timeoutSeconds: 5
-      ## @param cluster.data.startupProbe.successThreshold Success threshold for data startup probe
+      backupSidecar:
+        ## @param cluster.data.nodeTemplate.backupSidecar.enabled Enable backup sidecar for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.backupSidecar.dest Backup destination path for data pods
+        ##
+        dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
+        ## @param cluster.data.nodeTemplate.backupSidecar.timeStyle Backup time style for data pods (e.g., daily)
+        ##
+        timeStyle: "daily"
+        ## @param cluster.data.nodeTemplate.backupSidecar.schedule Backup schedule for data pods (cron format)
+        ##
+        schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.backupSidecar.resources Resources for backup sidecar for data pods
+        ##
+        resources: {}
+      ## Lifecycle sidecar configuration for data pods
       ##
-      successThreshold: 1
-      ## @param cluster.data.startupProbe.failureThreshold Failure threshold for data startup probe
+      lifecycleSidecar:
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.enabled Enable lifecycle sidecar for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.schedule Schedule for lifecycle sidecar (cron format)
+        ##
+        schedule: "@hourly"
+        ## @param cluster.data.nodeTemplate.lifecycleSidecar.resources Resources for lifecycle sidecar for data pods
+        ##
+        resources: {}
+      ## Restore init container configuration for data pods
+      ##
+      restoreInitContainer:
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.enabled Enable restore init container for data pods (boolean)
+        ##
+        enabled: false
+        ## @param cluster.data.nodeTemplate.restoreInitContainer.resources Resources for restore init container for data pods
+        ##
+        resources: {}
+      ## Liveness probe for data pods
+      ##
+      livenessProbe:
+        ## @param cluster.data.nodeTemplate.livenessProbe.initialDelaySeconds Initial delay for data liveness probe
+        ##
+        initialDelaySeconds: 20
+        ## @param cluster.data.nodeTemplate.livenessProbe.periodSeconds Probe period for data liveness probe
+        ##
+        periodSeconds: 30
+        ## @param cluster.data.nodeTemplate.livenessProbe.timeoutSeconds Timeout in seconds for data liveness probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.livenessProbe.successThreshold Success threshold for data liveness probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.livenessProbe.failureThreshold Failure threshold for data liveness probe
+        ##
+        failureThreshold: 5
+      ## Readiness probe for data pods
+      ##
+      readinessProbe:
+        ## @param cluster.data.nodeTemplate.readinessProbe.initialDelaySeconds Initial delay for data readiness probe
+        ##
+        initialDelaySeconds: 20
+        ## @param cluster.data.nodeTemplate.readinessProbe.periodSeconds Probe period for data readiness probe
+        ##
+        periodSeconds: 30
+        ## @param cluster.data.nodeTemplate.readinessProbe.timeoutSeconds Timeout in seconds for data readiness probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.readinessProbe.successThreshold Success threshold for data readiness probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.readinessProbe.failureThreshold Failure threshold for data readiness probe
+        ##
+        failureThreshold: 5
+      ## Startup probe for data pods
+      ##
+      startupProbe:
+        ## @param cluster.data.nodeTemplate.startupProbe.initialDelaySeconds Initial delay for data startup probe
+        ##
+        initialDelaySeconds: 0
+        ## @param cluster.data.nodeTemplate.startupProbe.periodSeconds Probe period for data startup probe
+        ##
+        periodSeconds: 10
+        ## @param cluster.data.nodeTemplate.startupProbe.timeoutSeconds Timeout in seconds for data startup probe
+        ##
+        timeoutSeconds: 5
+        ## @param cluster.data.nodeTemplate.startupProbe.successThreshold Success threshold for data startup probe
+        ##
+        successThreshold: 1
+        ## @param cluster.data.nodeTemplate.startupProbe.failureThreshold Failure threshold for data startup probe
+        ##
+        failureThreshold: 60
+
+    ## @extra cluster.data.roles List of data roles (hot, warm, cold)
+    ##
+    roles:
+      ## @param cluster.data.roles.hot Hot data role
+      ##
+      hot: {}
+
   ## @section Configuration for UI component
   ##
   ui:
@@ -622,10 +653,13 @@ storage:
  ## @param storage.persistentVolumeClaims[0].mountTargets Mount targets for the PVC
  ##
  - mountTargets: [ "measure" ]
+    ## @param storage.persistentVolumeClaims[0].nodeRole Node role this PVC is bound to (hot, warm, cold)
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[0].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[0].claimName Name of the PVC
-    claimName: measure-data
+    claimName: hot-measure-data
    ## @param storage.persistentVolumeClaims[0].size Size of the PVC
    size: 50Gi
    ## @param storage.persistentVolumeClaims[0].accessModes Access modes for the PVC
@@ -637,10 +671,13 @@ storage:
     volumeMode: Filesystem
  ## @param storage.persistentVolumeClaims[1].mountTargets Mount targets for the PVC
  - mountTargets: [ "stream" ]
+    ## @param storage.persistentVolumeClaims[1].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[1].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[1].claimName Name of the PVC
-    claimName: stream-data
+    claimName: hot-stream-data
    ## @param storage.persistentVolumeClaims[1].size Size of the PVC
    size: 50Gi
    ## @param storage.persistentVolumeClaims[1].accessModes Access modes for the PVC
@@ -652,10 +689,13 @@ storage:
     volumeMode: Filesystem
  ## @param storage.persistentVolumeClaims[2].mountTargets Mount targets for the PVC
  - mountTargets: [ "property" ]
+    ## @param storage.persistentVolumeClaims[2].nodeRole Node role this PVC is bound to
+    ##
+    nodeRole: hot
    ## @param storage.persistentVolumeClaims[2].existingClaimName Existing PVC name (if any)
    existingClaimName: null
    ## @param storage.persistentVolumeClaims[2].claimName Name of the PVC
-    claimName: property-data
+    claimName: hot-property-data
    ## @param storage.persistentVolumeClaims[2].size Size of the PVC
    size: 5Gi
    ## @param storage.persistentVolumeClaims[2].accessModes Access modes for the PVC
@@ -724,6 +764,17 @@ etcd:
       ## @param etcd.auth.client.caFilename CA certificate filename for TLS
       ##
       caFilename: ""
+    ## Authentication token
+    ## ref: https://etcd.io/docs/latest/learning/design-auth-v3/#two-types-of-tokens-simple-and-jwt
+    ##
+    token:
+      ## @param etcd.auth.token.enabled Enables token authentication
+      ##
+      enabled: true
+      ## @param etcd.auth.token.type Authentication token type. Allowed values: 'simple' or 'jwt'
+      ## ref: https://etcd.io/docs/latest/op-guide/configuration/#--auth-token
+      ##
+      type: simple
   ## @section Liveness probe configuration for etcd
   ##
   livenessProbe:
@@ -736,3 +787,21 @@ etcd:
    ## @param etcd.readinessProbe.initialDelaySeconds Initial delay for readiness probe
     ##
     initialDelaySeconds: 10
+  ## @param etcd.autoCompactionMode Auto-compaction mode (periodic, revision)
+  ##
+  autoCompactionMode: periodic
+  ## @param etcd.autoCompactionRetention Auto-compaction retention period
+  ##
+  autoCompactionRetention: "1"
+  ## @extra etcd.defrag Configuration for defragmentation
+  ##
+  defrag:
+    ## @param etcd.defrag.enabled Enable defragmentation (boolean)
+    ##
+    enabled: true
+    ## @extra etcd.defrag.cronjob Cron job configuration for defragmentation
+    ##
+    cronjob:
+      ## @param etcd.defrag.cronjob.schedule Cron schedule for defragmentation
+      ##
+      schedule: "0 0 * * *"
diff --git a/doc/backup.md b/doc/backup.md
index db778e0..66edc15 100644
--- a/doc/backup.md
+++ b/doc/backup.md
@@ -17,10 +17,11 @@ Example configuration snippet:
 ```yaml
 cluster:
   data:
-    backupSidecar:
-      enabled: true
-      # Set the remote backup destination (e.g., file:///backups)
-      dest: "file:///backups"
+    nodeTemplate:
+      backupSidecar:
+        enabled: true
+        # Set the remote backup destination (e.g., file:///backups)
+        dest: "file:///backups"
 ```
 
 The backup sidecar container will run with an entrypoint similar to:
@@ -42,10 +43,12 @@ To enable restore:
   ```yaml
   cluster:
     data:
-      restoreInitContainer:
-        enabled: true
-        # Optionally, configure additional parameters such as:
-        command: [ "--source=file:///backups" ]
+      nodeTemplate:
+        # Enable the restore init container
+        restoreInitContainer:
+          enabled: true
+          # Optionally, configure additional parameters such as:
+          command: [ "--source=file:///backups" ]
   ```
 
- Ensure that the backup, restore, and main containers share the required volumes (e.g., for `/data/stream`, `/data/measure`, and `/data/property`). This is typically configured via the Kubernetes volume definitions in the StatefulSet.
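+
+As a rough illustration of wiring shared storage through the chart's `storage` values (the claim name and size below are placeholders, not chart defaults):
+
+```yaml
+storage:
+  enabled: true
+  persistentVolumeClaims:
+  # One hypothetical PVC backing all three mount targets, so the backup,
+  # restore, and main containers see the same data directories.
+  - mountTargets: [ "measure", "stream", "property" ]
+    claimName: shared-data
+    size: 50Gi
+    accessModes: [ "ReadWriteOnce" ]
+    storageClass: null
+    volumeMode: Filesystem
+```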
diff --git a/doc/lifecycle.md b/doc/lifecycle.md
new file mode 100644
index 0000000..ed570ef
--- /dev/null
+++ b/doc/lifecycle.md
@@ -0,0 +1,86 @@
+# Data Node Lifecycle Management on Kubernetes
+
+This guide explains how to configure and deploy SkyWalking BanyanDB data nodes with lifecycle management using the Helm chart in a Kubernetes environment.
+
+**Note:** Lifecycle management is only available in the Helm values file `values-lifecycle.yaml`. The default `values.yaml` does **not** include lifecycle management options.
+
+## 1. What is Lifecycle Management?
+
+Lifecycle management automates the movement and retention of data across different data node roles (hot, warm, cold) based on policies and schedules. This helps optimize storage usage and cost by moving less frequently accessed data to cheaper storage.
+
+## 2. Comparing `values.yaml` and `values-lifecycle.yaml`
+
+- **`values.yaml`**: Only defines basic data node roles (e.g., `hot`), without lifecycle management or scheduling.
+- **`values-lifecycle.yaml`**: Adds lifecycle management via the `lifecycleSidecar` for each data node role (`hot`, `warm`, `cold`). You can set schedules and enable/disable lifecycle management per role.
+
+## 3. Enabling Lifecycle Management
+
+To enable lifecycle management, use `values-lifecycle.yaml` and configure the `lifecycleSidecar` section under `cluster.data.nodeTemplate` or per role under `cluster.data.roles`.
+
+Example configuration snippet:
+
+```yaml
+cluster:
+  data:
+    roles:
+      hot:
+        lifecycleSidecar:
+          enabled: true
+          schedule: "@daily"
+      warm:
+        lifecycleSidecar:
+          enabled: true
+          schedule: "@daily"
+      cold:
+        # cold nodes may not need lifecycle management
+        lifecycleSidecar:
+          enabled: false
+```
+
+- The `lifecycleSidecar` runs as a sidecar container in each data pod.
+- You can override the schedule and enablement per role (`hot`, `warm`, `cold`); a minimal sketch of how a role override layers on the `nodeTemplate` defaults follows.
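+
+The values below are illustrative only, not chart defaults:
+
+```yaml
+cluster:
+  data:
+    nodeTemplate:
+      lifecycleSidecar:
+        enabled: false       # default inherited by every role
+        schedule: "@hourly"
+    roles:
+      hot:
+        lifecycleSidecar:
+          enabled: true      # role-level setting takes precedence for hot nodes
+```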
+
+## 4. How Lifecycle Management Works
+
+- The lifecycle sidecar periodically runs according to the configured schedule.
+- It manages data retention, migration, and cleanup based on the node's role and policy.
+- For example, data may be moved from hot to warm nodes, or deleted from cold nodes after a retention period; the log-inspection sketch below is one way to watch a run.
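+
+This sketch assumes the sidecar container is named `lifecycle-sidecar`; verify the exact name in your pod spec first, as it may differ:
+
+```sh
+# Find a data pod, then follow its lifecycle sidecar's log output
+kubectl get pods -l app.kubernetes.io/name=banyandb
+kubectl logs -f <pod-name> -c lifecycle-sidecar
+```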
+
+## 5. Deploying with Lifecycle Management
+
+1. **Choose the correct values file**: Use `values-lifecycle.yaml` when installing or upgrading the Helm chart.
+2. **Customize lifecycle settings**: Edit the `lifecycleSidecar` section as needed for your use case.
+3. **Install or upgrade the chart**:
+
+   ```sh
+   helm install banyandb ./chart -f values-lifecycle.yaml
+   # or, for upgrade:
+   helm upgrade banyandb ./chart -f values-lifecycle.yaml
+   ```
+
+4. **Verify deployment**: Check that data pods have the lifecycle sidecar running:
+
+   ```sh
+   kubectl get pods -l app.kubernetes.io/name=banyandb
+   kubectl describe pod <pod-name>
+   ```
+
+   You should see a container with a name similar to `lifecycle-sidecar` in each data pod.
+
+## 6. Disabling Lifecycle Management
+
+To disable lifecycle management for all or specific roles, set `enabled: false` under the relevant `lifecycleSidecar` section.
+
+```yaml
+cluster:
+  data:
+    nodeTemplate:
+      lifecycleSidecar:
+        enabled: false
+    roles:
+      hot:
+        lifecycleSidecar:
+          enabled: false
+```
+
+For more details, refer to the comments in `values-lifecycle.yaml` and the [official documentation](https://skywalking.apache.org/docs/).
diff --git a/doc/parameters.md b/doc/parameters.md
index 3225506..859af6e 100644
--- a/doc/parameters.md
+++ b/doc/parameters.md
@@ -125,48 +125,52 @@ The content of this document describes the parameters that can be configured in
 
 ### Configuration for data component
 
-| Name | Description | Value |
-| --- | --- | --- |
-| `cluster.data.name` | Name of the data component | `banyandb` |
-| `cluster.data.replicas` | Number of data replicas | `3` |
-| `cluster.data.podAnnotations` | Pod annotations for data component | `{}` |
-| `cluster.data.securityContext` | Security context for data pods | `{}` |
-| `cluster.data.env` | Environment variables for data pods | `[]` |
-| `cluster.data.priorityClassName` | Priority class name for data pods | `""` |
-| `cluster.data.podDisruptionBudget.maxUnavailable` | Maximum unavailable pods for data component | `1` |
-| `cluster.data.tolerations` | Tolerations for data pods | `[]` |
-| `cluster.data.nodeSelector` | Node selector for data pods | `[]` |
-| `cluster.data.affinity` | Affinity rules for data pods | `{}` |
-| `cluster.data.podAffinityPreset` | Pod affinity preset for data | `""` |
-| `cluster.data.podAntiAffinityPreset` | Pod anti-affinity preset for data | `soft` |
-| `cluster.data.resources.requests` | Resource requests for data pods | `[]` |
-| `cluster.data.resources.limits` | Resource limits for data pods | `[]` |
-| `cluster.data.grpcSvc.labels` | Labels for GRPC service for data | `{}` |
-| `cluster.data.grpcSvc.annotations` | Annotations for GRPC service for data | `{}` |
-| `cluster.data.grpcSvc.port` | Port number for GRPC service for data | `17912` |
-| `cluster.data.sidecar` | Sidecar containers for data | `[]` |
-| `cluster.data.backupSidecar.enabled` | Enable backup sidecar (boolean) | `false` |
-| `cluster.data.backupSidecar.dest` | Backup destination path | `file:///tmp/backups/data-$(ORDINAL_NUMBER)` |
-| `cluster.data.backupSidecar.timeStyle` | Backup time style (e.g., daily) | `daily` |
-| `cluster.data.backupSidecar.schedule` | Backup schedule (cron format) | `@hourly` |
-| `cluster.data.backupSidecar.resources` | Resources for backup sidecar | `{}` |
-| `cluster.data.restoreInitContainer.enabled` | Enable restore init container (boolean) | `false` |
-| `cluster.data.restoreInitContainer.resources` | Resources for restore init container | `{}` |
-| `cluster.data.livenessProbe.initialDelaySeconds` | Initial delay for data liveness probe | `20` |
-| `cluster.data.livenessProbe.periodSeconds` | Probe period for data liveness probe | `30` |
-| `cluster.data.livenessProbe.timeoutSeconds` | Timeout in seconds for data liveness probe | `5` |
-| `cluster.data.livenessProbe.successThreshold` | Success threshold for data liveness probe | `1` |
-| `cluster.data.livenessProbe.failureThreshold` | Failure threshold for data liveness probe | `5` |
-| `cluster.data.readinessProbe.initialDelaySeconds` | Initial delay for data readiness probe | `20` |
-| `cluster.data.readinessProbe.periodSeconds` | Probe period for data readiness probe | `30` |
-| `cluster.data.readinessProbe.timeoutSeconds` | Timeout in seconds for data readiness probe | `5` |
-| `cluster.data.readinessProbe.successThreshold` | Success threshold for data readiness probe | `1` |
-| `cluster.data.readinessProbe.failureThreshold` | Failure threshold for data readiness probe | `5` |
-| `cluster.data.startupProbe.initialDelaySeconds` | Initial delay for data startup probe | `0` |
-| `cluster.data.startupProbe.periodSeconds` | Probe period for data startup probe | `10` |
-| `cluster.data.startupProbe.timeoutSeconds` | Timeout in seconds for data startup probe | `5` |
-| `cluster.data.startupProbe.successThreshold` | Success threshold for data startup probe | `1` |
-| `cluster.data.startupProbe.failureThreshold` | Failure threshold for data startup probe | `60` |
+| Name                                                           | Description 
                                          | Value                               
         |
+| -------------------------------------------------------------- | 
----------------------------------------------------- | 
-------------------------------------------- |
+| `cluster.data.nodeTemplate.replicas`                           | Number of 
data replicas by default                    | `2`                               
           |
+| `cluster.data.nodeTemplate.podAnnotations`                     | Pod 
annotations for data pods                         | `{}`                        
                 |
+| `cluster.data.nodeTemplate.securityContext`                    | Security 
context for data pods                        | `{}`                             
            |
+| `cluster.data.nodeTemplate.env`                                | Environment 
variables for data pods                   | `[]`                                
         |
+| `cluster.data.nodeTemplate.priorityClassName`                  | Priority 
class name for data pods                     | `""`                             
            |
+| `cluster.data.nodeTemplate.podDisruptionBudget.maxUnavailable` | Maximum 
unavailable data pods                         | `1`                             
             |
+| `cluster.data.nodeTemplate.tolerations`                        | Tolerations 
for data pods                             | `[]`                                
         |
+| `cluster.data.nodeTemplate.nodeSelector`                       | Node 
selector for data pods                           | `[]`                         
                |
+| `cluster.data.nodeTemplate.affinity`                           | Affinity 
rules for data pods                          | `{}`                             
            |
+| `cluster.data.nodeTemplate.podAffinityPreset`                  | Pod 
affinity preset for data pods                     | `""`                        
                 |
+| `cluster.data.nodeTemplate.podAntiAffinityPreset`              | Pod 
anti-affinity preset for data pods                | `soft`                      
                 |
+| `cluster.data.nodeTemplate.resources.requests`                 | Resource 
requests for data pods                       | `[]`                             
            |
+| `cluster.data.nodeTemplate.resources.limits`                   | Resource 
limits for data pods                          | `[]`                                         |
+| `cluster.data.nodeTemplate.grpcSvc.labels`                     | Labels for gRPC service for data pods                 | `{}`                                         |
+| `cluster.data.nodeTemplate.grpcSvc.annotations`                | Annotations for gRPC service for data pods            | `{}`                                         |
+| `cluster.data.nodeTemplate.grpcSvc.port`                       | Port number for gRPC service for data pods            | `17912`                                      |
+| `cluster.data.nodeTemplate.sidecar`                            | Sidecar containers for data pods                      | `[]`                                         |
+| `cluster.data.nodeTemplate.backupSidecar.enabled`              | Enable backup sidecar for data pods (boolean)         | `false`                                      |
+| `cluster.data.nodeTemplate.backupSidecar.dest`                 | Backup destination path for data pods                 | `file:///tmp/backups/data-$(ORDINAL_NUMBER)` |
+| `cluster.data.nodeTemplate.backupSidecar.timeStyle`            | Backup time style for data pods (e.g., daily)         | `daily`                                      |
+| `cluster.data.nodeTemplate.backupSidecar.schedule`             | Backup schedule for data pods (cron format)           | `@hourly`                                    |
+| `cluster.data.nodeTemplate.backupSidecar.resources`            | Resources for backup sidecar for data pods            | `{}`                                         |
+| `cluster.data.nodeTemplate.lifecycleSidecar.enabled`           | Enable lifecycle sidecar for data pods (boolean)      | `false`                                      |
+| `cluster.data.nodeTemplate.lifecycleSidecar.schedule`          | Schedule for lifecycle sidecar (cron format)          | `@hourly`                                    |
+| `cluster.data.nodeTemplate.lifecycleSidecar.resources`         | Resources for lifecycle sidecar for data pods         | `{}`                                         |
+| `cluster.data.nodeTemplate.restoreInitContainer.enabled`       | Enable restore init container for data pods (boolean) | `false`                                      |
+| `cluster.data.nodeTemplate.restoreInitContainer.resources`     | Resources for restore init container for data pods    | `{}`                                         |
+| `cluster.data.nodeTemplate.livenessProbe.initialDelaySeconds`  | Initial delay for data liveness probe                 | `20`                                         |
+| `cluster.data.nodeTemplate.livenessProbe.periodSeconds`        | Probe period for data liveness probe                  | `30`                                         |
+| `cluster.data.nodeTemplate.livenessProbe.timeoutSeconds`       | Timeout in seconds for data liveness probe            | `5`                                          |
+| `cluster.data.nodeTemplate.livenessProbe.successThreshold`     | Success threshold for data liveness probe             | `1`                                          |
+| `cluster.data.nodeTemplate.livenessProbe.failureThreshold`     | Failure threshold for data liveness probe             | `5`                                          |
+| `cluster.data.nodeTemplate.readinessProbe.initialDelaySeconds` | Initial delay for data readiness probe                | `20`                                         |
+| `cluster.data.nodeTemplate.readinessProbe.periodSeconds`       | Probe period for data readiness probe                 | `30`                                         |
+| `cluster.data.nodeTemplate.readinessProbe.timeoutSeconds`      | Timeout in seconds for data readiness probe           | `5`                                          |
+| `cluster.data.nodeTemplate.readinessProbe.successThreshold`    | Success threshold for data readiness probe            | `1`                                          |
+| `cluster.data.nodeTemplate.readinessProbe.failureThreshold`    | Failure threshold for data readiness probe            | `5`                                          |
+| `cluster.data.nodeTemplate.startupProbe.initialDelaySeconds`   | Initial delay for data startup probe                  | `0`                                          |
+| `cluster.data.nodeTemplate.startupProbe.periodSeconds`         | Probe period for data startup probe                   | `10`                                         |
+| `cluster.data.nodeTemplate.startupProbe.timeoutSeconds`        | Timeout in seconds for data startup probe             | `5`                                          |
+| `cluster.data.nodeTemplate.startupProbe.successThreshold`      | Success threshold for data startup probe              | `1`                                          |
+| `cluster.data.nodeTemplate.startupProbe.failureThreshold`      | Failure threshold for data startup probe              | `60`                                         |
+| `cluster.data.roles`                                           | List of data roles (hot, warm, cold)                  |                                              |
+| `cluster.data.roles.hot`                                       | Hot data role                                         | `{}`                                         |
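Putting these knobs together, a minimal `values.yaml` sketch that turns on both the backup and lifecycle sidecars for hot data nodes might look like this (the `replicas` count is illustrative; the remaining keys and defaults come from the table above):

```yaml
cluster:
  data:
    nodeTemplate:
      replicas: 2                       # illustrative value, not a chart default
      backupSidecar:
        enabled: true
        dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
        timeStyle: "daily"
        schedule: "@hourly"
      lifecycleSidecar:
        enabled: true
        schedule: "@hourly"
    roles:
      hot: {}                           # hot nodes inherit the template as-is
```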
 
 ### Configuration for UI component
 
@@ -219,31 +223,34 @@ The content of this document describes the parameters that can be configured in
 
 ### Storage configuration for persistent volumes
 
-| Name                                                  | Description                         | Value               |
-| ----------------------------------------------------- | ----------------------------------- | ------------------- |
-| `storage.enabled`                                     | Enable persistent storage (boolean) | `false`             |
-| `storage.persistentVolumeClaims`                      | List of PVC configurations          |                     |
-| `storage.persistentVolumeClaims[0].mountTargets`      | Mount targets for the PVC           | `["measure"]`       |
-| `storage.persistentVolumeClaims[0].existingClaimName` | Existing PVC name (if any)          | `nil`               |
-| `storage.persistentVolumeClaims[0].claimName`         | Name of the PVC                     | `measure-data`      |
-| `storage.persistentVolumeClaims[0].size`              | Size of the PVC                     | `50Gi`              |
-| `storage.persistentVolumeClaims[0].accessModes`       | Access modes for the PVC            | `["ReadWriteOnce"]` |
-| `storage.persistentVolumeClaims[0].storageClass`      | Storage class for the PVC           | `nil`               |
-| `storage.persistentVolumeClaims[0].volumeMode`        | Volume mode for the PVC             | `Filesystem`        |
-| `storage.persistentVolumeClaims[1].mountTargets`      | Mount targets for the PVC           | `["stream"]`        |
-| `storage.persistentVolumeClaims[1].existingClaimName` | Existing PVC name (if any)          | `nil`               |
-| `storage.persistentVolumeClaims[1].claimName`         | Name of the PVC                     | `stream-data`       |
-| `storage.persistentVolumeClaims[1].size`              | Size of the PVC                     | `50Gi`              |
-| `storage.persistentVolumeClaims[1].accessModes`       | Access modes for the PVC            | `["ReadWriteOnce"]` |
-| `storage.persistentVolumeClaims[1].storageClass`      | Storage class for the PVC           | `nil`               |
-| `storage.persistentVolumeClaims[1].volumeMode`        | Volume mode for the PVC             | `Filesystem`        |
-| `storage.persistentVolumeClaims[2].mountTargets`      | Mount targets for the PVC           | `["property"]`      |
-| `storage.persistentVolumeClaims[2].existingClaimName` | Existing PVC name (if any)          | `nil`               |
-| `storage.persistentVolumeClaims[2].claimName`         | Name of the PVC                     | `property-data`     |
-| `storage.persistentVolumeClaims[2].size`              | Size of the PVC                     | `5Gi`               |
-| `storage.persistentVolumeClaims[2].accessModes`       | Access modes for the PVC            | `["ReadWriteOnce"]` |
-| `storage.persistentVolumeClaims[2].storageClass`      | Storage class for the PVC           | `nil`               |
-| `storage.persistentVolumeClaims[2].volumeMode`        | Volume mode for the PVC             | `Filesystem`        |
+| Name                                                  | Description                                       | Value               |
+| ----------------------------------------------------- | ------------------------------------------------- | ------------------- |
+| `storage.enabled`                                     | Enable persistent storage (boolean)               | `false`             |
+| `storage.persistentVolumeClaims`                      | List of PVC configurations                        |                     |
+| `storage.persistentVolumeClaims[0].mountTargets`      | Mount targets for the PVC                         | `["measure"]`       |
+| `storage.persistentVolumeClaims[0].nodeRole`          | Node role this PVC is bound to (hot, warm, cold)  | `hot`               |
+| `storage.persistentVolumeClaims[0].existingClaimName` | Existing PVC name (if any)                        | `nil`               |
+| `storage.persistentVolumeClaims[0].claimName`         | Name of the PVC                                   | `hot-measure-data`  |
+| `storage.persistentVolumeClaims[0].size`              | Size of the PVC                                   | `50Gi`              |
+| `storage.persistentVolumeClaims[0].accessModes`       | Access modes for the PVC                          | `["ReadWriteOnce"]` |
+| `storage.persistentVolumeClaims[0].storageClass`      | Storage class for the PVC                         | `nil`               |
+| `storage.persistentVolumeClaims[0].volumeMode`        | Volume mode for the PVC                           | `Filesystem`        |
+| `storage.persistentVolumeClaims[1].mountTargets`      | Mount targets for the PVC                         | `["stream"]`        |
+| `storage.persistentVolumeClaims[1].nodeRole`          | Node role this PVC is bound to                    | `hot`               |
+| `storage.persistentVolumeClaims[1].existingClaimName` | Existing PVC name (if any)                        | `nil`               |
+| `storage.persistentVolumeClaims[1].claimName`         | Name of the PVC                                   | `hot-stream-data`   |
+| `storage.persistentVolumeClaims[1].size`              | Size of the PVC                                   | `50Gi`              |
+| `storage.persistentVolumeClaims[1].accessModes`       | Access modes for the PVC                          | `["ReadWriteOnce"]` |
+| `storage.persistentVolumeClaims[1].storageClass`      | Storage class for the PVC                         | `nil`               |
+| `storage.persistentVolumeClaims[1].volumeMode`        | Volume mode for the PVC                           | `Filesystem`        |
+| `storage.persistentVolumeClaims[2].mountTargets`      | Mount targets for the PVC                         | `["property"]`      |
+| `storage.persistentVolumeClaims[2].nodeRole`          | Node role this PVC is bound to                    | `hot`               |
+| `storage.persistentVolumeClaims[2].existingClaimName` | Existing PVC name (if any)                        | `nil`               |
+| `storage.persistentVolumeClaims[2].claimName`         | Name of the PVC                                   | `hot-property-data` |
+| `storage.persistentVolumeClaims[2].size`              | Size of the PVC                                   | `5Gi`               |
+| `storage.persistentVolumeClaims[2].accessModes`       | Access modes for the PVC                          | `["ReadWriteOnce"]` |
+| `storage.persistentVolumeClaims[2].storageClass`      | Storage class for the PVC                         | `nil`               |
+| `storage.persistentVolumeClaims[2].volumeMode`        | Volume mode for the PVC                           | `Filesystem`        |
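As a sketch, a single-claim storage block that gives the measure data of hot nodes its own volume could look like the following (names and sizes mirror the defaults above; `storageClass: null` falls back to the cluster's default class):

```yaml
storage:
  enabled: true
  persistentVolumeClaims:
    - mountTargets: ["measure"]
      nodeRole: hot                  # bind this claim to hot data nodes
      existingClaimName: null        # set to reuse a pre-created PVC instead
      claimName: hot-measure-data
      size: 50Gi
      accessModes: ["ReadWriteOnce"]
      storageClass: null             # null selects the cluster default class
      volumeMode: Filesystem
```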
 
 ### Service account configuration
 
@@ -273,14 +280,16 @@ The content of this document describes the parameters that can be configured in
 
 ### Client TLS configuration
 
-| Name                                    | Description                                    | Value     |
-| --------------------------------------- | ---------------------------------------------- | --------- |
-| `etcd.auth.client.secureTransport`      | Enable TLS for client communication (boolean)  | `false`   |
-| `etcd.auth.client.existingSecret`       | Existing secret containing TLS certs           | `""`      |
-| `etcd.auth.client.enableAuthentication` | Enable client authentication (boolean)         | `false`   |
-| `etcd.auth.client.certFilename`         | Client certificate filename                    | `tls.crt` |
-| `etcd.auth.client.certKeyFilename`      | Client certificate key filename                | `tls.key` |
-| `etcd.auth.client.caFilename`           | CA certificate filename for TLS                | `""`      |
+| Name                                    | Description                                                   | Value     |
+| --------------------------------------- | ------------------------------------------------------------- | --------- |
+| `etcd.auth.client.secureTransport`      | Enable TLS for client communication (boolean)                 | `false`   |
+| `etcd.auth.client.existingSecret`       | Existing secret containing TLS certs                          | `""`      |
+| `etcd.auth.client.enableAuthentication` | Enable client authentication (boolean)                        | `false`   |
+| `etcd.auth.client.certFilename`         | Client certificate filename                                   | `tls.crt` |
+| `etcd.auth.client.certKeyFilename`      | Client certificate key filename                               | `tls.key` |
+| `etcd.auth.client.caFilename`           | CA certificate filename for TLS                               | `""`      |
+| `etcd.auth.token.enabled`               | Enable token authentication (boolean)                         | `true`    |
+| `etcd.auth.token.type`                  | Authentication token type. Allowed values: 'simple' or 'jwt'  | `simple`  |
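For example, to enable TLS on the etcd client connection while keeping simple token authentication, a values snippet might look like this (the secret name `etcd-client-tls` and the `ca.crt` filename are placeholders for your own certificates):

```yaml
etcd:
  auth:
    client:
      secureTransport: true              # serve client traffic over TLS
      existingSecret: "etcd-client-tls"  # placeholder: your TLS secret name
      enableAuthentication: true         # require client certificates
      certFilename: "tls.crt"
      certKeyFilename: "tls.key"
      caFilename: "ca.crt"               # placeholder: CA file inside the secret
    token:
      enabled: true
      type: "simple"                     # or "jwt"
```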
 
 ### Liveness probe configuration for etcd
 
@@ -290,6 +299,12 @@ The content of this document describes the parameters that can be configured in
 
 ### Readiness probe configuration for etcd
 
-| Name                                      | Description                       | Value |
-| ----------------------------------------- | --------------------------------- | ----- |
-| `etcd.readinessProbe.initialDelaySeconds` | Initial delay for readiness probe | `10`  |
+| Name                                      | Description                                 | Value       |
+| ----------------------------------------- | ------------------------------------------- | ----------- |
+| `etcd.readinessProbe.initialDelaySeconds` | Initial delay for readiness probe           | `10`        |
+| `etcd.autoCompactionMode`                 | Auto-compaction mode (periodic, revision)   | `periodic`  |
+| `etcd.autoCompactionRetention`            | Auto-compaction retention period            | `1`         |
+| `etcd.defrag`                             | Configuration for defragmentation           |             |
+| `etcd.defrag.enabled`                     | Enable defragmentation (boolean)            | `true`      |
+| `etcd.defrag.cronjob`                     | Cron job configuration for defragmentation  |             |
+| `etcd.defrag.cronjob.schedule`            | Cron schedule for defragmentation           | `0 0 * * *` |
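A small sketch combining the defaults above, hourly periodic compaction plus a defragmentation job at midnight (in `periodic` mode etcd interprets the retention value as hours):

```yaml
etcd:
  autoCompactionMode: periodic      # compact by time window ("revision" also allowed)
  autoCompactionRetention: "1"      # keep 1 hour of history in periodic mode
  defrag:
    enabled: true
    cronjob:
      schedule: "0 0 * * *"         # run defragmentation daily at midnight
```
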
diff --git a/test/e2e/values.cluster.yaml b/test/e2e/values.cluster.yaml
index 30a8828..ed23110 100644
--- a/test/e2e/values.cluster.yaml
+++ b/test/e2e/values.cluster.yaml
@@ -148,113 +148,116 @@ cluster:
       failureThreshold: 60
 
   data:
-    replicas: 1
-    podAnnotations:
-      example: banyandb-foo
-    securityContext: {}
-    # runAsUser: 1000
-    # runAsGroup: 1000
-    # fsGroup: 1000
-    env: []
-    # - name: BANYANDB_FOO
-    #   value: "bar"
-    priorityClassName: ""
-    podDisruptionBudget:
-      maxUnavailable: 1
-    # minAvailable: 1
-    # maxUnavailable: 2
-    # matchLabels:
-    #   - key: foo
-    #     value: bar
-    # matchExpressions:
-    #   - key: foo
-    #     operator: In
-    #     values: [bar, baz]
-    # paused: false
-    tolerations: []
-    # - key: foo
-    #   value: bar
-    #   operator: Equal
-    #   effect: NoSchedule
-    nodeSelector: []
-    # - key: foo
-    #   value: bar
-    affinity: {}
-    # nodeAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     nodeSelectorTerms:
-    #       - matchExpressions:
-    #           - key: foo
-    #             operator: In
-    #             values:
-    #               - bar
-    #               - baz
-    # podAffinity:
-    #   requiredDuringSchedulingIgnoredDuringExecution:
-    #     - labelSelector:
-    #         matchLabels:
-    #           - key: app
-    #             value: banyandb
-    #       topologyKey: "kubernetes.io/hostname"
-    #       namespaces: []
-    podAffinityPreset: ""
-    podAntiAffinityPreset: soft
-    resources:
-      requests: []
-      # - key: cpu
-      #   value: "100m"
-      # - key: memory
-      #   value: "256Mi"
-      limits: []
-      # - key: cpu
-      #   value: "500m"
-      # - key: memory
-      #   value: "512Mi"
-      # tls:
-      #   grpcSecretName: ""
-      #   etcdSecretName: ""
+    nodeTemplate:
+      replicas: 1
+      podAnnotations:
+        example: banyandb-foo
+      securityContext: {}
+      # runAsUser: 1000
+      # runAsGroup: 1000
+      # fsGroup: 1000
+      env: []
+      # - name: BANYANDB_FOO
+      #   value: "bar"
+      priorityClassName: ""
+      podDisruptionBudget:
+        maxUnavailable: 1
+      # minAvailable: 1
+      # maxUnavailable: 2
+      # matchLabels:
+      #   - key: foo
+      #     value: bar
+      # matchExpressions:
+      #   - key: foo
+      #     operator: In
+      #     values: [bar, baz]
+      # paused: false
+      tolerations: []
+      # - key: foo
+      #   value: bar
+      #   operator: Equal
+      #   effect: NoSchedule
+      nodeSelector: []
+      # - key: foo
+      #   value: bar
+      affinity: {}
+      # nodeAffinity:
+      #   requiredDuringSchedulingIgnoredDuringExecution:
+      #     nodeSelectorTerms:
+      #       - matchExpressions:
+      #           - key: foo
+      #             operator: In
+      #             values:
+      #               - bar
+      #               - baz
+      # podAffinity:
+      #   requiredDuringSchedulingIgnoredDuringExecution:
+      #     - labelSelector:
+      #         matchLabels:
+      #           - key: app
+      #             value: banyandb
+      #       topologyKey: "kubernetes.io/hostname"
+      #       namespaces: []
+      podAffinityPreset: ""
+      podAntiAffinityPreset: soft
+      resources:
+        requests: []
+        # - key: cpu
+        #   value: "100m"
+        # - key: memory
+        #   value: "256Mi"
+        limits: []
+        # - key: cpu
+        #   value: "500m"
+        # - key: memory
+        #   value: "512Mi"
+        # tls:
+        #   grpcSecretName: ""
+        #   etcdSecretName: ""
 
-    grpcSvc:
-      labels: {}
-      annotations: {}
-      port: 17912
+      grpcSvc:
+        labels: {}
+        annotations: {}
+        port: 17912
 
-    sidecar: []
-    # - name: cleanup-sidecar
-    #   image: busybox:latest
-    #   imagePullPolicy: IfNotPresent
-    #   commands: 
-    #     normal: ["sh", "-c", "while true; do echo 'sidecar task'; sleep 60; done"]
-    #     preStop: ["sh", "-c", "echo cleanup"]
-    backupSidecar:
-      enabled: true 
-      dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
-      timeStyle: "daily"
-      schedule: "@every 10s"
-      resources: {}
-    restoreInitContainer:
-      enabled: true
-      resources: {}
-    livenessProbe:
-      initialDelaySeconds: 20
-      periodSeconds: 5
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 60
+      sidecar: []
+      # - name: cleanup-sidecar
+      #   image: busybox:latest
+      #   imagePullPolicy: IfNotPresent
+      #   commands: 
+      #     normal: ["sh", "-c", "while true; do echo 'sidecar task'; sleep 60; done"]
+      #     preStop: ["sh", "-c", "echo cleanup"]
+      backupSidecar:
+        enabled: true 
+        dest: "file:///tmp/backups/data-$(ORDINAL_NUMBER)"
+        timeStyle: "daily"
+        schedule: "@every 10s"
+        resources: {}
+      restoreInitContainer:
+        enabled: true
+        resources: {}
+      livenessProbe:
+        initialDelaySeconds: 20
+        periodSeconds: 5
+        timeoutSeconds: 5
+        successThreshold: 1
+        failureThreshold: 60
 
-    readinessProbe:
-      initialDelaySeconds: 20
-      periodSeconds: 5
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 60
+      readinessProbe:
+        initialDelaySeconds: 20
+        periodSeconds: 5
+        timeoutSeconds: 5
+        successThreshold: 1
+        failureThreshold: 60
 
-    startupProbe:
-      initialDelaySeconds: 0
-      periodSeconds: 5
-      timeoutSeconds: 5
-      successThreshold: 1
-      failureThreshold: 60
+      startupProbe:
+        initialDelaySeconds: 0
+        periodSeconds: 5
+        timeoutSeconds: 5
+        successThreshold: 1
+        failureThreshold: 60
+    roles:
+      hot: {}
   ui:
     # Available UI type: 
     # None: Disable UI
