This is an automated email from the ASF dual-hosted git repository.

adheipsingh pushed a commit to branch 1.3.0-volume-attribute-className
in repository https://gitbox.apache.org/repos/asf/druid-operator.git

commit e1482cfc757cb58ddf830e7b4214c4b4563c14c2
Author: AdheipSingh <[email protected]>
AuthorDate: Wed Oct 8 17:36:45 2025 +0530

    add examples, clean up logs
---
 controllers/druid/volume_claim_template_update.go |   2 -
 e2e/configs/druid-cr-vacn.yaml                    | 321 ++++++++++++++++++++++
 e2e/configs/volume-attributes-class.yaml          |  13 +-
 3 files changed, 327 insertions(+), 9 deletions(-)

diff --git a/controllers/druid/volume_claim_template_update.go 
b/controllers/druid/volume_claim_template_update.go
index d33ce84..df8293b 100644
--- a/controllers/druid/volume_claim_template_update.go
+++ b/controllers/druid/volume_claim_template_update.go
@@ -49,7 +49,6 @@ func patchStatefulSetVolumeClaimTemplates(ctx 
context.Context, sdk client.Client
 
        // Check for VolumeClaimTemplate changes
        changes := detectVolumeClaimTemplateChanges(statefulSet, nodeSpec)
-       fmt.Println(changes)
        if !changes.HasChanges {
                return false, nil
        }
@@ -101,7 +100,6 @@ func detectVolumeClaimTemplateChanges(sts 
*appsv1.StatefulSet, nodeSpec *v1alpha
                VolumeAttributeClassChanges: make(map[string]bool),
        }
 
-       fmt.Println(sts.Name)
        var changeDetails []string
 
        // Create maps of current VCT configuration
diff --git a/e2e/configs/druid-cr-vacn.yaml b/e2e/configs/druid-cr-vacn.yaml
new file mode 100644
index 0000000..41945ce
--- /dev/null
+++ b/e2e/configs/druid-cr-vacn.yaml
@@ -0,0 +1,321 @@
+apiVersion: "druid.apache.org/v1alpha1"
+kind: "Druid"
+metadata:
+  name: tiny-cluster
+spec:
+  image: apache/druid:32.0.0
+  # Optionally specify image for all nodes. Can be specified on nodes also
+  # imagePullSecrets:
+  # - name: tutu
+  startScript: /druid.sh
+  podLabels:
+    environment: stage
+    release: alpha
+  podAnnotations:
+    dummy: k8s_extn_needs_atleast_one_annotation
+  workloadAnnotations:
+    kubernetes.io/cluster-scoped-annotation: "cluster"
+  readinessProbe:
+    httpGet:
+      path: /status/health
+      port: 8088
+  additionalContainer:
+  - containerName: mysqlconnector 
+    runAsInit: true
+    image: apache/druid:25.0.0
+    command:
+    - "sh"
+    - "-c"
+    - "wget -O /tmp/mysql-connector-j-8.0.32.tar.gz 
https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-j-8.0.32.tar.gz
 && cd /tmp && tar -xf /tmp/mysql-connector-j-8.0.32.tar.gz && cp 
/tmp/mysql-connector-j-8.0.32/mysql-connector-j-8.0.32.jar 
/opt/druid/extensions/mysql-connector/mysql-connector-java.jar"
+    volumeMounts:
+    - name: mysqlconnector
+      mountPath: "/opt/druid/extensions/mysql-connector"  
+  volumes:
+  - name: mysqlconnector
+    emptyDir: {} 
+  volumeMounts:
+  - name: mysqlconnector
+    mountPath: "/opt/druid/extensions/mysql-connector"
+  securityContext:
+    fsGroup: 0
+    runAsUser: 0
+    runAsGroup: 0
+  containerSecurityContext:
+    privileged: true
+  services:
+    - spec:
+        type: ClusterIP
+        clusterIP: None
+  commonConfigMountPath: "/opt/druid/conf/druid/cluster/_common"
+  jvm.options: |-
+    -server
+    -XX:MaxDirectMemorySize=10240g
+    -Duser.timezone=UTC
+    -Dfile.encoding=UTF-8
+    -Dlog4j.debug
+    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+  log4j.config: |-
+    <?xml version="1.0" encoding="UTF-8" ?>
+    <Configuration status="WARN">
+        <Appenders>
+            <Http name="Http" url="https://demo.parseable.com/api/v1/ingest" method="POST">
+              <Property name="Authorization" value="Basic YWRtaW46YWRtaW4=" />
+              <Property name="X-P-Stream" value="druide2e" />
+              <Property name="Accept" value="application/json" />
+              <Property name="X-Java-Runtime" value="$${java:runtime}" />
+              <JsonLayout properties="true"/>
+            </Http>
+        </Appenders>
+        <Loggers>
+            <Root level="info">
+                <AppenderRef ref="Http"/>
+            </Root>
+        </Loggers>
+    </Configuration>
+  common.runtime.properties: |
+    #
+    # Zookeeper-less Druid Cluster
+    #
+    druid.zk.service.enabled=false
+    druid.discovery.type=k8s
+    druid.discovery.k8s.clusterIdentifier=druid-it
+    druid.serverview.type=http
+    druid.coordinator.loadqueuepeon.type=http
+    druid.indexer.runner.type=httpRemote
+    # Metadata Store
+    druid.metadata.storage.type=derby
+    
druid.metadata.storage.connector.connectURI=jdbc:derby://localhost:1527/var/druid/metadata.db;create=true
+    druid.metadata.storage.connector.host=localhost
+    druid.metadata.storage.connector.port=1527
+    druid.metadata.storage.connector.createTables=true
+    # Deep Storage
+    druid.storage.type=s3
+    druid.storage.bucket=druid
+    druid.storage.baseKey=druid/segments
+    druid.s3.accessKey=minio
+    druid.s3.secretKey=minio123
+    druid.s3.protocol=http
+    druid.s3.enablePathStyleAccess=true
+    druid.s3.endpoint.signingRegion=us-east-1
+    druid.s3.enablePathStyleAccess=true
+    druid.s3.endpoint.url=http://myminio-hl.druid.svc.cluster.local:9000/
+    #
+    # Extensions
+    #
+    druid.extensions.loadList=["druid-avro-extensions", "druid-s3-extensions", 
"druid-hdfs-storage", "druid-kafka-indexing-service", "druid-datasketches", 
"druid-kubernetes-extensions"]
+    #
+    # Service discovery
+    #
+    druid.selectors.indexing.serviceName=druid/overlord
+    druid.selectors.coordinator.serviceName=druid/coordinator
+    druid.indexer.logs.type=s3
+    druid.indexer.logs.s3Bucket=druid
+    druid.indexer.logs.s3Prefix=druid/indexing-logs
+    druid.lookup.enableLookupSyncOnStartup=false
+  env:
+    - name: POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    - name: POD_NAMESPACE
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.namespace
+  nodes:
+    brokers:
+      # Optionally specify for running broker as Deployment
+      # kind: Deployment
+      nodeType: "broker"
+      # Optionally specify for broker nodes
+      # imagePullSecrets:
+      # - name: tutu
+      priorityClassName: system-cluster-critical 
+      workloadAnnotations:
+        kubernetes.io/node-scoped-annotation: "broker"
+      druid.port: 8088
+      services:
+        - spec:
+            type: ClusterIP
+            clusterIP: None
+      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/broker"
+      replicas: 1
+      volumeClaimTemplates:
+       - metadata:
+           name: data-volume
+           annotations:
+             backup.kubernetes.io/enabled: "true"
+             volume.kubernetes.io/tier: "performance"
+         spec:
+           accessModes:
+           - ReadWriteOnce
+           resources:
+             requests:
+               storage: 2Gi
+           storageClassName: gp3
+           volumeAttributesClassName: gp3-high-perf
+      runtime.properties: |
+        druid.service=druid/broker
+        # HTTP server threads
+        druid.broker.http.numConnections=5
+        druid.server.http.numThreads=40
+        # Processing threads and buffers
+        druid.processing.buffer.sizeBytes=25000000
+        druid.sql.enable=true
+      extra.jvm.options: |-
+        -Xmx512m
+        -Xms512m
+
+    coordinators:
+      # Optionally specify for running coordinator as Deployment
+      # kind: Deployment
+      nodeType: "coordinator"
+      druid.port: 8088
+      services:
+        - spec:
+            type: ClusterIP
+            clusterIP: None
+      nodeConfigMountPath: 
"/opt/druid/conf/druid/cluster/master/coordinator-overlord"
+      replicas: 1
+      runtime.properties: |
+        druid.service=druid/coordinator
+        # HTTP server threads
+        druid.coordinator.startDelay=PT30S
+        druid.coordinator.period=PT30S
+        # Configure this coordinator to also run as Overlord
+        druid.coordinator.asOverlord.enabled=true
+        druid.coordinator.asOverlord.overlordService=druid/overlord
+        druid.indexer.queue.startDelay=PT30S
+      extra.jvm.options: |-
+        -Xmx800m
+        -Xms800m
+      dynamicConfig:
+        millisToWaitBeforeDeleting: 900000
+        mergeBytesLimit: 524288000
+        mergeSegmentsLimit: 100
+        maxSegmentsToMove: 5
+        percentOfSegmentsToConsiderPerMove: 100
+        useBatchedSegmentSampler: true
+        replicantLifetime: 15
+        replicationThrottleLimit: 10
+        balancerComputeThreads: 1
+        emitBalancingStats: true
+        killDataSourceWhitelist: []
+        killPendingSegmentsSkipList: []
+        maxSegmentsInNodeLoadingQueue: 100
+        decommissioningNodes: []
+        decommissioningMaxPercentOfMaxSegmentsToMove: 70
+        pauseCoordination: false
+        replicateAfterLoadTimeout: false
+        maxNonPrimaryReplicantsToLoad: 2147483647
+        useRoundRobinSegmentAssignment: true
+
+    historicals:
+      nodeType: "historical"
+      druid.port: 8088
+      services:
+        - spec:
+            type: ClusterIP
+            clusterIP: None
+      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/historical"
+      replicas: 1
+      volumeClaimTemplates:
+       - metadata:
+           name: segment-cache
+           annotations:
+             backup.kubernetes.io/enabled: "true"
+             volume.kubernetes.io/tier: "performance"
+         spec:
+           accessModes:
+           - ReadWriteOnce
+           resources:
+             requests:
+               storage: 5Gi
+           storageClassName: gp3
+           volumeAttributesClassName: gp3-high-perf
+      runtime.properties: |
+        druid.service=druid/historical
+        druid.processing.buffer.sizeBytes=25000000
+        druid.processing.numThreads=2
+        # Segment storage
+        
druid.segmentCache.locations=[{"path":"/druid/data/segments","maxSize":10737418240}]
+        druid.server.maxSize=10737418240
+      extra.jvm.options: |-
+        -Xmx512m
+        -Xms512m
+
+    routers:
+      nodeType: "router"
+      druid.port: 8088
+      services:
+        - spec:
+            type: ClusterIP
+            clusterIP: None
+      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/query/router"
+      replicas: 1
+      runtime.properties: |
+        druid.service=druid/router
+        # HTTP proxy
+        druid.router.http.numConnections=50
+        druid.router.http.readTimeout=PT5M
+        druid.router.http.numMaxThreads=100
+        druid.server.http.numThreads=100
+        # Service discovery
+        druid.router.defaultBrokerServiceName=druid/broker
+        druid.router.coordinatorServiceName=druid/coordinator
+        # Management proxy to coordinator / overlord: required for unified web 
console.
+        druid.router.managementProxy.enabled=true
+
+    middlemanagers:
+      nodeType: "middleManager"
+      nodeConfigMountPath: "/opt/druid/conf/druid/cluster/data/middleManager"
+      druid.port: 8088
+      services:
+        - spec:
+            type: ClusterIP
+            clusterIP: None
+      replicas: 1
+      extra.jvm.options: |-
+        -Xmx512m
+        -Xms512m
+      runtime.properties: |
+        druid.service=druid/middleManager
+        druid.worker.capacity=1
+        druid.indexer.runner.javaOpts=-server -Xms128m -Xmx128m 
-XX:MaxDirectMemorySize=256m -Duser.timezone=UTC -Dfile.encoding=UTF-8 
-Djava.io.tmpdir=/druid/data/tmp -XX:+ExitOnOutOfMemoryError 
-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
+        druid.indexer.task.baseTaskDir=/druid/data/baseTaskDir
+        druid.server.http.numThreads=1
+        druid.indexer.fork.property.druid.processing.buffer.sizeBytes=25000000
+        druid.indexer.fork.property.druid.processing.numMergeBuffers=2
+        druid.indexer.fork.property.druid.processing.numThreads=1
+      dynamicConfig:
+        type: default
+        selectStrategy:
+          type: fillCapacityWithCategorySpec
+          workerCategorySpec:
+            categoryMap: {}
+            strong: true
+        autoScaler: null
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: druid-cluster
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  verbs:
+  - '*'
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: druid-cluster
+subjects:
+- kind: ServiceAccount
+  name: default
+roleRef:
+  kind: Role
+  name: druid-cluster
+  apiGroup: rbac.authorization.k8s.io
diff --git a/e2e/configs/volume-attributes-class.yaml 
b/e2e/configs/volume-attributes-class.yaml
index 42c651a..eb7a985 100644
--- a/e2e/configs/volume-attributes-class.yaml
+++ b/e2e/configs/volume-attributes-class.yaml
@@ -1,10 +1,9 @@
-apiVersion: storage.k8s.io/v1
+apiVersion: storage.k8s.io/v1beta1
 kind: VolumeAttributesClass
 metadata:
-  name: fast-ssd-class
-driverName: example.io/fast-ssd
+  name: gp3-high-perf
+driverName: ebs.csi.aws.com
 parameters:
-  provisioned-iops: "10000"
-  provisioned-throughput: "100MiB/s"
-  encryption: "true"
-  replica-count: "3"
\ No newline at end of file
+  type: gp3
+  iops: "4000"
+  throughput: "200"


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to