This is an automated email from the ASF dual-hosted git repository.

xiangfu pushed a commit to branch presto-worker
in repository https://gitbox.apache.org/repos/asf/incubator-pinot.git

commit ed90af6489265b7f62667f50c873daa2bef67592
Author: Xiang Fu <[email protected]>
AuthorDate: Fri Dec 6 02:09:59 2019 -0800

    Adding example of k8s presto-worker deployment
---
 kubernetes/helm/README.md          |  24 +++++++
 kubernetes/helm/presto-worker.yaml | 144 +++++++++++++++++++++++++++++++++++++
 2 files changed, 168 insertions(+)

diff --git a/kubernetes/helm/README.md b/kubernetes/helm/README.md
index 6227184..0d15745 100644
--- a/kubernetes/helm/README.md
+++ b/kubernetes/helm/README.md
@@ -497,6 +497,30 @@ Splits: 17 total, 17 done (100.00%)
 0:00 [1 rows, 8B] [2 rows/s, 19B/s]
 ```
 
+### (Optional) Deploy more Presto workers
+
+You can run the command below to deploy more Presto workers if needed.
+
+```bash
+kubectl apply -f presto-worker.yaml
+```
+
+Then you can verify that the new worker nodes have been added by running:
+
+```bash
+presto:default> select * from system.runtime.nodes;
+               node_id                |         http_uri         |      node_version      | coordinator | state
+--------------------------------------+--------------------------+------------------------+-------------+--------
+ 38959968-6262-46a1-a321-ee0db6cbcbd3 | http://10.244.0.182:8080 | 0.230-SNAPSHOT-4e66289 | false       | active
+ 83851b8c-fe7f-49fe-ae0c-e3daf6d92bef | http://10.244.2.183:8080 | 0.230-SNAPSHOT-4e66289 | false       | active
+ presto-coordinator                   | http://10.244.1.25:8080  | 0.230-SNAPSHOT-4e66289 | true        | active
+(3 rows)
+
+Query 20191206_095812_00027_na99c, FINISHED, 2 nodes
+Splits: 17 total, 17 done (100.00%)
+0:00 [3 rows, 248B] [11 rows/s, 984B/s]
+```
+
 ## How to clean up Pinot deployment
 
 ```bash
diff --git a/kubernetes/helm/presto-worker.yaml b/kubernetes/helm/presto-worker.yaml
new file mode 100644
index 0000000..454e23b
--- /dev/null
+++ b/kubernetes/helm/presto-worker.yaml
@@ -0,0 +1,144 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: presto-worker-etc
+  namespace: pinot-quickstart
+data:
+  config.properties: |-
+    coordinator=false
+    http-server.http.port=8080
+    query.max-memory=50GB
+    query.max-memory-per-node=4GB
+    query.max-total-memory-per-node=8GB
+    discovery.uri=http://presto-coordinator:8080
+
+  jvm.config: |-
+    -server
+    -Xmx16G
+    -XX:+UseG1GC
+    -XX:G1HeapRegionSize=32M
+    -XX:+UseGCOverheadLimit
+    -XX:+ExplicitGCInvokesConcurrent
+    -XX:+HeapDumpOnOutOfMemoryError
+    -XX:+ExitOnOutOfMemoryError
+
+  log.properties: |-
+    com.facebook.presto=INFO
+
+  node.properties: |-
+    node.environment=production
+    node.data-dir=/home/presto/data
+
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: presto-worker
+  namespace: pinot-quickstart
+spec:
+  selector:
+    matchLabels:
+      app: presto-worker
+  serviceName: presto-worker
+  replicas: 4
+  updateStrategy:
+    type: RollingUpdate
+  podManagementPolicy: Parallel
+  template:
+    metadata:
+      labels:
+        app: presto-worker
+    spec:
+      terminationGracePeriodSeconds: 30
+      securityContext:
+        runAsGroup: 1000
+        fsGroup: 1000
+        runAsUser: 1000
+      containers:
+        - image: fx19880617/pinot-presto:latest
+          imagePullPolicy: Always
+          name: presto-worker
+          args: [ "run" ]
+          ports:
+            - containerPort: 8080
+              protocol: TCP
+          resources:
+            requests:
+              memory: 1Gi
+          volumeMounts:
+            - name: presto-data
+              mountPath: "/home/presto/data"
+            - name: presto-catalog
+              mountPath: "/home/presto/etc/catalog"
+              readOnly: true
+            - name: presto-etc
+              mountPath: "/home/presto/etc/config.properties"
+              subPath: config.properties
+              readOnly: true
+            - name: presto-etc
+              mountPath: "/home/presto/etc/log.properties"
+              subPath: log.properties
+              readOnly: true
+            - name: presto-etc
+              mountPath: "/home/presto/etc/node.properties"
+              subPath: node.properties
+              readOnly: true
+            - name: presto-etc
+              mountPath: "/home/presto/etc/jvm.config"
+              subPath: jvm.config
+              readOnly: true
+      nodeSelector: {}
+      restartPolicy: Always
+      volumes:
+        - name: presto-catalog
+          configMap:
+            name: presto-catalog
+        - name: presto-etc
+          configMap:
+            name: presto-worker-etc
+  volumeClaimTemplates:
+    - metadata:
+        name: presto-data
+        annotations:
+          pv.beta.kubernetes.io/gid: "1000"
+          pv.beta.kubernetes.io/groups: "1000"
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        storageClassName: "default"
+        resources:
+          requests:
+            storage: 20Gi
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: presto-worker
+  namespace: pinot-quickstart
+spec:
+  ports:
+    # [podname].presto-worker.pinot-quickstart.svc.cluster.local
+    - port: 8080
+  clusterIP: None
+  selector:
+    app: presto-worker


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to