This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new f4901b791faa [HUDI-9402] Supporting Docker demo for Mac chips with ARM 
architecture (#13297)
f4901b791faa is described below

commit f4901b791faa2ca47db41a886db24aed3e7eed3d
Author: Ranga Reddy <[email protected]>
AuthorDate: Wed Jul 30 02:05:34 2025 +0530

    [HUDI-9402] Supporting Docker demo for Mac chips with ARM architecture 
(#13297)
---
 ...r-compose_hadoop284_hive233_spark353_arm64.yml} | 141 ++++++++++++++++-----
 docker/setup_demo.sh                               |   2 +-
 2 files changed, 107 insertions(+), 36 deletions(-)

diff --git 
a/docker/compose/docker-compose_hadoop284_hive233_spark244_mac_aarch64.yml 
b/docker/compose/docker-compose_hadoop284_hive233_spark353_arm64.yml
similarity index 60%
rename from 
docker/compose/docker-compose_hadoop284_hive233_spark244_mac_aarch64.yml
rename to docker/compose/docker-compose_hadoop284_hive233_spark353_arm64.yml
index 0abcf676d5f7..80d24a97bb11 100644
--- a/docker/compose/docker-compose_hadoop284_hive233_spark244_mac_aarch64.yml
+++ b/docker/compose/docker-compose_hadoop284_hive233_spark353_arm64.yml
@@ -13,17 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-version: "3.3"
-
 services:
 
   namenode:
-    image: apachehudi/hudi-hadoop_2.8.4-namenode:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-namenode:latest
     hostname: namenode
     container_name: namenode
     environment:
-      - CLUSTER_NAME=hudi_hadoop284_hive232_spark244
+      - CLUSTER_NAME=hudi_hadoop284_hive232_spark353
     ports:
       - "50070:50070"
       - "8020:8020"
@@ -32,18 +29,17 @@ services:
     env_file:
       - ./hadoop.env
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://namenode:50070" ]
+      test: ["CMD", "curl", "-f", "http://namenode:50070"]
       interval: 30s
       timeout: 10s
       retries: 3
 
   datanode1:
-    image: apachehudi/hudi-hadoop_2.8.4-datanode:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-datanode:latest
     container_name: datanode1
     hostname: datanode1
     environment:
-      - CLUSTER_NAME=hudi_hadoop284_hive232_spark244
+      - CLUSTER_NAME=hudi_hadoop284_hive232_spark353
     env_file:
       - ./hadoop.env
     ports:
@@ -55,7 +51,7 @@ services:
       - "namenode"
       - "historyserver"
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://datanode1:50075" ]
+      test: ["CMD", "curl", "-f", "http://datanode1:50075"]
       interval: 30s
       timeout: 10s
       retries: 3
@@ -67,7 +63,7 @@ services:
     hostname: historyserver
     container_name: historyserver
     environment:
-      - CLUSTER_NAME=hudi_hadoop284_hive232_spark244
+      - CLUSTER_NAME=hudi_hadoop284_hive232_spark353
     depends_on:
       - "namenode"
     links:
@@ -75,7 +71,7 @@ services:
     ports:
       - "58188:8188"
     healthcheck:
-      test: [ "CMD", "curl", "-f", "http://historyserver:8188" ]
+      test: ["CMD", "curl", "-f", "http://historyserver:8188"]
       interval: 30s
       timeout: 10s
       retries: 3
@@ -85,18 +81,14 @@ services:
       - historyserver:/hadoop/yarn/timeline
 
   hive-metastore-postgresql:
-    image: menorah84/hive-metastore-postgresql:2.3.0
-    platform: linux/arm64
-    environment:
-      - POSTGRES_HOST_AUTH_METHOD=trust
+    image: bde2020/hive-metastore-postgresql:2.3.0
     volumes:
       - hive-metastore-postgresql:/var/lib/postgresql
     hostname: hive-metastore-postgresql
     container_name: hive-metastore-postgresql
 
   hivemetastore:
-    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:latest
     hostname: hivemetastore
     container_name: hivemetastore
     links:
@@ -112,7 +104,7 @@ services:
       # JVM debugging port (will be mapped to a random port on host)
       - "5005"
     healthcheck:
-      test: [ "CMD", "nc", "-z", "hivemetastore", "9083" ]
+      test: ["CMD", "nc", "-z", "hivemetastore", "9083"]
       interval: 30s
       timeout: 10s
       retries: 3
@@ -121,8 +113,7 @@ services:
       - "namenode"
 
   hiveserver:
-    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3:latest
     hostname: hiveserver
     container_name: hiveserver
     env_file:
@@ -143,8 +134,7 @@ services:
       - ${HUDI_WS}:/var/hoodie/ws
 
   sparkmaster:
-    image: 
apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkmaster_2.4.4:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkmaster_3.5.3:latest
     hostname: sparkmaster
     container_name: sparkmaster
     env_file:
@@ -163,8 +153,7 @@ services:
       - "namenode"
 
   spark-worker-1:
-    image: 
apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkworker_2.4.4:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkworker_3.5.3:latest
     hostname: spark-worker-1
     container_name: spark-worker-1
     env_file:
@@ -184,8 +173,7 @@ services:
       - "namenode"
 
   zookeeper:
-    image: 'arm64v8/zookeeper:3.4.12'
-    platform: linux/arm64
+    image: 'bitnami/zookeeper:3.6.4'
     hostname: zookeeper
     container_name: zookeeper
     ports:
@@ -194,8 +182,7 @@ services:
       - ALLOW_ANONYMOUS_LOGIN=yes
 
   kafka:
-    image: 'wurstmeister/kafka:2.12-2.0.1'
-    platform: linux/arm64
+    image: 'bitnami/kafka:3.4.1'
     hostname: kafkabroker
     container_name: kafkabroker
     ports:
@@ -203,11 +190,92 @@ services:
     environment:
       - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181
       - ALLOW_PLAINTEXT_LISTENER=yes
-      - KAFKA_ADVERTISED_HOST_NAME=kafkabroker
+
+  presto-coordinator-1:
+    container_name: presto-coordinator-1
+    hostname: presto-coordinator-1
+    image: apachehudi/hudi-hadoop_2.8.4-prestobase_0.271:latest
+    ports:
+      - "8090:8090"
+      # JVM debugging port (will be mapped to a random port on host)
+      - "5005"
+    environment:
+      - PRESTO_JVM_MAX_HEAP=512M
+      - PRESTO_QUERY_MAX_MEMORY=1GB
+      - PRESTO_QUERY_MAX_MEMORY_PER_NODE=256MB
+      - PRESTO_QUERY_MAX_TOTAL_MEMORY_PER_NODE=384MB
+      - PRESTO_MEMORY_HEAP_HEADROOM_PER_NODE=100MB
+      - TERM=xterm
+    links:
+      - "hivemetastore"
+    volumes:
+      - ${HUDI_WS}:/var/hoodie/ws
+    command: coordinator
+
+  presto-worker-1:
+    container_name: presto-worker-1
+    hostname: presto-worker-1
+    image: apachehudi/hudi-hadoop_2.8.4-prestobase_0.271:latest
+    depends_on: [ "presto-coordinator-1" ]
+    environment:
+      - PRESTO_JVM_MAX_HEAP=512M
+      - PRESTO_QUERY_MAX_MEMORY=1GB
+      - PRESTO_QUERY_MAX_MEMORY_PER_NODE=256MB
+      - PRESTO_QUERY_MAX_TOTAL_MEMORY_PER_NODE=384MB
+      - PRESTO_MEMORY_HEAP_HEADROOM_PER_NODE=100MB
+      - TERM=xterm
+    links:
+      - "hivemetastore"
+      - "hiveserver"
+      - "hive-metastore-postgresql"
+      - "namenode"
+    volumes:
+      - ${HUDI_WS}:/var/hoodie/ws
+    command: worker
+
+  trino-coordinator-1:
+    container_name: trino-coordinator-1
+    hostname: trino-coordinator-1
+    image: apachehudi/hudi-hadoop_2.8.4-trinocoordinator_368:latest
+    ports:
+      - "8091:8091"
+      # JVM debugging port (will be mapped to a random port on host)
+      - "5005"
+    links:
+      - "hivemetastore"
+    volumes:
+      - ${HUDI_WS}:/var/hoodie/ws
+    command: http://trino-coordinator-1:8091 trino-coordinator-1
+
+  trino-worker-1:
+    container_name: trino-worker-1
+    hostname: trino-worker-1
+    image: apachehudi/hudi-hadoop_2.8.4-trinoworker_368:latest
+    depends_on: [ "trino-coordinator-1" ]
+    ports:
+      - "8092:8092"
+      # JVM debugging port (will be mapped to a random port on host)
+      - "5005"
+    links:
+      - "hivemetastore"
+      - "hiveserver"
+      - "hive-metastore-postgresql"
+      - "namenode"
+    volumes:
+      - ${HUDI_WS}:/var/hoodie/ws
+    command: http://trino-coordinator-1:8091 trino-worker-1
+
+  graphite:
+    container_name: graphite
+    hostname: graphite
+    image: graphiteapp/graphite-statsd
+    ports:
+      - 80:80
+      - 2003-2004:2003-2004
+      - 8126:8126
 
   adhoc-1:
-    image: 
apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_2.4.4:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_3.5.3:latest
     hostname: adhoc-1
     container_name: adhoc-1
     env_file:
@@ -225,12 +293,13 @@ services:
       - "hiveserver"
       - "hive-metastore-postgresql"
       - "namenode"
+      - "presto-coordinator-1"
+      - "trino-coordinator-1"
     volumes:
       - ${HUDI_WS}:/var/hoodie/ws
 
   adhoc-2:
-    image: 
apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_2.4.4:linux-arm64-0.10.1
-    platform: linux/arm64
+    image: apachehudi/hudi-hadoop_2.8.4-hive_2.3.3-sparkadhoc_3.5.3:latest
     hostname: adhoc-2
     container_name: adhoc-2
     env_file:
@@ -247,6 +316,8 @@ services:
       - "hiveserver"
       - "hive-metastore-postgresql"
       - "namenode"
+      - "presto-coordinator-1"
+      - "trino-coordinator-1"
     volumes:
       - ${HUDI_WS}:/var/hoodie/ws
 
@@ -257,4 +328,4 @@ volumes:
 
 networks:
   default:
-     name: hudi
+    name: hudi
diff --git a/docker/setup_demo.sh b/docker/setup_demo.sh
index b83fad6f8cff..b1a0ad02e257 100755
--- a/docker/setup_demo.sh
+++ b/docker/setup_demo.sh
@@ -21,7 +21,7 @@ HUDI_DEMO_ENV=$1
 WS_ROOT=`dirname $SCRIPT_PATH`
 COMPOSE_FILE_NAME="docker-compose_hadoop284_hive233_spark353_amd64.yml"
 if [ "$HUDI_DEMO_ENV" = "--mac-aarch64" ]; then
-  COMPOSE_FILE_NAME="docker-compose_hadoop284_hive233_spark244_mac_aarch64.yml"
+  COMPOSE_FILE_NAME="docker-compose_hadoop284_hive233_spark353_arm64.yml"
 fi
 # restart cluster
 HUDI_WS=${WS_ROOT} docker compose -f 
${SCRIPT_PATH}/compose/${COMPOSE_FILE_NAME} down

Reply via email to