This is an automated email from the ASF dual-hosted git repository.

djwang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry-bootcamp.git
The following commit(s) were added to refs/heads/main by this push:
     new f753193  Update master to coordinator and brand name
f753193 is described below

commit f7531936d5784d5710b76fe7635f217d7df65269
Author: Dianjin Wang <wangdian...@gmail.com>
AuthorDate: Wed Jun 11 16:10:07 2025 +0800

    Update master to coordinator and brand name

    In this PR, these changes are made:

    - Update the word `master` to `coordinator` to match the source code changes
    - Update the old brand name `cloudberrydb` or `cloudberry database` to `cloudberry` or `Apache Cloudberry`.
    - Update faa.tar.gz file to match the changes
    - Add exec mod to smoke-test.sh
---
 000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9  | 19 +++++-----
 000-cbdb-sandbox/Dockerfile.main.rockylinux9     |  8 ++---
 000-cbdb-sandbox/README.md                       | 42 +++++++++++------------
 000-cbdb-sandbox/configs/faa.tar.gz              | Bin 63088802 -> 63085639 bytes
 000-cbdb-sandbox/configs/gpinitsystem_multinode  | 32 ++++++++---------
 000-cbdb-sandbox/configs/gpinitsystem_singlenode | 32 ++++++++---------
 000-cbdb-sandbox/configs/init_system.sh          | 30 ++++++++--------
 000-cbdb-sandbox/configs/smoke-test.sh           |  8 ++---
 000-cbdb-sandbox/docker-compose-rockylinux9.yml  |  8 ++---
 000-cbdb-sandbox/run.sh                          | 10 +++---
 10 files changed, 95 insertions(+), 94 deletions(-)

diff --git a/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9 b/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9
index 563b2a5..948bbfa 100644
--- a/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9
+++ b/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9
@@ -85,7 +85,7 @@ RUN echo root:cbdb@123 | chpasswd && \
 RUN cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
     cp /tmp/90-cbdb-limits.conf /etc/security/limits.d/90-cbdb-limits.conf && \
     cat /usr/share/zoneinfo/${TIMEZONE_VAR} > /etc/localtime && \
-    echo "mdw" > /tmp/gpdb-hosts && \
+    echo "cdw" > /tmp/gpdb-hosts && \
     echo "/usr/local/lib" >> /etc/ld.so.conf && \
     echo "/usr/local/lib64" >> /etc/ld.so.conf && \
     ldconfig && \
@@ -98,17 +98,18 @@ RUN cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
     echo "cbdb@123"|passwd --stdin gpadmin && \
     echo "gpadmin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
     echo "root ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
-    echo "export COORDINATOR_DATA_DIRECTORY=/data0/database/master/gpseg-1" >> /home/gpadmin/.bashrc && \
+    echo "export COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1" >> /home/gpadmin/.bashrc && \
     echo "source /usr/local/cloudberry-db/greenplum_path.sh" >> /home/gpadmin/.bashrc && \
-    mkdir -p /data0/database/master /data0/database/primary /data0/database/mirror && \
+    mkdir -p /data0/database/coordinator /data0/database/primary /data0/database/mirror && \
     chown -R gpadmin:gpadmin /data0 && \
     ssh-keygen -A && \
     echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
 
-RUN cd /tmp/ \
-    && unzip -d /tmp /tmp/cloudberrydb-${CODEBASE_VERSION_VAR}.zip
+RUN cd /tmp/ && \
+    unzip -d /tmp /tmp/cloudberrydb-${CODEBASE_VERSION_VAR}.zip && \
+    mv /tmp/cloudberrydb-${CODEBASE_VERSION_VAR} /tmp/cloudberry
 
-RUN cd /tmp/cloudberrydb-${CODEBASE_VERSION_VAR} && \
+RUN cd /tmp/cloudberry && \
     ./configure --prefix=/usr/local/cloudberry-db \
     --enable-cassert \
     --enable-debug-extensions \
@@ -128,11 +129,11 @@ RUN cd /tmp/cloudberrydb-${CODEBASE_VERSION_VAR} && \
     --with-python \
     --with-pythonsrc-ext
 
-RUN cd /tmp/cloudberrydb-${CODEBASE_VERSION_VAR} && \
+RUN cd /tmp/cloudberry && \
     make -j$(nproc) && \
     make install
 
-RUN cd /tmp/cloudberrydb-${CODEBASE_VERSION_VAR}/contrib && \
+RUN cd /tmp/cloudberry/contrib && \
     make -j$(nproc) && \
     make install
@@ -141,7 +142,7 @@ RUN cd /tmp/cloudberrydb-${CODEBASE_VERSION_VAR}/contrib && \
 # ----------------------------------------------------------------------
 # The default user is set to 'gpadmin', and the container starts by
 # running the init_system.sh script. This container serves as a base
-# environment, and the Cloudberry Database RPM can be installed for
+# environment, and the Apache Cloudberry RPM can be installed for
 # testing and functional verification.
 # ----------------------------------------------------------------------
 USER gpadmin
diff --git a/000-cbdb-sandbox/Dockerfile.main.rockylinux9 b/000-cbdb-sandbox/Dockerfile.main.rockylinux9
index 4e517b8..2073ebc 100644
--- a/000-cbdb-sandbox/Dockerfile.main.rockylinux9
+++ b/000-cbdb-sandbox/Dockerfile.main.rockylinux9
@@ -88,7 +88,7 @@ RUN dnf makecache && \
 RUN cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
     cp /tmp/90-cbdb-limits.conf /etc/security/limits.d/90-cbdb-limits.conf && \
     cat /usr/share/zoneinfo/${TIMEZONE_VAR} > /etc/localtime && \
-    echo "mdw" > /tmp/gpdb-hosts && \
+    echo "cdw" > /tmp/gpdb-hosts && \
     echo "/usr/local/lib" >> /etc/ld.so.conf && \
     echo "/usr/local/lib64" >> /etc/ld.so.conf && \
     ldconfig && \
@@ -101,9 +101,9 @@ RUN cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
     echo "cbdb@123"|passwd --stdin gpadmin && \
     echo "gpadmin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
     echo "root ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers && \
-    echo "export COORDINATOR_DATA_DIRECTORY=/data0/database/master/gpseg-1" >> /home/gpadmin/.bashrc && \
+    echo "export COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1" >> /home/gpadmin/.bashrc && \
     echo "source /usr/local/cloudberry-db/greenplum_path.sh" >> /home/gpadmin/.bashrc && \
-    mkdir -p /data0/database/master /data0/database/primary /data0/database/mirror && \
+    mkdir -p /data0/database/coordinator /data0/database/primary /data0/database/mirror && \
     chown -R gpadmin:gpadmin /data0 && \
     ssh-keygen -A && \
     echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
@@ -147,7 +147,7 @@ RUN cd /tmp/cloudberry/contrib && \
 # ----------------------------------------------------------------------
 # The default user is set to 'gpadmin', and the container starts by
 # running the init_system.sh script. This container serves as a base
-# environment, and the Cloudberry Database RPM can be installed for
+# environment, and the Apache Cloudberry RPM can be installed for
 # testing and functional verification.
 # ----------------------------------------------------------------------
 USER gpadmin
diff --git a/000-cbdb-sandbox/README.md b/000-cbdb-sandbox/README.md
index 15205aa..b8e0043 100644
--- a/000-cbdb-sandbox/README.md
+++ b/000-cbdb-sandbox/README.md
@@ -1,10 +1,10 @@
 ---
-title: Sandbox of Cloudberry Database
+title: Sandbox of Apache Cloudberry
 ---
 
-# Install Cloudberry Database With Docker
+# Install Apache Cloudberry With Docker
 
-This document guides you on how to quickly set up and connect to a Cloudberry Database in a Docker environment. You can try out Cloudberry Database by performing some basic operations and running SQL commands.
+This document guides you on how to quickly set up and connect to a Apache Cloudberry in a Docker environment. You can try out Apache Cloudberry by performing some basic operations and running SQL commands.
 
 > [!WARNING]
 > This guide is intended for testing or development. DO NOT use it for
 > production.
@@ -23,23 +23,23 @@ When building and deploying Cloudberry in Docker, you will have 2 different depl
 **Deployment Options**
 
 1. **Single Container** (Default) - With the single container option, you will have the coordinator as well as the Cloudberry segments all running on a single container. This is the default behavior when deploying using the `run.sh` script provided.
-2. **Multi-Container** - Deploying with the multi-container option will give you a more realistic deployment of what actual production Cloudberry clusters look like. With multi-node, you will have the coordinator, the standby coordinator, and 2 segment hosts all on their own respective containers. This is to both highlight the distributed nature of Cloudberry Database as well as highlight how high availability (HA) features work in the event of a server (or in this case a container) fail [...]
+2. **Multi-Container** - Deploying with the multi-container option will give you a more realistic deployment of what actual production Cloudberry clusters look like. With multi-node, you will have the coordinator, the standby coordinator, and 2 segment hosts all on their own respective containers. This is to both highlight the distributed nature of Apache Cloudberry as well as highlight how high availability (HA) features work in the event of a server (or in this case a container) failin [...]
 
-
+
 
 **Build Options**
 
-1. Compile with the source code of the latest Cloudberry Database (released in [Cloudberry Database Release Page](https://github.com/cloudberrydb/cloudberrydb/releases)). The base OS will be Rocky Linux 9 Docker image.
-2. Method 2 - Compile with the latest Cloudberry Database [main](https://github.com/cloudberrydb/cloudberrydb/tree/main) branch. The base OS will be Rocky Linux 9 Docker image.
+1. Compile with the source code of the latest Apache Cloudberry (released in [Apache Cloudberry Release Page](https://github.com/apache/cloudberry/releases)). The base OS will be Rocky Linux 9 Docker image.
+2. Method 2 - Compile with the latest Apache Cloudberry [main](https://github.com/apache/cloudberry/tree/main) branch. The base OS will be Rocky Linux 9 Docker image.
 
 Build and deploy steps:
 
 1. Start Docker Desktop and make sure it is running properly on your host platform.
-2. Download this repository (which is [cloudberrydb/bootcamp](https://github.com/cloudberrydb/bootcamp)) to the target machine.
+2. Download this repository (which is [apache/cloudberry-bootcamp](https://github.com/apache/cloudberry-bootcamp)) to the target machine.
 
    ```shell
-   git clone https://github.com/cloudberrydb/bootcamp.git
+   git clone https://github.com/apache/cloudberry-bootcamp.git
   ```
 
 3. Enter the repository and run the `run.sh` script to start the Docker container. This will start the automatic installation process. Depending on your environment, you may need to run this with 'sudo' command.
@@ -77,26 +77,26 @@ Build and deploy steps:
 
 ## Connect to the database
 
 > [!NOTE]
-> When deploying the multi-container Cloudberry environment it may take extra time for the database to initialize, so you may need to wait a few minutes before you can execute the psql prompt successfully. You can run `docker logs cbdb-mdw -f` to see the current state of the database initialization process, you'll know the process is finished when you see the "Deployment Successful" output.
+> When deploying the multi-container Cloudberry environment it may take extra time for the database to initialize, so you may need to wait a few minutes before you can execute the psql prompt successfully. You can run `docker logs cbdb-cdw -f` to see the current state of the database initialization process, you'll know the process is finished when you see the "Deployment Successful" output.
 
 You can now connect to the database and try some basic operations.
 
 1. Connect to the Docker container from the host machine:
 
    ```shell
-   docker exec -it cbdb-mdw /bin/bash
+   docker exec -it cbdb-cdw /bin/bash
   ```
 
    If it is successful, you will see the following prompt:
 
    ```shell
-   [gpadmin@mdw /]$
+   [gpadmin@cdw /]$
   ```
 
-2. Log into Cloudberry Database in Docker. See the following commands and example outputs:
+2. Log into Apache Cloudberry in Docker. See the following commands and example outputs:
 
    ```shell
-   [gpadmin@mdw ~]$ psql # Connects to the database with the default database name "gpadmin".
+   [gpadmin@cdw ~]$ psql # Connects to the database with the default database name "gpadmin".
 
    # psql (14.4, server 14.4)
    # Type "help" for help.
@@ -105,11 +105,11 @@ You can now connect to the database and try some basic operations.
 
    ```sql
    gpadmin=# SELECT VERSION(); -- Checks the database version.
-   PostgreSQL 14.4 (Cloudberry Database 1.0.0 build dev) on aarch64-unknown-linux-gnu, compiled by gcc (GCC) 10.2.1 20210130 (Red Hat 10.2.1-11), 64-bit compiled on Oct 24 2023 10:24:28
+   PostgreSQL 14.4 (Apache Cloudberry 1.0.0 build dev) on aarch64-unknown-linux-gnu, compiled by gcc (GCC) 10.2.1 20210130 (Red Hat 10.2.1-11), 64-bit compiled on Oct 24 2023 10:24:28
    (1 row)
   ```
 
-Now you have a Cloudberry Database and can continue with [Cloudberry Database Tutorials Based on Docker Installation](https://github.com/cloudberrydb/bootcamp/blob/main/101-cbdb-tutorials/README.md)! Enjoy!
+Now you have a Apache Cloudberry and can continue with [Apache Cloudberry Tutorials Based on Docker Installation](https://github.com/apache/cloudberry-bootcamp/blob/main/101-cbdb-tutorials/README.md)! Enjoy!
 
 ## Working with your Cloudberry Docker environment
 
@@ -120,13 +120,13 @@ When working with the Cloudberry Docker environment there are a few commands tha
 To stop the **single container** deployment while _keeping the data and state_ within the container, you can run the command below. This means that you can later start the container again and any changes you made to the containers will be persisted between runs.
 
 ```shell
-docker stop cbdb-mdw
+docker stop cbdb-cdw
 ```
 
 To stop the **single container** deployment and also remove the volume that belongs to the container, you can run the following command. Keep in mind this will remove the volume as well as the container associated which means any changes you've made inside of the container or any database state will be wiped and unrecoverable.
 
 ```shell
-docker rm -f cbdb-mdw
+docker rm -f cbdb-cdw
 ```
 
 **Stopping Your Multi-Container Deployment With Docker**
 
@@ -150,7 +150,7 @@ If you've run any of the commands above that keep the Docker volumes persisted b
 To start a **single container** deployment after it was shut down, you can simply run the following
 
 ```shell
-docker start cbdb-mdw
+docker start cbdb-cdw
 ```
 
 **Starting A Stopped Multi-Container Cloudberry Docker Deployment**
 
@@ -165,7 +165,7 @@ docker compose -f docker-compose-rockylinux9.yml start
 > When starting a previously stopped Cloudberry Docker environment, you'll
 > need to manually start the database back up. To do this, just run the
 > following commands once the container(s) are back up and running. The
 > `gpstart` command is used for starting the database, and -a is a flag saying
 > to start the database without prompting (non-interactive).
 
 ```shell
-docker exec -it cbdb-mdw /bin/bash
+docker exec -it cbdb-cdw /bin/bash
 
-[gpadmin@mdw /] gpstart -a
+[gpadmin@cdw /] gpstart -a
 ```
diff --git a/000-cbdb-sandbox/configs/faa.tar.gz b/000-cbdb-sandbox/configs/faa.tar.gz
index ea6d0f4..57f8320 100644
Binary files a/000-cbdb-sandbox/configs/faa.tar.gz and b/000-cbdb-sandbox/configs/faa.tar.gz differ
diff --git a/000-cbdb-sandbox/configs/gpinitsystem_multinode b/000-cbdb-sandbox/configs/gpinitsystem_multinode
index 29104fa..ef96d73 100644
--- a/000-cbdb-sandbox/configs/gpinitsystem_multinode
+++ b/000-cbdb-sandbox/configs/gpinitsystem_multinode
@@ -1,8 +1,8 @@
 # FILE NAME: gpinitsystem_singlenode
 
 # A configuration file is needed by the gpinitsystem utility.
-# This sample file initializes a Greenplum Database Single Node
-# Edition (SNE) system with one master and two segment instances
+# This sample file initializes a Apache Cloudberry Single Node
+# Edition (SNE) system with one coordinator and two segment instances
 # on the local host. This file is referenced when you run gpinitsystem.
 
 ################################################
@@ -12,7 +12,7 @@
 # A name for the array you are configuring. You can use any name you
 # like. Enclose the name in quotes if the name contains spaces.
 
-ARRAY_NAME="Sandbox: Cloudberry Database Cluster"
+ARRAY_NAME="Sandbox: Apache Cloudberry Cluster"
 
 # This specifies the file that contains the list of segment host names
 # that comprise the Greenplum system. For a single-node system, this
@@ -24,11 +24,11 @@ ARRAY_NAME="Sandbox: Cloudberry Database Cluster"
 MACHINE_LIST_FILE=/tmp/gpdb-hosts
 
 # This specifies a prefix that will be used to name the data directories
-# of the master and segment instances. The naming convention for data
-# directories in a Greenplum Database system is SEG_PREFIX<number>
-# where <number> starts with 0 for segment instances and the master
+# of the coordinator and segment instances. The naming convention for data
+# directories in a Apache Cloudberry system is SEG_PREFIX<number>
+# where <number> starts with 0 for segment instances and the coordinator
 # is always -1. So for example, if you choose the prefix gpsne, your
-# master instance data directory would be named gpsne-1, and the segment
+# coordinator instance data directory would be named gpsne-1, and the segment
 # instances would be named gpsne0, gpsne1, gpsne2, gpsne3, and so on.
 
 SEG_PREFIX=gpseg
@@ -56,24 +56,24 @@ PORT_BASE=40000
 declare -a DATA_DIRECTORY=(/data0/database/primary \
 /data0/database/primary)
 
-# The OS-configured hostname of the Greenplum Database master instance.
+# The OS-configured hostname of the Apache Cloudberry coordinator instance.
 
-MASTER_HOSTNAME=mdw
+COORDINATOR_HOSTNAME=cdw
 
 # The location where the data directory will be created on the
-# Greenplum master host.
+# Greenplum coordinator host.
 # You must make sure that the user who runs gpinitsystem
 # has permissions to write to this directory. You may want to
-# create this directory on the master host before running
+# create this directory on the coordinator host before running
 # gpinitsystem and chown it to the appropriate user.
 
-MASTER_DIRECTORY=/data0/database/master
+COORDINATOR_DIRECTORY=/data0/database/coordinator
 
-# The port number for the master instance. This is the port number
+# The port number for the coordinator instance. This is the port number
 # that users and client connections will use when accessing the
-# Greenplum Database system.
+# Apache Cloudberry system.
 
-MASTER_PORT=5432
+COORDINATOR_PORT=5432
 
 # The shell the gpinitsystem script uses to execute
 # commands on remote hosts. Allowed value is ssh. You must set up
@@ -86,7 +86,7 @@ TRUSTED_SHELL=ssh
 # checkpoints, in log file segments (each segment is normally 16
 # megabytes). This will set the checkpoint_segments parameter
 # in the postgresql.conf file for each segment instance in the
-# Greenplum Database system.
+# Apache Cloudberry system.
 
 CHECK_POINT_SEGMENTS=8
 
diff --git a/000-cbdb-sandbox/configs/gpinitsystem_singlenode b/000-cbdb-sandbox/configs/gpinitsystem_singlenode
index 7c3e061..baaaebf 100644
--- a/000-cbdb-sandbox/configs/gpinitsystem_singlenode
+++ b/000-cbdb-sandbox/configs/gpinitsystem_singlenode
@@ -1,8 +1,8 @@
 # FILE NAME: gpinitsystem_singlenode
 
 # A configuration file is needed by the gpinitsystem utility.
-# This sample file initializes a Greenplum Database Single Node
-# Edition (SNE) system with one master and two segment instances
+# This sample file initializes a Apache Cloudberry Single Node
+# Edition (SNE) system with one coordinator and two segment instances
 # on the local host. This file is referenced when you run gpinitsystem.
 
 ################################################
@@ -12,7 +12,7 @@
 # A name for the array you are configuring. You can use any name you
 # like. Enclose the name in quotes if the name contains spaces.
 
-ARRAY_NAME="Sandbox: Cloudberry Database Cluster"
+ARRAY_NAME="Sandbox: Apache Cloudberry Cluster"
 
 # This specifies the file that contains the list of segment host names
 # that comprise the Greenplum system. For a single-node system, this
@@ -24,11 +24,11 @@ ARRAY_NAME="Sandbox: Cloudberry Database Cluster"
 MACHINE_LIST_FILE=/tmp/gpdb-hosts
 
 # This specifies a prefix that will be used to name the data directories
-# of the master and segment instances. The naming convention for data
-# directories in a Greenplum Database system is SEG_PREFIX<number>
-# where <number> starts with 0 for segment instances and the master
+# of the coordinator and segment instances. The naming convention for data
+# directories in a Apache Cloudberry system is SEG_PREFIX<number>
+# where <number> starts with 0 for segment instances and the coordinator
 # is always -1. So for example, if you choose the prefix gpsne, your
-# master instance data directory would be named gpsne-1, and the segment
+# coordinator instance data directory would be named gpsne-1, and the segment
 # instances would be named gpsne0, gpsne1, gpsne2, gpsne3, and so on.
 
 SEG_PREFIX=gpseg
@@ -57,24 +57,24 @@ declare -a DATA_DIRECTORY=(/data0/database/primary \
 /data0/database/primary \
 /data0/database/primary)
 
-# The OS-configured hostname of the Greenplum Database master instance.
+# The OS-configured hostname of the Apache Cloudberry coordinator instance.
 
-MASTER_HOSTNAME=mdw
+COORDINATOR_HOSTNAME=cdw
 
 # The location where the data directory will be created on the
-# Greenplum master host.
+# Greenplum coordinator host.
 # You must make sure that the user who runs gpinitsystem
 # has permissions to write to this directory. You may want to
-# create this directory on the master host before running
+# create this directory on the coordinator host before running
 # gpinitsystem and chown it to the appropriate user.
 
-MASTER_DIRECTORY=/data0/database/master
+COORDINATOR_DIRECTORY=/data0/database/coordinator
 
-# The port number for the master instance. This is the port number
+# The port number for the coordinator instance. This is the port number
 # that users and client connections will use when accessing the
-# Greenplum Database system.
+# Apache Cloudberry system.
 
-MASTER_PORT=5432
+COORDINATOR_PORT=5432
 
 # The shell the gpinitsystem script uses to execute
 # commands on remote hosts. Allowed value is ssh. You must set up
@@ -87,7 +87,7 @@ TRUSTED_SHELL=ssh
 # checkpoints, in log file segments (each segment is normally 16
 # megabytes). This will set the checkpoint_segments parameter
 # in the postgresql.conf file for each segment instance in the
-# Greenplum Database system.
+# Apache Cloudberry system.
 
 CHECK_POINT_SEGMENTS=8
 
diff --git a/000-cbdb-sandbox/configs/init_system.sh b/000-cbdb-sandbox/configs/init_system.sh
index 0aff0c3..73dafa9 100755
--- a/000-cbdb-sandbox/configs/init_system.sh
+++ b/000-cbdb-sandbox/configs/init_system.sh
@@ -51,35 +51,35 @@ cat /home/gpadmin/.ssh/id_rsa.pub >> /home/gpadmin/.ssh/authorized_keys
 chmod 600 /home/gpadmin/.ssh/authorized_keys
 
 # Add the container's hostname to the known_hosts file to avoid SSH warnings
-ssh-keyscan -t rsa mdw > /home/gpadmin/.ssh/known_hosts 2>/dev/null
+ssh-keyscan -t rsa cdw > /home/gpadmin/.ssh/known_hosts 2>/dev/null
 
 # Source Cloudberry environment variables and set
 # COORDINATOR_DATA_DIRECTORY
 source /usr/local/cloudberry-db/greenplum_path.sh
-export COORDINATOR_DATA_DIRECTORY=/data0/database/master/gpseg-1
+export COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1
 
 # Initialize single node Cloudberry cluster
-if [[ $MULTINODE == "false" && $HOSTNAME == "mdw" ]]; then
+if [[ $MULTINODE == "false" && $HOSTNAME == "cdw" ]]; then
     gpinitsystem -a \
         -c /tmp/gpinitsystem_singlenode \
         -h /tmp/gpdb-hosts \
         --max_connections=100
 # Initialize multi node Cloudberry cluster
-elif [[ $MULTINODE == "true" && $HOSTNAME == "mdw" ]]; then
+elif [[ $MULTINODE == "true" && $HOSTNAME == "cdw" ]]; then
     sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no sdw1
     sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no sdw2
-    sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no smdw
+    sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no scdw
     gpinitsystem -a \
         -c /tmp/gpinitsystem_multinode \
         -h /tmp/multinode-gpinit-hosts \
        --max_connections=100
-    gpinitstandby -s smdw -a
+    gpinitstandby -s scdw -a
     printf "sdw1\nsdw2\n" >> /tmp/gpdb-hosts
 fi
 
-if [ $HOSTNAME == "mdw" ]; then
+if [ $HOSTNAME == "cdw" ]; then
     ## Allow any host access the Cloudberry Cluster
-    echo 'host all all 0.0.0.0/0 trust' >> /data0/database/master/gpseg-1/pg_hba.conf
+    echo 'host all all 0.0.0.0/0 trust' >> /data0/database/coordinator/gpseg-1/pg_hba.conf
     gpstop -u
 
     psql -d template1 \
@@ -88,19 +88,19 @@ if [ $HOSTNAME == "mdw" ]; then
 
     cat <<-'EOF'
 ======================================================================
-  ____ _                 _ _                            ____  ____
- / ___| | ___  _   _  __| | |__   ___ _ __ _ __ _   _  |  _ \| __ )
-| |   | |/ _ \| | | |/ _` | '_ \ / _ \ '__| '__| | | | | | | |  _ \
-| |___| | (_) | |_| | (_| | |_) |  __/ |  | |  | |_| | | |_| | |_) |
- \____|_|\___/ \__,_|\__,_|_.__/ \___|_|  |_|   \__, | |____/|____/
-                                                |___/
+   ____ _                 _ _
+  / ___| | ___  _   _  __| | |__   ___ _ __ _ __ _   _
+ | |   | |/ _ \| | | |/ _` | '_ \ / _ \ '__| '__| | | |
+ | |___| | (_) | |_| | (_| | |_) |  __/ |  | |  | |_| |
+  \____|_|\___/ \__,_|\__,_|_.__/ \___|_|  |_|   \__, |
+                                                 |___/
 ======================================================================
 EOF
 
     cat <<-'EOF'
 ======================================================================
-Sandbox: Cloudberry Database Cluster details
+Sandbox: Apache Cloudberry Cluster details
 ======================================================================
 EOF
 
diff --git a/000-cbdb-sandbox/configs/smoke-test.sh b/000-cbdb-sandbox/configs/smoke-test.sh
old mode 100644
new mode 100755
index fea960d..bd97eb6
--- a/000-cbdb-sandbox/configs/smoke-test.sh
+++ b/000-cbdb-sandbox/configs/smoke-test.sh
@@ -55,8 +55,8 @@ lesson1(){
     exit 1
   fi
 
-  echo "local gpadmin lily md5" >> /data0/database/master/gpseg-1/pg_hba.conf
-  echo "local gpadmin lucy trust" >> /data0/database/master/gpseg-1/pg_hba.conf
+  echo "local gpadmin lily md5" >> /data0/database/coordinator/gpseg-1/pg_hba.conf
+  echo "local gpadmin lucy trust" >> /data0/database/coordinator/gpseg-1/pg_hba.conf
 
   gpstop -u
 
@@ -86,7 +86,7 @@ lesson2(){
     exit 1
   fi
 
-  echo "local $TUTORIALDB lily md5" >> /data0/database/master/gpseg-1/pg_hba.conf
+  echo "local $TUTORIALDB lily md5" >> /data0/database/coordinator/gpseg-1/pg_hba.conf
 
   gpstop -u
 
@@ -190,7 +190,7 @@ lesson5(){
   psql -d $TUTORIALDB -c "EXPLAIN ANALYZE SELECT COUNT(*) FROM faa.sample WHERE id > 100"
 
   gpconfig -s optimizer
-  gpconfig -c optimizer -v off --masteronly
+  gpconfig -c optimizer -v off --coordinatoronly
   gpstop -u
 
   psql -d $TUTORIALDB -c "DROP TABLE IF EXISTS faa.otp_c"
diff --git a/000-cbdb-sandbox/docker-compose-rockylinux9.yml b/000-cbdb-sandbox/docker-compose-rockylinux9.yml
index b41a5c9..37cccc9 100644
--- a/000-cbdb-sandbox/docker-compose-rockylinux9.yml
+++ b/000-cbdb-sandbox/docker-compose-rockylinux9.yml
@@ -1,10 +1,10 @@
 services:
   cbdb-coordinator:
-    container_name: cbdb-mdw
+    container_name: cbdb-cdw
     image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
     ports:
       - "15432:5432"
-    hostname: mdw
+    hostname: cdw
     tty: true
     networks:
       interconnect:
@@ -12,9 +12,9 @@ services:
     environment:
       MULTINODE: "true"
   cbdb-standby-coordinator:
-    container_name: cbdb-smdw
+    container_name: cbdb-scdw
     image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
-    hostname: smdw
+    hostname: scdw
     tty: true
     networks:
       interconnect:
diff --git a/000-cbdb-sandbox/run.sh b/000-cbdb-sandbox/run.sh
index 1344c00..d42b55e 100755
--- a/000-cbdb-sandbox/run.sh
+++ b/000-cbdb-sandbox/run.sh
@@ -3,7 +3,7 @@ set -eu
 
 # Default values
 DEFAULT_OS_VERSION="rockylinux9"
-DEFAULT_TIMEZONE_VAR="Asia/Shanghai"
+DEFAULT_TIMEZONE_VAR="America/Los_Angeles"
 DEFAULT_PIP_INDEX_URL_VAR="https://pypi.org/simple"
 BUILD_ONLY="false"
 MULTINODE="false"
@@ -20,10 +20,10 @@ PIP_INDEX_URL_VAR="${PIP_INDEX_URL_VAR:-$DEFAULT_PIP_INDEX_URL_VAR}"
 function usage() {
     echo "Usage: $0 [-o <os_version>] [-c <codebase_version>] [-b] [-m]"
     echo " -c Codebase version (valid values: main, or determined from release zip file name)"
-    echo " -t Timezone (default: Asia/Shanghai, or set via TIMEZONE_VAR environment variable)"
+    echo " -t Timezone (default: America/Los_Angeles, or set via TIMEZONE_VAR environment variable)"
     echo " -p Python Package Index (PyPI) (default: https://pypi.org/simple, or set via PIP_INDEX_URL_VAR environment variable)"
     echo " -b Build only, do not run the container (default: false, or set via BUILD_ONLY environment variable)"
-    echo " -m Multinode, this creates a multinode (multi-container) Cloudberry cluster using docker compose (requires compose to be installed)"
     exit 1
 }
 
@@ -129,11 +129,11 @@ if [ "${MULTINODE}" == "true" ]; then
 else
     docker run --interactive \
         --tty \
-        --name cbdb-mdw \
+        --name cbdb-cdw \
         --detach \
         --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
         --publish 122:22 \
         --publish 15432:5432 \
-        --hostname mdw \
+        --hostname cdw \
         cbdb-${CODEBASE_VERSION}:${OS_VERSION}
 fi
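
As a quick, hedged illustration of what this patch amounts to on disk (not part of the commit above), the rename and the smoke-test.sh mode change could be spot-checked from the top of a local clone of apache/cloudberry-bootcamp. This is only a sketch: it assumes a POSIX shell and GNU grep, and the search pattern and the excluded archive name are illustrative choices rather than tooling shipped in this repository.

    # Illustrative spot-check only; run from the root of a cloudberry-bootcamp clone.
    # 1. Look for leftover pre-rename terms (old hostname "mdw", old flag and brand
    #    spellings), skipping the binary FAA dataset archive that this commit refreshed.
    grep -rniE --exclude='faa.tar.gz' \
      'mdw|masteronly|cloudberrydb|cloudberry database' 000-cbdb-sandbox \
      || echo "no leftover pre-rename references found"

    # 2. Confirm the smoke test script is now executable ("Add exec mod to smoke-test.sh").
    test -x 000-cbdb-sandbox/configs/smoke-test.sh && echo "smoke-test.sh is executable"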