This is an automated email from the ASF dual-hosted git repository.

djwang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry.git

commit 2a56159ea69948559d9a550c40f64e3f440a1388
Author: Dianjin Wang <[email protected]>
AuthorDate: Tue Oct 14 17:01:05 2025 +0800

    Initial: squash cloudberry-bootcamp into devops/sandbox
    
    This commit imports only the essential apache/cloudberry-bootcamp
    repo assets (000-Cloudberry Sandbox) into the current repo under the
    devops/sandbox/ directory.
    
    Non-essential content such as tutorials (101–104), documentation, and
    large binary assets are excluded from this import to keep the repository
    lightweight and focused on the Sandbox deployment.
    
    This import is squashed into a single commit to provide a clean baseline
    for subsequent sandbox configuration and cleanup.
    
    initial commit: 
https://github.com/apache/cloudberry-bootcamp/commit/f7531936d5784d5710b76fe7635f217d7df65269
    
    See: https://lists.apache.org/thread/p6otyrrnosg8fsbyr6ok7hl8wxpx4ss2
    
    Original contributor information is as follows:
    
    Co-authored-by: Ed Espino <[email protected]>
    Co-authored-by: Antonio Petrole <[email protected]>
    Co-authored-by: Liang Chen <[email protected]>
    Co-authored-by: Ryan Wei <[email protected]>
    Co-authored-by: Fenggang Wang <[email protected]>
    Co-authored-by: TomShawn <[email protected]>
    Co-authored-by: Dianjin Wang <[email protected]>
---
 devops/sandbox/000-cbdb-sandbox/.env               |   2 +
 .../Dockerfile.RELEASE.rockylinux9                 | 155 +++++++++++++++++++
 .../000-cbdb-sandbox/Dockerfile.main.rockylinux9   | 160 +++++++++++++++++++
 devops/sandbox/000-cbdb-sandbox/README.md          | 171 +++++++++++++++++++++
 .../000-cbdb-sandbox/configs/90-cbdb-limits.conf   |  10 ++
 .../000-cbdb-sandbox/configs/90-cbdb-sysctl.conf   |  35 +++++
 .../configs/gpinitsystem_multinode                 | 119 ++++++++++++++
 .../configs/gpinitsystem_singlenode                | 121 +++++++++++++++
 .../000-cbdb-sandbox/configs/init_system.sh        | 130 ++++++++++++++++
 .../configs/multinode-gpinit-hosts                 |   2 +
 .../docker-compose-rockylinux9.yml                 |  45 ++++++
 devops/sandbox/000-cbdb-sandbox/run.sh             | 139 +++++++++++++++++
 devops/sandbox/images/sandbox-deployment.jpg       | Bin 0 -> 143855 bytes
 13 files changed, 1089 insertions(+)

diff --git a/devops/sandbox/000-cbdb-sandbox/.env 
b/devops/sandbox/000-cbdb-sandbox/.env
new file mode 100644
index 00000000000..5f1596515d2
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/.env
@@ -0,0 +1,2 @@
+CODEBASE_VERSION=1.6.0
+OS_VERSION=rockylinux9
diff --git a/devops/sandbox/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9 
b/devops/sandbox/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9
new file mode 100644
index 00000000000..948bbfa7986
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/Dockerfile.RELEASE.rockylinux9
@@ -0,0 +1,155 @@
+FROM rockylinux/rockylinux:9
+
+ARG CODEBASE_VERSION_VAR=${CODEBASE_VERSION_VAR}
+ARG TIMEZONE_VAR="Asia/Shanghai"
+
+ENV container=docker
+ENV MULTINODE=false
+
+RUN dnf update -y && \
+    dnf install -y systemd \
+                   systemd-libs && \
+    dnf clean all
+
+# Clean up unnecessary systemd units
+RUN [ -d /lib/systemd/system/sysinit.target.wants ] && find 
/lib/systemd/system/sysinit.target.wants/ -type l -not -name 
'systemd-tmpfiles-setup.service' -delete || echo "Directory 
/lib/systemd/system/sysinit.target.wants does not exist" && \
+    [ -d /lib/systemd/system/multi-user.target.wants ] && find 
/lib/systemd/system/multi-user.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/multi-user.target.wants does not exist" && \
+    find /etc/systemd/system/*.wants/ -type l -delete || echo "Directory 
/etc/systemd/system/*.wants does not exist" && \
+    [ -d /lib/systemd/system/local-fs.target.wants ] && find 
/lib/systemd/system/local-fs.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/local-fs.target.wants does not exist" && \
+    [ -d /lib/systemd/system/sockets.target.wants ] && find 
/lib/systemd/system/sockets.target.wants/ -type l -not -name '*udev*' -delete 
|| echo "Directory /lib/systemd/system/sockets.target.wants does not exist" && \
+    [ -d /lib/systemd/system/basic.target.wants ] && find 
/lib/systemd/system/basic.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/basic.target.wants does not exist" && \
+    [ -d /lib/systemd/system/anaconda.target.wants ] && find 
/lib/systemd/system/anaconda.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/anaconda.target.wants does not exist"
+
+COPY ./configs/* /tmp/
+
+RUN     echo root:cbdb@123 | chpasswd && \
+        dnf makecache && \
+        dnf install -y yum-utils \
+                       epel-release \
+                       git && \
+        yum-config-manager --disable epel-cisco-openh264 && \
+        dnf makecache && \
+        yum-config-manager --disable epel && \
+        dnf install -y --enablerepo=epel \
+                       the_silver_searcher \
+                       bat \
+                       htop && \
+        dnf install -y bison \
+                       cmake3 \
+                       ed \
+                       flex \
+                       gcc \
+                       gcc-c++ \
+                       glibc-langpack-en \
+                       go \
+                       initscripts \
+                       iproute \
+                       less \
+                       m4 \
+                       net-tools \
+                       openssh-clients \
+                       openssh-server \
+                       passwd \
+                       perl \
+                       rsync \
+                       sudo \
+                       tar \
+                       unzip \
+                       util-linux-ng \
+                       wget \
+                       sshpass \
+                       which && \
+        dnf install -y apr-devel \
+                       bzip2-devel \
+                       krb5-devel \
+                       libcurl-devel \
+                       libevent-devel \
+                       libxml2-devel \
+                       libzstd-devel \
+                       openldap-devel \
+                       openssl-devel \
+                       pam-devel \
+                       perl-ExtUtils-Embed \
+                       perl-Test-Simple \
+                       perl-core \
+                       python3-devel \
+                       readline-devel \
+                       zlib-devel && \
+        dnf install -y --enablerepo=crb \
+                       libuv-devel \
+                       libyaml-devel \
+                       perl-IPC-Run && \
+        dnf install -y --enablerepo=epel \
+                       xerces-c-devel
+
+RUN     cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
+        cp /tmp/90-cbdb-limits.conf /etc/security/limits.d/90-cbdb-limits.conf 
&& \
+        cat /usr/share/zoneinfo/${TIMEZONE_VAR} > /etc/localtime && \
+        echo "cdw" > /tmp/gpdb-hosts && \
+        echo "/usr/local/lib" >> /etc/ld.so.conf && \
+        echo "/usr/local/lib64" >> /etc/ld.so.conf && \
+        ldconfig && \
+        chmod 777 /tmp/gpinitsystem_singlenode && \
+        chmod 777 /tmp/init_system.sh && \
+        hostname > ~/orig_hostname && \
+        /usr/sbin/groupadd gpadmin && \
+        /usr/sbin/useradd  gpadmin -g gpadmin -G wheel && \
+        setcap cap_net_raw+ep /usr/bin/ping && \
+        echo "cbdb@123"|passwd --stdin gpadmin && \
+        echo "gpadmin        ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers && 
\
+        echo "root           ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers && 
\
+        echo "export 
COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1" >> 
/home/gpadmin/.bashrc && \
+        echo "source /usr/local/cloudberry-db/greenplum_path.sh"               
 >> /home/gpadmin/.bashrc && \
+        mkdir -p /data0/database/coordinator /data0/database/primary 
/data0/database/mirror && \
+        chown -R gpadmin:gpadmin /data0 && \
+        ssh-keygen -A && \
+        echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
+
+RUN     cd /tmp/ && \
+        unzip -d /tmp /tmp/cloudberrydb-${CODEBASE_VERSION_VAR}.zip && \
+        mv /tmp/cloudberrydb-${CODEBASE_VERSION_VAR} /tmp/cloudberry
+
+RUN     cd /tmp/cloudberry && \
+        ./configure --prefix=/usr/local/cloudberry-db \
+                    --enable-cassert                  \
+                    --enable-debug-extensions         \
+                    --enable-ic-proxy                 \
+                    --enable-mapreduce                \
+                    --enable-orafce                   \
+                    --enable-orca                     \
+                    --enable-pxf                      \
+                    --enable-tap-tests                \
+                    --with-gssapi                     \
+                    --with-ldap                       \
+                    --with-libxml                     \
+                    --with-openssl                    \
+                    --with-pam                        \
+                    --with-perl                       \
+                    --with-pgport=5432                \
+                    --with-python                     \
+                    --with-pythonsrc-ext
+
+RUN     cd /tmp/cloudberry && \
+        make -j$(nproc) && \
+        make install
+
+RUN     cd /tmp/cloudberry/contrib && \
+        make -j$(nproc) && \
+        make install
+
+# ----------------------------------------------------------------------
+# Set the Default User and Command
+# ----------------------------------------------------------------------
+# The default user is set to 'gpadmin', and the container starts by
+# running the init_system.sh script. This container serves as a base
+# environment, and the Apache Cloudberry RPM can be installed for
+# testing and functional verification.
+# ----------------------------------------------------------------------
+USER gpadmin
+ENV USER=gpadmin
+WORKDIR /home/gpadmin
+
+EXPOSE 5432 22
+
+VOLUME [ "/sys/fs/cgroup" ]
+CMD ["bash","-c","/tmp/init_system.sh"]
diff --git a/devops/sandbox/000-cbdb-sandbox/Dockerfile.main.rockylinux9 
b/devops/sandbox/000-cbdb-sandbox/Dockerfile.main.rockylinux9
new file mode 100644
index 00000000000..2073ebcb2a6
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/Dockerfile.main.rockylinux9
@@ -0,0 +1,160 @@
+FROM rockylinux/rockylinux:9
+
+# Argument for configuring the timezone
+ARG TIMEZONE_VAR="America/Los_Angeles"
+
+# Environment variables
+ENV container=docker
+ENV MULTINODE=false
+
+RUN dnf update -y && \
+    dnf install -y systemd \
+                   systemd-libs && \
+    dnf clean all
+
+# Clean up unnecessary systemd units
+RUN [ -d /lib/systemd/system/sysinit.target.wants ] && find 
/lib/systemd/system/sysinit.target.wants/ -type l -not -name 
'systemd-tmpfiles-setup.service' -delete || echo "Directory 
/lib/systemd/system/sysinit.target.wants does not exist" && \
+    [ -d /lib/systemd/system/multi-user.target.wants ] && find 
/lib/systemd/system/multi-user.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/multi-user.target.wants does not exist" && \
+    find /etc/systemd/system/*.wants/ -type l -delete || echo "Directory 
/etc/systemd/system/*.wants does not exist" && \
+    [ -d /lib/systemd/system/local-fs.target.wants ] && find 
/lib/systemd/system/local-fs.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/local-fs.target.wants does not exist" && \
+    [ -d /lib/systemd/system/sockets.target.wants ] && find 
/lib/systemd/system/sockets.target.wants/ -type l -not -name '*udev*' -delete 
|| echo "Directory /lib/systemd/system/sockets.target.wants does not exist" && \
+    [ -d /lib/systemd/system/basic.target.wants ] && find 
/lib/systemd/system/basic.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/basic.target.wants does not exist" && \
+    [ -d /lib/systemd/system/anaconda.target.wants ] && find 
/lib/systemd/system/anaconda.target.wants/ -type l -delete || echo "Directory 
/lib/systemd/system/anaconda.target.wants does not exist"
+
+COPY ./configs/* /tmp/
+
+RUN     dnf makecache && \
+        dnf install -y yum-utils \
+                       epel-release \
+                       git && \
+        yum-config-manager --disable epel-cisco-openh264 && \
+        dnf makecache && \
+        yum-config-manager --disable epel && \
+        dnf install -y --enablerepo=epel \
+                       the_silver_searcher \
+                       bat \
+                       htop && \
+        dnf install -y bison \
+                       cmake3 \
+                       ed \
+                       flex \
+                       gcc \
+                       gcc-c++ \
+                       glibc-langpack-en \
+                       go \
+                       initscripts \
+                       iproute \
+                       less \
+                       m4 \
+                       net-tools \
+                       openssh-clients \
+                       openssh-server \
+                       passwd \
+                       perl \
+                       rsync \
+                       sudo \
+                       tar \
+                       unzip \
+                       util-linux-ng \
+                       wget \
+                       sshpass \
+                       which && \
+        dnf install -y apr-devel \
+                       bzip2-devel \
+                       krb5-devel \
+                       libcurl-devel \
+                       libevent-devel \
+                       libxml2-devel \
+                       libuuid-devel \
+                       libzstd-devel \
+                       lz4-devel \
+                       openldap-devel \
+                       openssl-devel \
+                       pam-devel \
+                       perl-ExtUtils-Embed \
+                       perl-Test-Simple \
+                       perl-core \
+                       python3-devel \
+                       readline-devel \
+                       zlib-devel && \
+        dnf install -y --enablerepo=crb \
+                       libuv-devel \
+                       libyaml-devel \
+                       protobuf-devel \
+                       perl-IPC-Run && \
+        dnf install -y --enablerepo=epel \
+                       xerces-c-devel
+
+RUN     cp /tmp/90-cbdb-sysctl.conf /etc/sysctl.conf && \
+        cp /tmp/90-cbdb-limits.conf /etc/security/limits.d/90-cbdb-limits.conf 
&& \
+        cat /usr/share/zoneinfo/${TIMEZONE_VAR} > /etc/localtime && \
+        echo "cdw" > /tmp/gpdb-hosts && \
+        echo "/usr/local/lib" >> /etc/ld.so.conf && \
+        echo "/usr/local/lib64" >> /etc/ld.so.conf && \
+        ldconfig && \
+        chmod 777 /tmp/gpinitsystem_singlenode && \
+        chmod 777 /tmp/init_system.sh && \
+        hostname > ~/orig_hostname && \
+        /usr/sbin/groupadd gpadmin && \
+        /usr/sbin/useradd  gpadmin -g gpadmin -G wheel && \
+        setcap cap_net_raw+ep /usr/bin/ping && \
+        echo "cbdb@123"|passwd --stdin gpadmin && \
+        echo "gpadmin        ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers && 
\
+        echo "root           ALL=(ALL)       NOPASSWD: ALL" >> /etc/sudoers && 
\
+        echo "export 
COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1" >> 
/home/gpadmin/.bashrc && \
+        echo "source /usr/local/cloudberry-db/greenplum_path.sh"               
 >> /home/gpadmin/.bashrc && \
+        mkdir -p /data0/database/coordinator /data0/database/primary 
/data0/database/mirror && \
+        chown -R gpadmin:gpadmin /data0 && \
+        ssh-keygen -A && \
+        echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
+
+RUN     cd /tmp/ && \
+        git clone --recurse-submodules --branch main --single-branch --depth=1 
https://github.com/apache/cloudberry.git
+
+RUN     cd /tmp/cloudberry && \
+        ./configure --prefix=/usr/local/cloudberry-db \
+                    --enable-cassert                  \
+                    --enable-debug-extensions         \
+                    --enable-gpcloud                  \
+                    --enable-ic-proxy                 \
+                    --enable-mapreduce                \
+                    --enable-orafce                   \
+                    --enable-orca                     \
+                    --enable-pax                      \
+                    --enable-pxf                      \
+                    --enable-tap-tests                \
+                    --with-gssapi                     \
+                    --with-ldap                       \
+                    --with-libxml                     \
+                    --with-lz4                        \
+                    --with-openssl                    \
+                    --with-pam                        \
+                    --with-perl                       \
+                    --with-pgport=5432                \
+                    --with-python                     \
+                    --with-pythonsrc-ext
+
+RUN     cd /tmp/cloudberry && \
+        make -j$(nproc) && \
+        make install
+
+RUN     cd /tmp/cloudberry/contrib && \
+        make -j$(nproc) && \
+        make install
+
+# ----------------------------------------------------------------------
+# Set the Default User and Command
+# ----------------------------------------------------------------------
+# The default user is set to 'gpadmin', and the container starts by
+# running the init_system.sh script. This container serves as a base
+# environment, and the Apache Cloudberry RPM can be installed for
+# testing and functional verification.
+# ----------------------------------------------------------------------
+USER gpadmin
+ENV USER=gpadmin
+WORKDIR /home/gpadmin
+
+EXPOSE 5432 22
+
+VOLUME [ "/sys/fs/cgroup" ]
+CMD ["bash","-c","/tmp/init_system.sh"]
diff --git a/devops/sandbox/000-cbdb-sandbox/README.md 
b/devops/sandbox/000-cbdb-sandbox/README.md
new file mode 100644
index 00000000000..b8e0043c574
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/README.md
@@ -0,0 +1,171 @@
+---
+title: Sandbox of Apache Cloudberry
+---
+
+# Install Apache Cloudberry With Docker
+
+This document guides you on how to quickly set up and connect to an Apache 
Cloudberry instance in a Docker environment. You can try out Apache Cloudberry 
by performing some basic operations and running SQL commands.
+
+> [!WARNING]
+> This guide is intended for testing or development. DO NOT use it for 
production.
+
+
+## Prerequisites
+
+Make sure that your environment meets the following requirements:
+
+- Platform requirement: Any platform with Docker runtime. For details, refer 
to [Get Started with Docker](https://www.docker.com/get-started/).
+- Other dependencies: Git, SSH, and internet connection
+
+## Build the Sandbox
+
+When building and deploying Cloudberry in Docker, you will have 2 different 
deployment options as well as different build options.
+
+**Deployment Options**
+1. **Single Container** (Default) - With the single container option, you will 
have the coordinator as well as the Cloudberry segments all running on a single 
container. This is the default behavior when deploying using the `run.sh` 
script provided.
+2. **Multi-Container** - Deploying with the multi-container option will give 
you a more realistic deployment of what actual production Cloudberry clusters 
look like. With multi-node, you will have the coordinator, the standby 
coordinator, and 2 segment hosts all on their own respective containers. This 
is to both highlight the distributed nature of Apache Cloudberry as well as 
highlight how high availability (HA) features work in the event of a server (or 
in this case a container) failin [...]
+
+![cloudberry Sandbox Deployments](../images/sandbox-deployment.jpg)
+
+**Build Options**
+
+1. Compile with the source code of the latest Apache Cloudberry (released in 
[Apache Cloudberry Release 
Page](https://github.com/apache/cloudberry/releases)). The base OS will be 
Rocky Linux 9 Docker image.
+2. Compile with the latest Apache Cloudberry 
[main](https://github.com/apache/cloudberry/tree/main) branch. The base OS will 
be Rocky Linux 9 Docker image.
+
+Build and deploy steps:
+
+1. Start Docker Desktop and make sure it is running properly on your host 
platform.
+
+2. Download this repository (which is 
[apache/cloudberry-bootcamp](https://github.com/apache/cloudberry-bootcamp)) to 
the target machine.
+
+    ```shell
+    git clone https://github.com/apache/cloudberry-bootcamp.git
+    ```
+
+3. Enter the repository and run the `run.sh` script to start the Docker 
container. This will start the automatic installation process. Depending on 
your environment, you may need to run this with 'sudo' command.
+
+    - For latest Cloudberry DB release running on a single container
+
+    ```shell
+    cd cloudberry-bootcamp/000-cbdb-sandbox
+    ./run.sh
+    ```
+    - For latest Cloudberry DB release running across multiple containers
+
+    ```shell
+    cd cloudberry-bootcamp/000-cbdb-sandbox
+    ./run.sh -m
+    ```
+    - For latest main branch running on a single container
+
+    ```shell
+    cd cloudberry-bootcamp/000-cbdb-sandbox
+    ./run.sh -c main
+    ```
+
+    - For latest main branch running across multiple containers
+
+    ```shell
+    cd cloudberry-bootcamp/000-cbdb-sandbox
+    ./run.sh -c main -m
+    ```
+
+    Once the script finishes without error, the sandbox is built and running 
successfully. The `docker run` and `docker compose` commands use the --detach 
option allowing you to ssh or access the running CBDB instance remotely.
+
+    Please review run.sh script for additional options (e.g. setting Timezone 
in running container, only building container). You can also execute `./run.sh 
-h` to see the usage.
+
+## Connect to the database
+
+> [!NOTE]
+> When deploying the multi-container Cloudberry environment it may take extra 
time for the database to initialize, so you may need to wait a few minutes 
before you can execute the psql prompt successfully. You can run `docker logs 
cbdb-cdw -f` to see the current state of the database initialization process, 
you'll know the process is finished when you see the "Deployment Successful" 
output.
+
+You can now connect to the database and try some basic operations.
+
+1. Connect to the Docker container from the host machine:
+
+    ```shell
+    docker exec -it cbdb-cdw /bin/bash
+    ```
+
+    If it is successful, you will see the following prompt:
+
+    ```shell
+    [gpadmin@cdw /]$
+    ```
+
+2. Log into Apache Cloudberry in Docker. See the following commands and 
example outputs:
+
+    ```shell
+    [gpadmin@cdw ~]$ psql  # Connects to the database with the default 
database name "gpadmin".
+    
+    # psql (14.4, server 14.4)
+    # Type "help" for help.
+    ```
+
+    ```sql
+    gpadmin=# SELECT VERSION();  -- Checks the database version.
+            
+    PostgreSQL 14.4 (Apache Cloudberry 1.0.0 build dev) on 
aarch64-unknown-linux-gnu, compiled by gcc (GCC) 10.2.1 20210130 (Red Hat 
10.2.1-11), 64-bit compiled on Oct 24 2023 10:24:28
+    (1 row)
+    ```
+
+Now you have an Apache Cloudberry instance and can continue with [Apache 
Cloudberry Tutorials Based on Docker 
Installation](https://github.com/apache/cloudberry-bootcamp/blob/main/101-cbdb-tutorials/README.md)!
 Enjoy!
+
+## Working with your Cloudberry Docker environment
+
+When working with the Cloudberry Docker environment there are a few commands 
that will be useful to you.
+
+**Stopping Your Single Container Deployment With Docker**
+
+To stop the **single container** deployment while _keeping the data and state_ 
within the container, you can run the command below. This means that you can 
later start the container again and any changes you made to the containers will 
be persisted between runs.
+
+```shell
+docker stop cbdb-cdw
+```
+
+To stop the **single container** deployment and also remove the volume that 
belongs to the container, you can run the following command. Keep in mind this 
will remove the volume as well as the container associated which means any 
changes you've made inside of the container or any database state will be wiped 
and unrecoverable.
+
+```shell
+docker rm -f cbdb-cdw
+```
+
+**Stopping Your Multi-Container Deployment With Docker**
+
+To stop the **multi-container** deployment while _keeping the data and state_ 
within the container, you can run the command below. This means that you can 
later start the container again and any changes you made to the containers will 
be persisted between runs.
+
+```shell
+docker compose -f docker-compose-rockylinux9.yml stop
+```
+
+To stop the **multi-container** deployment and also remove the network and 
volumes that belong to the containers, you can run the command below. Running 
this command means it will delete the containers as well as remove the volumes 
that the containers are associated with. This means any changes you've made 
inside of the containers or any database state will be wiped and unrecoverable.
+
+```shell
+docker compose -f docker-compose-rockylinux9.yml down -v
+```
+
+**Starting A Stopped Single Container Cloudberry Docker Deployment**
+
+If you've run any of the commands above that keep the Docker volumes persisted 
between shutting the containers down, you can use the following commands to 
bring that same deployment back up with its previous state.
+
+To start a **single container** deployment after it was shut down, you can 
simply run the following
+
+```shell
+docker start cbdb-cdw
+```
+
+**Starting A Stopped Multi-Container Cloudberry Docker Deployment**
+
+To start a **multi-container** deployment after it was shut down, you can run 
the following command.
+
+```shell
+docker compose -f docker-compose-rockylinux9.yml start
+```
+
+> [!NOTE]
+> When starting a previously stopped Cloudberry Docker environment, you'll 
need to manually start the database back up. To do this, just run the following 
commands once the container(s) are back up and running. The `gpstart` command 
is used for starting the database, and -a is a flag saying to start the 
database without prompting (non-interactive).
+
+```shell
+docker exec -it cbdb-cdw /bin/bash
+
+[gpadmin@cdw /] gpstart -a
+```
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-limits.conf 
b/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-limits.conf
new file mode 100644
index 00000000000..d2bf601095b
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-limits.conf
@@ -0,0 +1,10 @@
+######################
+# CBDB CONFIG PARAMS #
+######################
+
+ * soft core unlimited
+ * hard core unlimited
+ * soft nofile 524288
+ * hard nofile 524288
+ * soft nproc 131072
+ * hard nproc 131072
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-sysctl.conf 
b/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-sysctl.conf
new file mode 100644
index 00000000000..ed806a5d8d1
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/90-cbdb-sysctl.conf
@@ -0,0 +1,35 @@
+######################
+# CBDB CONFIG PARAMS #
+######################
+
+kernel.shmmax = 1000000000
+kernel.shmall = 4000000000
+kernel.shmmni = 4096
+vm.overcommit_memory = 2 
+vm.overcommit_ratio = 95
+net.ipv4.ip_local_port_range = 10000 65535
+kernel.sem = 250 2048000 200 8192
+kernel.sysrq = 1
+kernel.core_uses_pid = 1
+kernel.msgmnb = 65536
+kernel.msgmax = 65536
+kernel.msgmni = 2048
+net.ipv4.tcp_syncookies = 1
+net.ipv4.conf.default.accept_source_route = 0
+net.ipv4.tcp_max_syn_backlog = 4096
+net.ipv4.conf.all.arp_filter = 1
+net.ipv4.ipfrag_high_thresh = 41943040
+net.ipv4.ipfrag_low_thresh = 31457280
+net.ipv4.ipfrag_time = 60
+net.core.netdev_max_backlog = 10000
+net.core.rmem_max = 2097152
+net.core.wmem_max = 2097152
+vm.swappiness = 10
+vm.zone_reclaim_mode = 0
+vm.dirty_expire_centisecs = 500
+vm.dirty_writeback_centisecs = 100
+vm.dirty_background_ratio = 0
+vm.dirty_ratio = 0
+vm.dirty_background_bytes = 1610612736
+vm.dirty_bytes = 4294967296
+kernel.core_pattern=/var/core/core.%h.%t
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_multinode 
b/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_multinode
new file mode 100644
index 00000000000..ef96d730068
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_multinode
@@ -0,0 +1,119 @@
+# FILE NAME: gpinitsystem_multinode
+
+# A configuration file is needed by the gpinitsystem utility.
+# This sample file initializes an Apache Cloudberry multi-node
+# cluster with one coordinator and two segment instances per segment
+# host, with mirroring. This file is referenced when you run gpinitsystem.
+
+################################################
+# REQUIRED PARAMETERS
+################################################
+
+# A name for the array you are configuring. You can use any name you
+# like. Enclose the name in quotes if the name contains spaces.
+
+ARRAY_NAME="Sandbox: Apache Cloudberry Cluster"
+
+# This specifies the file that contains the list of segment host names
+# that comprise the Greenplum system. For a single-node system, this
+# file contains the local OS-configured hostname (as output by the
+# hostname command). If the file does not reside in the same
+# directory where the gpinitsystem utility is executed, specify
+# the absolute path to the file.
+
+MACHINE_LIST_FILE=/tmp/gpdb-hosts
+
+# This specifies a prefix that will be used to name the data directories
+# of the coordinator and segment instances. The naming convention for data
+# directories in a Apache Cloudberry system is SEG_PREFIX<number>
+# where <number> starts with 0 for segment instances and the coordinator
+# is always -1. So for example, if you choose the prefix gpsne, your
+# coordinator instance data directory would be named gpsne-1, and the segment
+# instances would be named gpsne0, gpsne1, gpsne2, gpsne3, and so on.
+
+SEG_PREFIX=gpseg
+
+# Base port number on which primary segment instances will be
+# started on a segment host. The base port number will be
+# incremented by one for each segment instance started on a host.
+
+PORT_BASE=40000
+
+# This specifies the data storage location(s) where the script will
+# create the primary segment data directories. The script creates a
+# unique data directory for each segment instance. If you want multiple
+# segment instances per host, list a data storage area for each primary
+# segment you want created. The recommended number is one primary segment
+# per CPU. It is OK to list the same data storage area multiple times
+# if you want your data directories created in the same location. The
+# number of data directory locations specified will determine the number
+# of primary segment instances created per host.
+# You must make sure that the user who runs gpinitsystem (for example,
+# the gpadmin user) has permissions to write to these directories. You
+# may want to create these directories on the segment hosts before running
+# gpinitsystem and chown them to the appropriate user.
+
+declare -a DATA_DIRECTORY=(/data0/database/primary \
+                           /data0/database/primary)
+
+# The OS-configured hostname of the Apache Cloudberry coordinator instance.
+
+COORDINATOR_HOSTNAME=cdw
+
+# The location where the data directory will be created on the
+# Greenplum coordinator host.
+# You must make sure that the user who runs gpinitsystem
+# has permissions to write to this directory. You may want to
+# create this directory on the coordinator host before running
+# gpinitsystem and chown it to the appropriate user.
+
+COORDINATOR_DIRECTORY=/data0/database/coordinator
+
+# The port number for the coordinator instance. This is the port number
+# that users and client connections will use when accessing the
+# Apache Cloudberry system.
+
+COORDINATOR_PORT=5432
+
+# The shell the gpinitsystem script uses to execute
+# commands on remote hosts. Allowed value is ssh. You must set up
+# your trusted host environment before running the gpinitsystem
+# script. You can use gpssh-exkeys to do this.
+
+TRUSTED_SHELL=ssh
+
+# Maximum distance between automatic write ahead log (WAL)
+# checkpoints, in log file segments (each segment is normally 16
+# megabytes). This will set the checkpoint_segments parameter
+# in the postgresql.conf file for each segment instance in the
+# Apache Cloudberry system.
+
+CHECK_POINT_SEGMENTS=8
+
+# The character set encoding to use. Greenplum supports the
+# same character sets as PostgreSQL. See 'Character Set Support'
+# in the PostgreSQL documentation for allowed character sets.
+# Should correspond to the OS locale specified with the
+# gpinitsystem -n option.
+
+ENCODING=UNICODE
+
+################################################
+# OPTIONAL PARAMETERS
+################################################
+
+# Optional. Uncomment to create a database of this name after the
+# system is initialized. You can always create a database later using
+# the CREATE DATABASE command or the createdb script.
+
+DATABASE_NAME=gpadmin
+
+# Mirror configuration
+
+MIRROR_PORT_BASE=50000
+
+declare -a MIRROR_DATA_DIRECTORY=(/data0/database/mirror \
+                                  /data0/database/mirror)
+
+# REPLICATION_PORT_BASE=41000
+# MIRROR_REPLICATION_PORT_BASE=51000
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_singlenode 
b/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_singlenode
new file mode 100644
index 00000000000..baaaebfba83
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/gpinitsystem_singlenode
@@ -0,0 +1,121 @@
+# FILE NAME: gpinitsystem_singlenode
+
+# A configuration file is needed by the gpinitsystem utility.
+# This sample file initializes an Apache Cloudberry Single Node
+# Edition (SNE) system with one coordinator and three segment instances
+# on the local host. This file is referenced when you run gpinitsystem.
+
+################################################
+# REQUIRED PARAMETERS
+################################################
+
+# A name for the array you are configuring. You can use any name you
+# like. Enclose the name in quotes if the name contains spaces.
+
+ARRAY_NAME="Sandbox: Apache Cloudberry Cluster"
+
+# This specifies the file that contains the list of segment host names
+# that comprise the Greenplum system. For a single-node system, this
+# file contains the local OS-configured hostname (as output by the
+# hostname command). If the file does not reside in the same
+# directory where the gpinitsystem utility is executed, specify
+# the absolute path to the file.
+
+MACHINE_LIST_FILE=/tmp/gpdb-hosts
+
+# This specifies a prefix that will be used to name the data directories
+# of the coordinator and segment instances. The naming convention for data
+# directories in an Apache Cloudberry system is SEG_PREFIX<number>
+# where <number> starts with 0 for segment instances and the coordinator
+# is always -1. So for example, if you choose the prefix gpsne, your
+# coordinator instance data directory would be named gpsne-1, and the segment
+# instances would be named gpsne0, gpsne1, gpsne2, gpsne3, and so on.
+
+SEG_PREFIX=gpseg
+
+# Base port number on which primary segment instances will be
+# started on a segment host. The base port number will be
+# incremented by one for each segment instance started on a host.
+
+PORT_BASE=40000
+
+# This specifies the data storage location(s) where the script will
+# create the primary segment data directories. The script creates a
+# unique data directory for each segment instance. If you want multiple
+# segment instances per host, list a data storage area for each primary
+# segment you want created. The recommended number is one primary segment
+# per CPU. It is OK to list the same data storage area multiple times
+# if you want your data directories created in the same location. The
+# number of data directory locations specified will determine the number
+# of primary segment instances created per host.
+# You must make sure that the user who runs gpinitsystem (for example,
+# the gpadmin user) has permissions to write to these directories. You
+# may want to create these directories on the segment hosts before running
+# gpinitsystem and chown them to the appropriate user.
+
+declare -a DATA_DIRECTORY=(/data0/database/primary \
+                           /data0/database/primary \
+                           /data0/database/primary)
+
+# The OS-configured hostname of the Apache Cloudberry coordinator instance.
+
+COORDINATOR_HOSTNAME=cdw
+
+# The location where the data directory will be created on the
+# Greenplum coordinator host.
+# You must make sure that the user who runs gpinitsystem
+# has permissions to write to this directory. You may want to
+# create this directory on the coordinator host before running
+# gpinitsystem and chown it to the appropriate user.
+
+COORDINATOR_DIRECTORY=/data0/database/coordinator
+
+# The port number for the coordinator instance. This is the port number
+# that users and client connections will use when accessing the
+# Apache Cloudberry system.
+
+COORDINATOR_PORT=5432
+
+# The shell the gpinitsystem script uses to execute
+# commands on remote hosts. Allowed value is ssh. You must set up
+# your trusted host environment before running the gpinitsystem
+# script. You can use gpssh-exkeys to do this.
+
+TRUSTED_SHELL=ssh
+
+# Maximum distance between automatic write ahead log (WAL)
+# checkpoints, in log file segments (each segment is normally 16
+# megabytes). This will set the checkpoint_segments parameter
+# in the postgresql.conf file for each segment instance in the
+# Apache Cloudberry system.
+
+CHECK_POINT_SEGMENTS=8
+
+# The character set encoding to use. Greenplum supports the
+# same character sets as PostgreSQL. See 'Character Set Support'
+# in the PostgreSQL documentation for allowed character sets.
+# Should correspond to the OS locale specified with the
+# gpinitsystem -n option.
+
+ENCODING=UNICODE
+
+################################################
+# OPTIONAL PARAMETERS
+################################################
+
+# Optional. Uncomment to create a database of this name after the
+# system is initialized. You can always create a database later using
+# the CREATE DATABASE command or the createdb script.
+
+DATABASE_NAME=gpadmin
+
+# Mirror configuration
+
+MIRROR_PORT_BASE=50000
+
+declare -a MIRROR_DATA_DIRECTORY=(/data0/database/mirror \
+                                  /data0/database/mirror \
+                                  /data0/database/mirror)
+
+# REPLICATION_PORT_BASE=41000
+# MIRROR_REPLICATION_PORT_BASE=51000
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/init_system.sh 
b/devops/sandbox/000-cbdb-sandbox/configs/init_system.sh
new file mode 100755
index 00000000000..73dafa9b001
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/init_system.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+## ======================================================================
+## Container initialization script
+## ======================================================================
+
+# ----------------------------------------------------------------------
+# Start SSH daemon and setup for SSH access
+# ----------------------------------------------------------------------
+# The SSH daemon is started to allow remote access to the container via
+# SSH. This is useful for development and debugging purposes. If the SSH
+# daemon fails to start, the script exits with an error.
+# ----------------------------------------------------------------------
+if ! sudo /usr/sbin/sshd; then
+    echo "Failed to start SSH daemon" >&2
+    exit 1
+fi
+
+# ----------------------------------------------------------------------
+# Remove /run/nologin to allow logins
+# ----------------------------------------------------------------------
+# The /run/nologin file, if present, prevents users from logging into
+# the system. This file is removed to ensure that users can log in via SSH.
+# ----------------------------------------------------------------------
+sudo rm -rf /run/nologin
+
+# ## Set gpadmin ownership - Cloudberry install directory and supporting
+# ## cluster creation files.
+sudo chown -R gpadmin.gpadmin /usr/local/cloudberry-db \
+                              /tmp/gpinitsystem_singlenode \
+                              /tmp/gpinitsystem_multinode \
+                              /tmp/gpdb-hosts \
+                              /tmp/multinode-gpinit-hosts \
+                              /tmp/faa.tar.gz \
+                              /tmp/smoke-test.sh
+
+# ----------------------------------------------------------------------
+# Configure passwordless SSH access for 'gpadmin' user
+# ----------------------------------------------------------------------
+# The script sets up SSH key-based authentication for the 'gpadmin' user,
+# allowing passwordless SSH access. It generates a new SSH key pair if one
+# does not already exist, and configures the necessary permissions.
+# ----------------------------------------------------------------------
+mkdir -p /home/gpadmin/.ssh
+chmod 700 /home/gpadmin/.ssh
+
+if [ ! -f /home/gpadmin/.ssh/id_rsa ]; then
+    ssh-keygen -t rsa -b 4096 -C gpadmin -f /home/gpadmin/.ssh/id_rsa -P "" > 
/dev/null 2>&1
+fi
+
+cat /home/gpadmin/.ssh/id_rsa.pub >> /home/gpadmin/.ssh/authorized_keys
+chmod 600 /home/gpadmin/.ssh/authorized_keys
+
+# Add the container's hostname to the known_hosts file to avoid SSH warnings
+ssh-keyscan -t rsa cdw > /home/gpadmin/.ssh/known_hosts 2>/dev/null
+
+# Source Cloudberry environment variables and set
+# COORDINATOR_DATA_DIRECTORY
+source /usr/local/cloudberry-db/greenplum_path.sh
+export COORDINATOR_DATA_DIRECTORY=/data0/database/coordinator/gpseg-1
+
+# Initialize single node Cloudberry cluster
+if [[ $MULTINODE == "false" && $HOSTNAME == "cdw" ]]; then
+    gpinitsystem -a \
+                 -c /tmp/gpinitsystem_singlenode \
+                 -h /tmp/gpdb-hosts \
+                 --max_connections=100
+# Initialize multi node Cloudberry cluster
+elif [[ $MULTINODE == "true" && $HOSTNAME == "cdw" ]]; then
+    sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no sdw1
+    sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no sdw2
+    sshpass -p "cbdb@123" ssh-copy-id -o StrictHostKeyChecking=no scdw
+    gpinitsystem -a \
+                 -c /tmp/gpinitsystem_multinode \
+                 -h /tmp/multinode-gpinit-hosts \
+                 --max_connections=100
+    gpinitstandby -s scdw -a
+    printf "sdw1\nsdw2\n" >> /tmp/gpdb-hosts
+fi
+
+if [ $HOSTNAME == "cdw" ]; then
+     ## Allow any host to access the Cloudberry Cluster
+     echo 'host all all 0.0.0.0/0 trust' >> 
/data0/database/coordinator/gpseg-1/pg_hba.conf
+     gpstop -u
+
+     psql -d template1 \
+          -c "ALTER USER gpadmin PASSWORD 'cbdb@123'"
+
+     cat <<-'EOF'
+
+======================================================================
+         ____ _                 _ _                          
+        / ___| | ___  _   _  __| | |__   ___ _ __ _ __ _   _  
+       | |   | |/ _ \| | | |/ _` | '_ \ / _ \ '__| '__| | | |
+       | |___| | (_) | |_| | (_| | |_) |  __/ |  | |  | |_| |
+        \____|_|\___/ \__,_|\__,_|_.__/ \___|_|  |_|   \__, |
+                                                       |___/
+======================================================================
+EOF
+
+     cat <<-'EOF'
+
+======================================================================
+Sandbox: Apache Cloudberry Cluster details
+======================================================================
+
+EOF
+
+     echo "Current time: $(date)"
+     source /etc/os-release
+     echo "OS Version: ${NAME} ${VERSION}"
+
+     ## Display Cloudberry version and cluster configuration (gpadmin password was set above)
+     psql -P pager=off -d template1 -c "SELECT VERSION()"
+     psql -P pager=off -d template1 -c "SELECT * FROM gp_segment_configuration 
ORDER BY dbid"
+     psql -P pager=off -d template1 -c "SHOW optimizer"
+fi
+
+echo """
+===========================
+=  DEPLOYMENT SUCCESSFUL  =
+===========================
+"""
+
+# ----------------------------------------------------------------------
+# Start an interactive bash shell
+# ----------------------------------------------------------------------
+# Finally, the script starts an interactive bash shell to keep the
+# container running and allow the user to interact with the environment.
+# ----------------------------------------------------------------------
+/bin/bash
diff --git a/devops/sandbox/000-cbdb-sandbox/configs/multinode-gpinit-hosts 
b/devops/sandbox/000-cbdb-sandbox/configs/multinode-gpinit-hosts
new file mode 100644
index 00000000000..a85b4c3f097
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/configs/multinode-gpinit-hosts
@@ -0,0 +1,2 @@
+sdw1
+sdw2
\ No newline at end of file
diff --git a/devops/sandbox/000-cbdb-sandbox/docker-compose-rockylinux9.yml 
b/devops/sandbox/000-cbdb-sandbox/docker-compose-rockylinux9.yml
new file mode 100644
index 00000000000..37cccc98ca6
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/docker-compose-rockylinux9.yml
@@ -0,0 +1,45 @@
+services:
+  cbdb-coordinator:
+    container_name: cbdb-cdw
+    image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
+    ports:
+    - "15432:5432"
+    hostname: cdw
+    tty: true
+    networks:
+      interconnect:
+        ipv4_address: 10.5.0.10
+    environment:
+      MULTINODE: "true"
+  cbdb-standby-coordinator:
+    container_name: cbdb-scdw
+    image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
+    hostname: scdw
+    tty: true
+    networks:
+      interconnect:
+        ipv4_address: 10.5.0.11
+  cbdb-segment-host-1:
+    container_name: cbdb-sdw1
+    image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
+    hostname: sdw1
+    tty: true
+    networks:
+      interconnect:
+        ipv4_address: 10.5.0.12
+  cbdb-segment-host-2:
+    container_name: cbdb-sdw2
+    image: cbdb-${CODEBASE_VERSION}:${OS_VERSION}
+    hostname: sdw2
+    tty: true
+    networks:
+      interconnect:
+        ipv4_address: 10.5.0.13
+networks:
+  interconnect:
+    name: cbdb-interconnect
+    driver: bridge
+    ipam:
+     config:
+       - subnet: 10.5.0.0/16
+         gateway: 10.5.0.1
diff --git a/devops/sandbox/000-cbdb-sandbox/run.sh 
b/devops/sandbox/000-cbdb-sandbox/run.sh
new file mode 100755
index 00000000000..d42b55e1ecf
--- /dev/null
+++ b/devops/sandbox/000-cbdb-sandbox/run.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+set -eu
+
+# Default values
+DEFAULT_OS_VERSION="rockylinux9"
+DEFAULT_TIMEZONE_VAR="America/Los_Angeles"
+DEFAULT_PIP_INDEX_URL_VAR="https://pypi.org/simple";
+BUILD_ONLY="false"
+MULTINODE="false"
+
+# Use environment variables if set, otherwise use default values
+# Export set for some variables to be used referenced docker compose file
+export OS_VERSION="${OS_VERSION:-$DEFAULT_OS_VERSION}"
+BUILD_ONLY="${BUILD_ONLY:-false}"
+export CODEBASE_VERSION="${CODEBASE_VERSION:-}"
+TIMEZONE_VAR="${TIMEZONE_VAR:-$DEFAULT_TIMEZONE_VAR}"
+PIP_INDEX_URL_VAR="${PIP_INDEX_URL_VAR:-$DEFAULT_PIP_INDEX_URL_VAR}"
+
+# Function to display help message
+function usage() {
+    echo "Usage: $0 [-o <os_version>] [-c <codebase_version>] [-b] [-m]"
+    echo "  -c  Codebase version (valid values: main, or determined from 
release zip file name)"
+    echo "  -t  Timezone (default: America/Los_Angeles, or set via 
TIMEZONE_VAR environment variable)"
+    echo "  -p  Python Package Index (PyPI) (default: https://pypi.org/simple, 
or set via PIP_INDEX_URL_VAR environment variable)"
+    echo "  -b  Build only, do not run the container (default: false, or set 
via BUILD_ONLY environment variable)"
+    echo "  -m  Multinode, this creates a multinode (multi-container) 
Cloudberry cluster using docker compose (requires compose to be installed)"
+    exit 1
+}
+
+# Parse command-line options
+while getopts "c:t:p:bmh" opt; do
+    case "${opt}" in
+        c)
+            CODEBASE_VERSION=${OPTARG}
+            ;;
+        t)
+            TIMEZONE_VAR=${OPTARG}
+            ;;
+        p)
+            PIP_INDEX_URL_VAR=${OPTARG}
+            ;;
+        b)
+            BUILD_ONLY="true"
+            ;;
+        m)
+            MULTINODE="true"
+            ;;
+        h)
+            usage
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+if [[ $MULTINODE == "true" ]] && ! docker compose version; then
+        echo "Error: Multinode -m flag found in run arguments but calling 
docker compose failed. Please install Docker Compose by following the 
instructions at https://docs.docker.com/compose/install/. Exiting"
+        exit 1
+fi
+
+if [[ "${MULTINODE}" == "true" && "${BUILD_ONLY}" == "true" ]]; then
+    echo "Error: Cannot pass both multinode deployment [m] and build only [b] 
flags together"
+    exit 1
+fi
+
+# If CODEBASE_VERSION is not specified, determine it from the file name
+if [[ -z "$CODEBASE_VERSION" ]]; then
+    BASE_CODEBASE_FILE=$(ls configs/cloudberrydb-*.zip 2>/dev/null)
+
+    if [[ -z "$BASE_CODEBASE_FILE" ]]; then
+        echo "Error: No configs/cloudberrydb-*.zip file found and codebase 
version not specified."
+        exit 1
+    fi
+
+    CODEBASE_FILE=$(basename ${BASE_CODEBASE_FILE})
+
+    if [[ $CODEBASE_FILE =~ cloudberrydb-([0-9]+\.[0-9]+\.[0-9]+)\.zip ]]; then
+        CODEBASE_VERSION="${BASH_REMATCH[1]}"
+    else
+        echo "Error: Cannot extract version from file name $CODEBASE_FILE"
+        exit 1
+    fi
+fi
+
+# Validate OS_VERSION and map to appropriate Docker image
+case "${OS_VERSION}" in
+    rockylinux9)
+        OS_DOCKER_IMAGE="rockylinux9"
+        ;;
+    *)
+        echo "Invalid OS version: ${OS_VERSION}"
+        usage
+        ;;
+esac
+
+# Validate CODEBASE_VERSION
+if [[ "${CODEBASE_VERSION}" != "main" && ! "${CODEBASE_VERSION}" =~ 
^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
+    echo "Invalid codebase version: ${CODEBASE_VERSION}"
+    usage
+fi
+
+# Build image
+if [[ "${CODEBASE_VERSION}" = "main"  ]]; then
+    DOCKERFILE=Dockerfile.${CODEBASE_VERSION}.${OS_VERSION}
+
+    docker build --file ${DOCKERFILE} \
+                 --build-arg TIMEZONE_VAR="${TIMEZONE_VAR}" \
+                 --tag cbdb-${CODEBASE_VERSION}:${OS_VERSION} .
+else
+    DOCKERFILE=Dockerfile.RELEASE.${OS_VERSION}
+
+    docker build --file ${DOCKERFILE} \
+                 --build-arg TIMEZONE_VAR="${TIMEZONE_VAR}" \
+                 --build-arg PIP_INDEX_URL_VAR="${PIP_INDEX_URL_VAR}" \
+                 --build-arg CODEBASE_VERSION_VAR="${CODEBASE_VERSION}" \
+                 --tag cbdb-${CODEBASE_VERSION}:${OS_VERSION} .
+fi
+
+# Check if build only flag is set
+if [ "${BUILD_ONLY}" == "true" ]; then
+    echo "Docker image built successfully with OS version ${OS_VERSION} and 
codebase version ${CODEBASE_VERSION}. Build only mode, not running the 
container."
+    exit 0
+fi
+
+# Deploy container(s)
+if [ "${MULTINODE}" == "true" ]; then
+    docker compose -f docker-compose-$OS_VERSION.yml up --detach
+else
+    docker run --interactive \
+           --tty \
+           --name cbdb-cdw \
+           --detach \
+           --volume /sys/fs/cgroup:/sys/fs/cgroup:ro \
+           --publish 122:22 \
+           --publish 15432:5432 \
+           --hostname cdw \
+           cbdb-${CODEBASE_VERSION}:${OS_VERSION}
+fi
diff --git a/devops/sandbox/images/sandbox-deployment.jpg 
b/devops/sandbox/images/sandbox-deployment.jpg
new file mode 100644
index 00000000000..bb1b2dc0741
Binary files /dev/null and b/devops/sandbox/images/sandbox-deployment.jpg differ


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to