This is an automated email from the ASF dual-hosted git repository.

vega pushed a commit to branch ci-docker-staging
in repository https://gitbox.apache.org/repos/asf/tvm-vta.git


The following commit(s) were added to refs/heads/ci-docker-staging by this push:
     new 112eb31  update docker bash
112eb31 is described below

commit 112eb3159b53db002ef201ecc25e097ac48acd52
Author: Luis Vega <[email protected]>
AuthorDate: Sun Aug 29 18:45:18 2021 +0000

    update docker bash
---
 tests/scripts/docker_bash.sh | 437 ++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 385 insertions(+), 52 deletions(-)

diff --git a/tests/scripts/docker_bash.sh b/tests/scripts/docker_bash.sh
index cdda5d4..2a05abf 100755
--- a/tests/scripts/docker_bash.sh
+++ b/tests/scripts/docker_bash.sh
@@ -17,79 +17,412 @@
 # specific language governing permissions and limitations
 # under the License.
 
-# Docker bash script used to execute a command within a container.
 #
-if [ "$#" -lt 1 ]; then
-    echo "Usage: tests/script/docker_bash.sh <CONTAINER_NAME> [COMMAND]"
-    exit -1
+# Start a bash session, mounting the repository at REPO_MOUNT_POINT and using it as the working directory.
+#
+# Usage: docker/bash.sh [-i|--interactive] [--net=host] [-t|--tty]
+#          [--mount MOUNT_DIR] [--repo-mount-point REPO_MOUNT_POINT]
+#          [--dry-run]
+#          <DOCKER_IMAGE_NAME> [--] [COMMAND]
+#
+# Usage: docker/bash.sh <CONTAINER_NAME>
+#     Starts an interactive session
+#
+# Usage2: docker/bash.sh [-i] <CONTAINER_NAME> [COMMAND]
+#     Execute command in the docker image, default non-interactive
+#     With -i, execute interactively.
+#
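+# Example invocations (illustrative only; "ci_gpu" stands in for any docker
+# image name or Jenkinsfile shortcut):
+#   docker/bash.sh -i ci_gpu          # interactive session in the image
+#   docker/bash.sh --dry-run ci_gpu   # print the docker command without running it
+#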
+
+set -euo pipefail
+
+
+function show_usage() {
+    cat <<EOF
+Usage: docker/bash.sh [-i|--interactive] [--net=host]
+         [--mount MOUNT_DIR] [--repo-mount-point REPO_MOUNT_POINT]
+         [--dry-run]
+         <DOCKER_IMAGE_NAME> [--] [COMMAND]
+
+-h, --help
+
+    Display this help message.
+
+-i, --interactive
+
+    Start the docker session in interactive mode.
+
+-t, --tty
+
+    Start the docker session with a pseudo terminal (tty).
+
+--net=host
+
+    Expose servers running inside the container to the host by
+    passing the "--net=host" argument through to docker.  On MacOS,
+    "-p 8888:8888" is passed instead, since the host networking
+    driver isn't supported.
+
+--mount MOUNT_DIR
+
+    Expose MOUNT_DIR as an additional mount point inside the docker
+    container.  The mount point inside the container is the same as
+    the folder location outside the container.  This option can be
+    specified multiple times.
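+
+    For example, "--mount /data --mount /scratch" (hypothetical host
+    paths) would make both directories visible at the same locations
+    inside the container.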
+
+--repo-mount-point REPO_MOUNT_POINT
+
+    The directory inside the docker container at which the TVM
+    repository should be mounted; this directory is used as the
+    workspace inside the docker container.
+
+    If unspecified, the mount location depends on the environment.  If
+    running inside Jenkins, the mount location will be /workspace.
+    Otherwise, the mount location of the repository will be the same
+    as the external location of the repository, to maintain
+    compatibility with git-worktree.
+
+--dry-run
+
+    Print the docker command to be run, but do not execute it.
+
+DOCKER_IMAGE_NAME
+
+    The name of the docker image to be run.  This can be an
+    explicit name of a docker image (e.g. "tlcpack/ci-gpu:v0.76") or
+    can be a shortcut as defined in the TVM Jenkinsfile
+    (e.g. "ci_gpu").
+
+COMMAND
+
+    The command to be run inside the docker container.  If this is set
+    to "bash", both the --interactive and --net=host flags are set.
+    If no command is specified, defaults to "bash".  If the command
+    contains dash-prefixed arguments, the command should be preceded
+    by -- to indicate arguments that are not intended for bash.sh.
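+
+    For example: docker/bash.sh ci_gpu -- build -j2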
+
+EOF
+}
+
+
+#################################
+### Start of argument parsing ###
+#################################
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)"
+REPO_DIR="$(dirname "${SCRIPT_DIR}")"
+
+DRY_RUN=false
+INTERACTIVE=false
+TTY=false
+USE_NET_HOST=false
+DOCKER_IMAGE_NAME=
+COMMAND=bash
+MOUNT_DIRS=( )
+
+# TODO(Lunderberg): Remove this if statement and always set to
+# "${REPO_DIR}".  The consistent directory for Jenkins is currently
+# necessary to allow cmake build commands to run in CI after the build
+# steps.
+if [[ -n "${JENKINS_HOME:-}" ]]; then
+    REPO_MOUNT_POINT=/workspace
+else
+    REPO_MOUNT_POINT="${REPO_DIR}"
 fi
 
-DOCKER_IMAGE_NAME=("$1")
 
-if [ "$#" -eq 1 ]; then
-    COMMAND="bash"
+function parse_error() {
+    echo "$@" >&2
+    show_usage >&2
+    exit 1
+}
+
+# Handle joined flags, such as interpreting -ih as -i -h.  Either rewrites
+# the current argument if it is a joined argument, or shifts all arguments
+# otherwise.  Should be called as "eval $break_joined_flag" where joined
+# flags are possible.  Can't use a function definition, because it needs
+# to overwrite the parent scope's behavior.
+break_joined_flag='if (( ${#1} == 2 )); then shift; else set -- -"${1#-i}" "${@:2}"; fi'
+
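+# For example (a minimal trace of the logic above): a joined "-it" is handled
+# in two passes.  The "-i*" case sets INTERACTIVE=true and the snippet above
+# rewrites "$1" to "-t", which the "-t*" case then consumes on the next pass.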
+
+while (( $# )); do
+    case "$1" in
+        -h|--help)
+            show_usage
+            exit 0
+            ;;
+
+        -i*|--interactive)
+            INTERACTIVE=true
+            eval $break_joined_flag
+            ;;
+
+        -t*|--tty)
+            TTY=true
+            eval $break_joined_flag
+            ;;
+
+        --net=host)
+            USE_NET_HOST=true
+            shift
+            ;;
+
+        --mount)
+            if [[ -n "$2" ]]; then
+                MOUNT_DIRS+=("$2")
+                shift 2
+            else
+                parse_error 'ERROR: --mount requires a non-empty argument'
+            fi
+            ;;
+
+        --mount=?*)
+            MOUNT_DIRS+=("${1#*=}")
+            shift
+            ;;
+
+        --dry-run)
+            DRY_RUN=true
+            shift
+            ;;
+
+        --repo-mount-point)
+            if [[ -n "$2" ]]; then
+                REPO_MOUNT_POINT="$2"
+                shift 2
+            else
+                parse_error 'ERROR: --repo-mount-point requires a non-empty argument'
+            fi
+            ;;
+
+        --repo-mount-point=?*)
+            REPO_MOUNT_POINT="${1#*=}"
+            shift
+            ;;
+
+        --)
+            shift
+            COMMAND=( "$@" )
+            break
+            ;;
+
+        -*|--*)
+            echo "Error: Unknown flag: $1" >&2
+            echo "  If this flag is intended to be passed to the" >&2
+            echo "  docker command, please add -- before the docker" >&2
+            echo "  command (e.g. docker/bash.sh ci_gpu -- build -j2)" >&2
+            show_usage >&2
+            exit 1
+            ;;
+
+        *)
+            # The first positional argument is the image name; all
+            # remaining arguments belong to the COMMAND.
+            if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
+                DOCKER_IMAGE_NAME=$1
+                shift
+            else
+                COMMAND=( "$@" )
+                break
+            fi
+            ;;
+    esac
+done
+
+if [[ -z "${DOCKER_IMAGE_NAME}" ]]; then
+    echo "Error: Missing DOCKER_IMAGE_NAME" >&2
+    show_usage >&2
+    exit 1
+fi
+
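+# As described in the help text above: if the command is exactly "bash" (the
+# default), assume an interactive session is wanted and expose host networking.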
+if [[ ${COMMAND[@]+"${COMMAND[@]}"} = bash ]]; then
+    INTERACTIVE=true
+    USE_NET_HOST=true
+fi
+
+
+
+###############################
+### End of argument parsing ###
+###############################
+
+source "$(dirname $0)/dev_common.sh" || exit 2
+
+DOCKER_FLAGS=( )
+DOCKER_ENV=( )
+DOCKER_MOUNT=( )
+DOCKER_DEVICES=( )
+
+
+# If the user gave a shortcut defined in the Jenkinsfile, use it.
+EXPANDED_SHORTCUT=$(lookup_image_spec "${DOCKER_IMAGE_NAME}")
+if [ -n "${EXPANDED_SHORTCUT}" ]; then
+    DOCKER_IMAGE_NAME="${EXPANDED_SHORTCUT}"
+fi
+
+# Set up working directories
+
+DOCKER_FLAGS+=( --workdir "${REPO_MOUNT_POINT}" )
+DOCKER_MOUNT+=( --volume "${REPO_DIR}":"${REPO_MOUNT_POINT}"
+                --volume "${SCRIPT_DIR}":/docker
+              )
+
+# Set up CI-specific environment variables
+DOCKER_ENV+=( --env CI_BUILD_HOME="${REPO_MOUNT_POINT}"
+              --env CI_BUILD_USER="$(id -u -n)"
+              --env CI_BUILD_UID="$(id -u)"
+              --env CI_BUILD_GROUP="$(id -g -n)"
+              --env CI_BUILD_GID="$(id -g)"
+              --env CI_PYTEST_ADD_OPTIONS="${CI_PYTEST_ADD_OPTIONS:-}"
+              --env CI_IMAGE_NAME="${DOCKER_IMAGE_NAME}"
+            )
+
+
+# Pass tvm test data folder through to the docker container, to avoid
+# repeated downloads.  Check if we have permissions to write to the
+# directory first, since the CI may not.
+TEST_DATA_PATH="${TVM_DATA_ROOT_PATH:-${HOME}/.tvm_test_data}"
+if [[ -d "${TEST_DATA_PATH}" && -w "${TEST_DATA_PATH}" ]]; then
+    DOCKER_MOUNT+=( --volume "${TEST_DATA_PATH}":"${REPO_MOUNT_POINT}"/.tvm_test_data )
+fi
+
+
+# Remove the container once it finishes running (--rm) and share the
+# PID namespace (--pid=host).  The process inside does not have pid 1
+# and SIGKILL is propagated to the process inside, allowing jenkins to
+# kill it if needed.
+DOCKER_FLAGS+=( --rm --pid=host)
+
+# Expose services running in container to the host.
+if $USE_NET_HOST; then
     if [[ $(uname) == "Darwin" ]]; then
         # Docker's host networking driver isn't supported on macOS.
         # Use default bridge network and expose port for jupyter notebook.
-        CI_DOCKER_EXTRA_PARAMS=("-it -p 8888:8888")
+        DOCKER_FLAGS+=( -p 8888:8888 )
     else
-        CI_DOCKER_EXTRA_PARAMS=("-it --net=host")
+        DOCKER_FLAGS+=(--net=host)
     fi
-else
-    shift 1
-    COMMAND=("$@")
 fi
 
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-WORKSPACE="$(pwd)"
+# Set up interactive sessions
+if ${INTERACTIVE}; then
+    DOCKER_FLAGS+=( --interactive )
+fi
 
-# Use nvidia-docker if the container is GPU.
-if [[ ! -z $CUDA_VISIBLE_DEVICES ]]; then
-    CUDA_ENV="-e CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}"
-else
-    CUDA_ENV=""
+if ${TTY}; then
+    DOCKER_FLAGS+=( --tty )
 fi
 
-if [[ "${DOCKER_IMAGE_NAME}" == *"gpu"* ]]; then
-    if ! type "nvidia-docker" 1> /dev/null 2> /dev/null
-    then
-        DOCKER_BINARY="docker"
-        CUDA_ENV=" --gpus all "${CUDA_ENV}
+# Expose external directories to the docker container
+for MOUNT_DIR in ${MOUNT_DIRS[@]+"${MOUNT_DIRS[@]}"}; do
+    DOCKER_MOUNT+=( --volume "${MOUNT_DIR}:${MOUNT_DIR}" )
+done
+
+# Use nvidia-docker for GPU containers.  If nvidia-docker is not
+# available, fall back to the "--gpus all" flag, which requires docker
+# version 19.03 or higher.
+if [[ "${DOCKER_IMAGE_NAME}" == *"gpu"* || "${DOCKER_IMAGE_NAME}" == *"cuda"* ]]; then
+    if type nvidia-docker 1> /dev/null 2> /dev/null; then
+        DOCKER_BINARY=nvidia-docker
     else
-        DOCKER_BINARY="nvidia-docker"
+        DOCKER_BINARY=docker
+        DOCKER_FLAGS+=( --gpus all )
     fi
+
+    # nvidia-docker treats Vulkan as a graphics API, so we need to
+    # request passthrough of graphics APIs.  This could also be set in
+    # the Dockerfile.
+    DOCKER_ENV+=( --env NVIDIA_DRIVER_CAPABILITIES=compute,graphics,utility )
+
+    # But as of nvidia-docker version 2.6.0-1, we still need to pass
+    # through the nvidia icd files ourselves.
+    ICD_SEARCH_LOCATIONS=(
+        # https://github.com/KhronosGroup/Vulkan-Loader/blob/master/loader/LoaderAndLayerInterface.md#icd-discovery-on-linux
+        /usr/local/etc/vulkan/icd.d
+        /usr/local/share/vulkan/icd.d
+        /etc/vulkan/icd.d
+        /usr/share/vulkan/icd.d
+        # https://github.com/NVIDIA/libglvnd/blob/master/src/EGL/icd_enumeration.md#icd-installation
+        /etc/glvnd/egl_vendor.d
+        /usr/share/glvnd/egl_vendor.d
+    )
+    for filename in $(find "${ICD_SEARCH_LOCATIONS[@]}" -name "*nvidia*.json" 2> /dev/null); do
+        DOCKER_MOUNT+=( --volume "${filename}":"${filename}":ro )
+    done
+
 else
-    DOCKER_BINARY="docker"
+    DOCKER_BINARY=docker
+fi
+
+
+
+# Pass any restrictions of allowed CUDA devices from the host to the
+# docker container.
+if [[ -n ${CUDA_VISIBLE_DEVICES:-} ]]; then
+    DOCKER_ENV+=( --env CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES}" )
+fi
+
+
+
+# Set TVM import path inside the docker image
+if [[ "${DOCKER_IMAGE_NAME}" == *"ci"* ]]; then
+    DOCKER_ENV+=( --env PYTHONPATH="${REPO_MOUNT_POINT}"/python )
+fi
+
+
+
+# If the Vitis-AI docker image is selected, expose the Xilinx FPGA
+# devices and required volumes containing e.g. DSA's and overlays
+if [[ "${DOCKER_IMAGE_NAME}" == *"demo_vitis_ai"* && -d "/dev/shm" && -d "/opt/xilinx/dsa" && -d "/opt/xilinx/overlaybins" ]]; then
+    DOCKER_MOUNT+=( --volume /dev/shm:/dev/shm
+                    --volume /opt/xilinx/dsa:/opt/xilinx/dsa
+                    --volume /opt/xilinx/overlaybins:/opt/xilinx/overlaybins
+                  )
+
+    XCLMGMT_DRIVER="$(find /dev -name xclmgmt\*)"
+    for DRIVER in "${XCLMGMT_DRIVER}"; do
+       DOCKER_DEVICES+=( --device="${DRIVER}" )
+    done
+
+    RENDER_DRIVER="$(find /dev/dri -name renderD\*)"
+    for DRIVER in "${RENDER_DRIVER}"; do
+        DOCKER_DEVICES+=( --device="${DRIVER}" )
+    done
+fi
+
+# Add ROCm devices and set ROCM_ENABLED=1 which is used in the with_the_same_user script
+# to add the user to the video group
+if [[ "${DOCKER_IMAGE_NAME}" == *"rocm"* && -d "/dev/dri" ]]; then
+    DOCKER_DEVICES+=( --device=/dev/kfd --device=/dev/dri )
+    DOCKER_ENV+=( --env ROCM_ENABLED=1 )
+fi
+
+# When running from a git worktree, also mount the original git dir.
+if [ -f "${REPO_DIR}/.git" ]; then
+    git_dir=$(cd ${REPO_DIR} && git rev-parse --git-common-dir)
+    if [ "${git_dir}" != "${REPO_DIR}/.git" ]; then
+        DOCKER_MOUNT+=( --volume "${git_dir}:${git_dir}" )
+    fi
 fi
 
 # Print arguments.
-echo "WORKSPACE: ${WORKSPACE}"
+echo "REPO_DIR: ${REPO_DIR}"
 echo "DOCKER CONTAINER NAME: ${DOCKER_IMAGE_NAME}"
 echo ""
 
-# FIXME(zhanghao): re-enable the tsim test after ISA is updated
-if [[ ${COMMAND[@]}  == "./tests/scripts/task_python_vta_tsim.sh" ]]; then
-  echo "Skip '${COMMAND[@]}'"
-  exit
-fi
+echo Running \'${COMMAND[@]+"${COMMAND[@]}"}\' inside ${DOCKER_IMAGE_NAME}...
+
 
-echo "Running '${COMMAND[@]}' inside ${DOCKER_IMAGE_NAME}..."
-
-# By default we cleanup - remove the container once it finish running (--rm)
-# and share the PID namespace (--pid=host) so the process inside does not have
-# pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it).
-${DOCKER_BINARY} run --rm --pid=host\
-    -v ${WORKSPACE}:/workspace \
-    -v ${SCRIPT_DIR}:/docker \
-    -w /workspace \
-    -e "CI_BUILD_HOME=/workspace" \
-    -e "CI_BUILD_USER=$(id -u -n)" \
-    -e "CI_BUILD_UID=$(id -u)" \
-    -e "CI_BUILD_GROUP=$(id -g -n)" \
-    -e "CI_BUILD_GID=$(id -g)" \
-    -e "PYTHONPATH=python:topi/python"\
-    ${CUDA_ENV}\
-    ${CI_DOCKER_EXTRA_PARAMS[@]} \
-    ${DOCKER_IMAGE_NAME}\
-    bash --login tests/scripts/with_the_same_user \
-    ${COMMAND[@]}
+DOCKER_CMD=(${DOCKER_BINARY} run
+            ${DOCKER_FLAGS[@]+"${DOCKER_FLAGS[@]}"}
+            ${DOCKER_ENV[@]+"${DOCKER_ENV[@]}"}
+            ${DOCKER_MOUNT[@]+"${DOCKER_MOUNT[@]}"}
+            ${DOCKER_DEVICES[@]+"${DOCKER_DEVICES[@]}"}
+            "${DOCKER_IMAGE_NAME}"
+            bash --login /docker/with_the_same_user
+            ${COMMAND[@]+"${COMMAND[@]}"}
+           )
+
+if ${DRY_RUN}; then
+    echo ${DOCKER_CMD[@]+"${DOCKER_CMD[@]}"}
+else
+    ${DOCKER_CMD[@]+"${DOCKER_CMD[@]}"}
+fi
