This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch HBASE-29930
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit d97c0a31e87ae8a23f2a798a76d89292c21c24a8
Author: Duo Zhang <[email protected]>
AuthorDate: Sat Feb 28 00:07:12 2026 +0800

    fix
---
 dev-support/integration-test.Jenkinsfile | 409 ++++++++++++++++---------------
 1 file changed, 210 insertions(+), 199 deletions(-)

diff --git a/dev-support/integration-test.Jenkinsfile 
b/dev-support/integration-test.Jenkinsfile
index b4632270ff8..0c65ab5a543 100644
--- a/dev-support/integration-test.Jenkinsfile
+++ b/dev-support/integration-test.Jenkinsfile
@@ -29,75 +29,81 @@ pipeline {
         dir('component') {
           checkout scm
         }
-    parallel {
-      stage ('hadoop 2 cache') {
-        steps {
-          // directory must be unique for each parallel stage, because jenkins 
runs them in the same workspace :(
-          dir('downloads-hadoop-2') {
-            sh '''#!/usr/bin/env bash
-              echo "Make sure we have a directory for downloading 
dependencies: $(pwd)"
-            '''
-          }
-          sh '''#!/usr/bin/env bash
-            set -e
-            echo "Ensure we have a copy of Hadoop ${HADOOP2_VERSION}"
-            
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
-              --working-dir "${WORKSPACE}/downloads-hadoop-2" \
-              --keys 'https://downloads.apache.org/hadoop/common/KEYS' \
-              --verify-tar-gz \
-              "${WORKSPACE}/hadoop-${HADOOP2_VERSION}-bin.tar.gz" \
-              
"hadoop/common/hadoop-${HADOOP2_VERSION}/hadoop-${HADOOP2_VERSION}.tar.gz"
-            for stale in $(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | grep -v 
${HADOOP2_VERSION}); do
-              echo "Delete stale hadoop 2 cache ${stale}"
-              rm -rf $stale
-            done
-          '''
-          stash name: 'hadoop-2', includes: 
"hadoop-${HADOOP2_VERSION}-bin.tar.gz"
-        }
-      } // hadoop 2 cache
-      stage ('hadoop 3 cache') {
-        steps {
-          script {
-            hadoop3_versions = env.HADOOP3_VERSIONS.split(",");
-            env.HADOOP3_VERSIONS_REGEX = "[" + hadoop3_versions.join("|") + 
"]";
-            for (hadoop3_version in hadoop3_versions) {
-              env.HADOOP3_VERSION = hadoop3_version;
-              echo "env.HADOOP3_VERSION" + env.hadoop3_version;
-              stage ('Hadoop 3 cache inner stage') {
-                // directory must be unique for each parallel stage, because 
jenkins runs them in the same workspace :(
-                dir("downloads-hadoop-${HADOOP3_VERSION}") {
+        parallel {
+          'hadoop 2 cache': {
+            stage ('hadoop 2 cache') {
+              agent {
+                node {
+                  label 'hbase'
+                }
+              }
+              steps {
+                dir('downloads-hadoop-2') {
                   sh '''#!/usr/bin/env bash
                     echo "Make sure we have a directory for downloading 
dependencies: $(pwd)"
                   '''
-                } //dir
+                }
                 sh '''#!/usr/bin/env bash
                   set -e
-                  echo "Ensure we have a copy of Hadoop ${HADOOP3_VERSION}"
+                  echo "Ensure we have a copy of Hadoop ${HADOOP2_VERSION}"
                   
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
-                    --working-dir 
"${WORKSPACE}/downloads-hadoop-${HADOOP3_VERSION}" \
+                    --working-dir "${WORKSPACE}/downloads-hadoop-2" \
                     --keys 'https://downloads.apache.org/hadoop/common/KEYS' \
                     --verify-tar-gz \
-                    "${WORKSPACE}/hadoop-${HADOOP3_VERSION}-bin.tar.gz" \
-                    
"hadoop/common/hadoop-${HADOOP3_VERSION}/hadoop-${HADOOP3_VERSION}.tar.gz"
-                  for stale in $(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | grep 
-v ${HADOOP3_VERSION}); do
-                    echo "Delete stale hadoop 3 cache ${stale}"
+                    "${WORKSPACE}/hadoop-${HADOOP2_VERSION}-bin.tar.gz" \
+                    
"hadoop/common/hadoop-${HADOOP2_VERSION}/hadoop-${HADOOP2_VERSION}.tar.gz"
+                  for stale in $(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | grep 
-v ${HADOOP2_VERSION}); do
+                    echo "Delete stale hadoop 2 cache ${stale}"
                     rm -rf $stale
                   done
                 '''
-                stash name: "hadoop-${HADOOP3_VERSION}", includes: 
"hadoop-${HADOOP3_VERSION}-bin.tar.gz"
+                stash name: 'hadoop-2', includes: 
"hadoop-${HADOOP2_VERSION}-bin.tar.gz"
+              }
+            }
+          },
+          'hadoop 3 cache': {
+            stage ('hadoop 3 cache') {
+              agent {
+                node {
+                  label 'hbase'
+                }
+              }
+              steps {
                 script {
-                  if (env.HADOOP3_VERSION == env.HADOOP3_DEFAULT_VERSION) {
-                    // FIXME: we never unstash this, because we run the 
packaging tests with the version-specific stashes
-                    stash(name: "hadoop-3", includes: 
"hadoop-${HADOOP3_VERSION}-bin.tar.gz")
-                  } // if
-                } // script
-              } // stage ('Hadoop 3 cache inner stage')
-            } // for
-          } // script
-        } // steps
-      } // stage ('hadoop 3 cache')
-    } // parallel
-    } // steps
+                  hadoop3_versions = env.HADOOP3_VERSIONS.split(",");
+                  env.HADOOP3_VERSIONS_REGEX = "[" + 
hadoop3_versions.join("|") + "]";
+                  for (hadoop3_version in hadoop3_versions) {
+                    env.HADOOP3_VERSION = hadoop3_version;
+                    echo "env.HADOOP3_VERSION" + env.hadoop3_version;
+                    stage ('Hadoop 3 cache inner stage') {
+                      dir("downloads-hadoop-${HADOOP3_VERSION}") {
+                        sh '''#!/usr/bin/env bash
+                          echo "Make sure we have a directory for downloading 
dependencies: $(pwd)"
+                        '''
+                      }
+                      sh '''#!/usr/bin/env bash
+                        set -e
+                        echo "Ensure we have a copy of Hadoop 
${HADOOP3_VERSION}"
+                        
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
 \
+                          --working-dir 
"${WORKSPACE}/downloads-hadoop-${HADOOP3_VERSION}" \
+                          --keys 
'https://downloads.apache.org/hadoop/common/KEYS' \
+                          --verify-tar-gz \
+                          "${WORKSPACE}/hadoop-${HADOOP3_VERSION}-bin.tar.gz" \
+                          
"hadoop/common/hadoop-${HADOOP3_VERSION}/hadoop-${HADOOP3_VERSION}.tar.gz"
+                        for stale in $(ls -1 "${WORKSPACE}"/hadoop-3*.tar.gz | 
grep -v ${HADOOP3_VERSION}); do
+                          echo "Delete stale hadoop 3 cache ${stale}"
+                          rm -rf $stale
+                        done
+                      '''
+                      stash name: "hadoop-${HADOOP3_VERSION}", includes: 
"hadoop-${HADOOP3_VERSION}-bin.tar.gz"
+                    }
+                  }
+                }
+              }
+            }
+          },
+        }
+    }
   } // stage ('thirdparty installs')
   // This is meant to mimic what a release manager will do to create RCs.
   // See http://hbase.apache.org/book.html#maven.release
@@ -180,71 +186,7 @@ pipeline {
         environment {
           OUTPUT_DIR = "output-integration-hadoop-${env.HADOOP2_VERSION}"
         }
-        sh '''#!/bin/bash -e
-          echo "Setting up directories"
-          rm -rf "${OUTPUT_DIR}"
-          echo "(x) {color:red}-1 client integration test{color}\n-- Something 
went wrong with this stage, [check relevant console 
output|${BUILD_URL}/console]." >${OUTPUT_DIR}/commentfile
-          rm -rf "hbase-install"
-          rm -rf "hbase-client"
-          rm -rf "hbase-hadoop3-install"
-          rm -rf "hbase-hadoop3-client"
-          # remove old hadoop tarballs in workspace
-          rm -rf hadoop-2*.tar.gz
-        '''
-        unstash 'hadoop-2'
-        unstash 'hbase-install'
-        sh '''#!/bin/bash -xe
-          if [[ "${BRANCH_NAME}" == *"branch-2"* ]]; then
-            echo "Attempting to run an instance on top of Hadoop 2."
-            hadoop_artifact=$(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | head -n 
1)
-            tar --strip-components=1 -xzf "${hadoop_artifact}" -C "hadoop-2"
-            install_artifact=$(ls -1 
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | 
grep -v client-bin | grep -v hadoop3)
-            tar --strip-component=1 -xzf "${install_artifact}" -C 
"hbase-install"
-            client_artifact=$(ls -1 
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-client-bin.tar.gz
 | grep -v hadoop3)
-            tar --strip-component=1 -xzf "${client_artifact}" -C "hbase-client"
-            docker build -t hbase-integration-test -f 
"${BASEDIR}/dev-support/docker/Dockerfile" .
-            docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
-              -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-8" 
--workdir=/hbase hbase-integration-test \
-              component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
-              --single-process \
-              --working-dir ${OUTPUT_DIR}/hadoop-2 \
-              --hbase-client-install "hbase-client" \
-              hbase-install \
-              hadoop-2/bin/hadoop \
-              hadoop-2/share/hadoop/yarn/timelineservice \
-              
hadoop-2/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-              
hadoop-2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
-              hadoop-2/bin/mapred \
-              >${OUTPUT_DIR}/hadoop-2.log 2>&1
-            if [ $? -ne 0 ]; then
-              echo "(x) {color:red}-1 client integration test{color}\n--Failed 
when running client tests on top of Hadoop 2. [see log for 
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop-2.log]. (note that this 
means we didn't run on Hadoop 3)" >${OUTPUT_DIR}/commentfile
-              exit 2
-            fi
-            echo "(/) {color:green}+1 client integration test for HBase 2 
{color}" >${OUTPUT_DIR}/commentfile
-          else
-            echo "Skipping to run against Hadoop 2 for branch ${BRANCH_NAME}"
-          fi
-        '''
-      }
-      stage('hadoop 3 matrix') {
-        axes {
-          axis {
-            name 'HADOOP_VERSION'
-            values getHadoopVersions(env.HADOOP3_VERSIONS)
-          }
-        }
-        agent {
-          node {
-            label 'hbase'
-          }
-        }
-        stage('hadoop 3 integration test') {
-          environment {
-            OUTPUT_DIR = "output-integration-hadoop-${env.HADOOP_VERSION}"
-          }
-          dir('component') {
-            checkout scm
-          }
+        steps {
           sh '''#!/bin/bash -e
             echo "Setting up directories"
             rm -rf "${OUTPUT_DIR}"
@@ -254,81 +196,152 @@ pipeline {
             rm -rf "hbase-hadoop3-install"
             rm -rf "hbase-hadoop3-client"
             # remove old hadoop tarballs in workspace
-            rm -rf hadoop-3*.tar.gz
+            rm -rf hadoop-2*.tar.gz
           '''
-          unstash "hadoop-" + ${HADOOP_VERSION}
+          unstash 'hadoop-2'
           unstash 'hbase-install'
-          sh '''#!/bin/bash -e
-            echo "Attempting to use run an instance on top of Hadoop 
${HADOOP_VERSION}."
-            # Clean up any previous tested Hadoop3 files before unpacking the 
current one
-            rm -rf hadoop-3/*
-            # Create working dir
-            rm -rf "${OUTPUT_DIR}/non-shaded" && mkdir 
"${OUTPUT_DIR}/non-shaded"
-            rm -rf "${OUTPUT_DIR}/shaded" && mkdir "${OUTPUT_DIR}/shaded"
-            artifact=$(ls -1 
"${WORKSPACE}"/hadoop-${HADOOP3_VERSION}-bin.tar.gz | head -n 1)
-            tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3"
-            # we need to patch some files otherwise minicluster will fail to 
start, see MAPREDUCE-7471
-            ${BASEDIR}/dev-support/patch-hadoop3.sh hadoop-3
-            hbase_install_dir="hbase-install"
-            hbase_client_dir="hbase-client"
-            if [ -d "hbase-hadoop3-install" ]; then
-              echo "run hadoop3 client integration test against hbase hadoop3 
binaries"
-              hbase_install_dir="hbase-hadoop3-install"
-              hbase_client_dir="hbase-hadoop3-client"
+          sh '''#!/bin/bash -xe
+            if [[ "${BRANCH_NAME}" == *"branch-2"* ]]; then
+              echo "Attempting to run an instance on top of Hadoop 2."
+              hadoop_artifact=$(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | head 
-n 1)
+              tar --strip-components=1 -xzf "${hadoop_artifact}" -C "hadoop-2"
+              install_artifact=$(ls -1 
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz | 
grep -v client-bin | grep -v hadoop3)
+              tar --strip-component=1 -xzf "${install_artifact}" -C 
"hbase-install"
+              client_artifact=$(ls -1 
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-client-bin.tar.gz
 | grep -v hadoop3)
+              tar --strip-component=1 -xzf "${client_artifact}" -C 
"hbase-client"
+              docker build -t hbase-integration-test -f 
"${BASEDIR}/dev-support/docker/Dockerfile" .
+              docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+                -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-8" 
--workdir=/hbase hbase-integration-test \
+                component/dev-support/hbase_nightly_pseudo-distributed-test.sh 
\
+                --single-process \
+                --working-dir ${OUTPUT_DIR}/hadoop-2 \
+                --hbase-client-install "hbase-client" \
+                hbase-install \
+                hadoop-2/bin/hadoop \
+                hadoop-2/share/hadoop/yarn/timelineservice \
+                
hadoop-2/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+                
hadoop-2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
+                hadoop-2/bin/mapred \
+                >${OUTPUT_DIR}/hadoop-2.log 2>&1
+              if [ $? -ne 0 ]; then
+                echo "(x) {color:red}-1 client integration 
test{color}\n--Failed when running client tests on top of Hadoop 2. [see log 
for details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop-2.log]. (note that this 
means we didn't run on Hadoop 3)" >${OUTPUT_DIR}/commentfile
+                exit 2
+              fi
+              echo "(/) {color:green}+1 client integration test for HBase 2 
{color}" >${OUTPUT_DIR}/commentfile
+            else
+              echo "Skipping to run against Hadoop 2 for branch ${BRANCH_NAME}"
             fi
-            docker build -t hbase-integration-test -f 
"${BASEDIR}/dev-support/docker/Dockerfile" .
-            docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
-              -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \
-              -e HADOOP_OPTS="--add-opens java.base/java.lang=ALL-UNNAMED" \
-              --workdir=/hbase hbase-integration-test \
-              component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
-              --single-process \
-              --working-dir ${OUTPUT_DIR}/non-shaded \
-              --hbase-client-install ${hbase_client_dir} \
-              ${hbase_install_dir} \
-              hadoop-3/bin/hadoop \
-              hadoop-3/share/hadoop/yarn/timelineservice \
-              
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-              
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
-              hadoop-3/bin/mapred \
-              >${OUTPUT_DIR}/hadoop.log 2>&1
-            if [ $? -ne 0 ]; then
-              echo "(x) {color:red}-1 client integration test{color}\n--Failed 
when running client tests on top of Hadoop ${HADOOP_VERSION}. [see log for 
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop.log]. (note that this means 
we didn't check the Hadoop ${HADOOP_VERSION} shaded client)" > 
${OUTPUT_DIR}/commentfile
-              exit 2
-            fi
-            echo "(/) {color:green}+1 client integration test for 
${HADOOP_VERSION} {color}" >> ${OUTPUT_DIR}/commentfile
-            echo "Attempting to run an instance on top of Hadoop 
${HADOOP_VERSION}, relying on the Hadoop client artifacts for the example 
client program."
-            docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
-              -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \
-              -e HADOOP_OPTS="--add-opens java.base/java.lang=ALL-UNNAMED" \
-              --workdir=/hbase hbase-integration-test \
-              component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
-              --single-process \
-              --hadoop-client-classpath 
hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar
 \
-              --working-dir ${OUTPUT_DIR}/shade \
-              --hbase-client-install ${hbase_client_dir} \
-              ${hbase_install_dir} \
-              hadoop-3/bin/hadoop \
-              hadoop-3/share/hadoop/yarn/timelineservice \
-              
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-              
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
-              hadoop-3/bin/mapred \
-              >${OUTPUT_DIR}/hadoop-shaded.log 2>&1
-            if [ $? -ne 0 ]; then
-              echo "(x) {color:red}-1 client integration test{color}\n--Failed 
when running client tests on top of Hadoop ${HADOOP_VERSION} using Hadoop's 
shaded client. [see log for 
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop-shaded.log]." >> 
${OUTPUT_DIR}/commentfile
-              exit 2
-            fi
-            echo "(/) {color:green}+1 client integration test for 
${HADOOP_VERSION} with shaded hadoop client {color}" >> 
${OUTPUT_DIR}/commentfile
           '''
-        } // hadoop 3 integration test
-      } // hadoop 3 matrix
-    } // parallel
-    } // steps
+        }
+      }
+      stage('hadoop 3 integration tests') {
+        matrix {
+          axes {
+            axis {
+              name 'HADOOP_VERSION'
+              values getHadoopVersions(env.HADOOP3_VERSIONS)
+            }
+          }
+          agent {
+            node {
+              label 'hbase'
+            }
+          }
+          stages {
+            stage('hadoop 3 integration test') {
+              environment {
+                OUTPUT_DIR = "output-integration-hadoop-${env.HADOOP_VERSION}"
+              }
+              steps {
+                dir('component') {
+                  checkout scm
+                }
+                sh '''#!/bin/bash -e
+                  echo "Setting up directories"
+                  rm -rf "${OUTPUT_DIR}"
+                  echo "(x) {color:red}-1 client integration test{color}\n-- 
Something went wrong with this stage, [check relevant console 
output|${BUILD_URL}/console]." >${OUTPUT_DIR}/commentfile
+                  rm -rf "hbase-install"
+                  rm -rf "hbase-client"
+                  rm -rf "hbase-hadoop3-install"
+                  rm -rf "hbase-hadoop3-client"
+                  # remove old hadoop tarballs in workspace
+                  rm -rf hadoop-3*.tar.gz
+                '''
+                unstash "hadoop-${HADOOP_VERSION}"
+                unstash 'hbase-install'
+                sh '''#!/bin/bash -e
+                  echo "Attempting to use run an instance on top of Hadoop 
${HADOOP_VERSION}."
+                  # Clean up any previous tested Hadoop3 files before 
unpacking the current one
+                  rm -rf hadoop-3/*
+                  # Create working dir
+                  rm -rf "${OUTPUT_DIR}/non-shaded" && mkdir 
"${OUTPUT_DIR}/non-shaded"
+                  rm -rf "${OUTPUT_DIR}/shaded" && mkdir "${OUTPUT_DIR}/shaded"
+                  artifact=$(ls -1 
"${WORKSPACE}"/hadoop-${HADOOP_VERSION}-bin.tar.gz | head -n 1)
+                  tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3"
+                  # we need to patch some files otherwise minicluster will 
fail to start, see MAPREDUCE-7471
+                  ${BASEDIR}/dev-support/patch-hadoop3.sh hadoop-3
+                  hbase_install_dir="hbase-install"
+                  hbase_client_dir="hbase-client"
+                  if [ -d "hbase-hadoop3-install" ]; then
+                    echo "run hadoop3 client integration test against hbase 
hadoop3 binaries"
+                    hbase_install_dir="hbase-hadoop3-install"
+                    hbase_client_dir="hbase-hadoop3-client"
+                  fi
+                  docker build -t hbase-integration-test -f 
"${BASEDIR}/dev-support/docker/Dockerfile" .
+                  docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+                    -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \
+                    -e HADOOP_OPTS="--add-opens 
java.base/java.lang=ALL-UNNAMED" \
+                    --workdir=/hbase hbase-integration-test \
+                    
component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
+                    --single-process \
+                    --working-dir ${OUTPUT_DIR}/non-shaded \
+                    --hbase-client-install ${hbase_client_dir} \
+                    ${hbase_install_dir} \
+                    hadoop-3/bin/hadoop \
+                    hadoop-3/share/hadoop/yarn/timelineservice \
+                    
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+                    
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
+                    hadoop-3/bin/mapred \
+                    >${OUTPUT_DIR}/hadoop.log 2>&1
+                  if [ $? -ne 0 ]; then
+                    echo "(x) {color:red}-1 client integration 
test{color}\n--Failed when running client tests on top of Hadoop 
${HADOOP_VERSION}. [see log for 
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop.log]. (note that this means 
we didn't check the Hadoop ${HADOOP_VERSION} shaded client)" > 
${OUTPUT_DIR}/commentfile
+                    exit 2
+                  fi
+                  echo "(/) {color:green}+1 client integration test for 
${HADOOP_VERSION} {color}" >> ${OUTPUT_DIR}/commentfile
+                  echo "Attempting to run an instance on top of Hadoop 
${HADOOP_VERSION}, relying on the Hadoop client artifacts for the example 
client program."
+                  docker run --rm -v "${WORKSPACE}":/hbase -v 
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+                    -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17" \
+                    -e HADOOP_OPTS="--add-opens 
java.base/java.lang=ALL-UNNAMED" \
+                    --workdir=/hbase hbase-integration-test \
+                    
component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
+                    --single-process \
+                    --hadoop-client-classpath 
hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar
 \
+                    --working-dir ${OUTPUT_DIR}/shade \
+                    --hbase-client-install ${hbase_client_dir} \
+                    ${hbase_install_dir} \
+                    hadoop-3/bin/hadoop \
+                    hadoop-3/share/hadoop/yarn/timelineservice \
+                    
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+                    
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
+                    hadoop-3/bin/mapred \
+                    >${OUTPUT_DIR}/hadoop-shaded.log 2>&1
+                  if [ $? -ne 0 ]; then
+                    echo "(x) {color:red}-1 client integration 
test{color}\n--Failed when running client tests on top of Hadoop 
${HADOOP_VERSION} using Hadoop's shaded client. [see log for 
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop-shaded.log]." >> 
${OUTPUT_DIR}/commentfile
+                    exit 2
+                  fi
+                  echo "(/) {color:green}+1 client integration test for 
${HADOOP_VERSION} with shaded hadoop client {color}" >> 
${OUTPUT_DIR}/commentfile
+                '''
+              }
+            }
+          }
+        }
+      }
+    }
   } // stage integration test
   }
   post {
     always {
-      scripts {
+      script {
         sshPublisher(publishers: [
           sshPublisherDesc(configName: 'Nightlies',
             transfers: [
@@ -338,7 +351,6 @@ pipeline {
             ]
           )
         ])
-        // remove the big src tarball, store the nightlies url in 
hbase-src.html
         sh '''#!/bin/bash -e
           SRC_TAR="${WORKSPACE}/output-srctarball/hbase-src.tar.gz"
           if [ -f "${SRC_TAR}" ]; then
@@ -353,7 +365,7 @@ pipeline {
         def results = []
         results.add('output-srctarball/commentfile')
         
results.add("output-integration-hadoop-${env.HADOOP_VERSION}/commentfile")
-        for (hadoop3_version in getHadoopVersions($env.HADOOP3_VERSIONS)) {
+        for (hadoop3_version in getHadoopVersions(env.HADOOP3_VERSIONS)) {
           
results.add("output-integration-hadoop-${hadoop3_version}/commentfile")
         }
         echo env.BRANCH_NAME
@@ -366,7 +378,6 @@ pipeline {
           comment += "(/) *{color:green}+1 overall{color}*\n"
         } else {
           comment += "(x) *{color:red}-1 overall{color}*\n"
-          // Ideally get the committer our of the change and @ mention them in 
the per-jira comment
         }
         comment += "----\ndetails (if available):\n\n"
         echo ""
@@ -385,13 +396,13 @@ pipeline {
         jiras.each { currentIssue ->
           jiraComment issueKey: currentIssue, body: comment
         }
-      } // scripts
-        archiveArtifacts artifacts: 'output-srctarball/*'
-        archiveArtifacts artifacts: 'output-srctarball/**/*'
-        archiveArtifacts artifacts: 'output-integration-*/*'
-        archiveArtifacts artifacts: 'output-integration-*/**/*'
-    } // always
-  } // post
+      }
+      archiveArtifacts artifacts: 'output-srctarball/*'
+      archiveArtifacts artifacts: 'output-srctarball/**/*'
+      archiveArtifacts artifacts: 'output-integration-*/*'
+      archiveArtifacts artifacts: 'output-integration-*/**/*'
+    }
+  }
 }
 
 import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper

Reply via email to