This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2.5 by this push:
new 423ff80d6a0 HBASE-29930 Separate packaging and integration check in
nightly job to new jenkins job (#7847)
423ff80d6a0 is described below
commit 423ff80d6a079020ad8adf4022d3587740dacff6
Author: Duo Zhang <[email protected]>
AuthorDate: Thu Mar 5 17:33:17 2026 +0800
HBASE-29930 Separate packaging and integration check in nightly job to new
jenkins job (#7847)
(cherry picked from commit 865ba216171c42c90e0dc04004ffa9e83ff03d1e)
Signed-off-by: Nihal Jain <[email protected]>
---
dev-support/Jenkinsfile | 223 ------------
.../integration-test/integration-test.Jenkinsfile | 397 +++++++++++++++++++++
dev-support/integration-test/patch-hadoop3.sh | 24 ++
.../pseudo-distributed-test.sh} | 24 +-
.../source-artifact.sh} | 89 ++++-
5 files changed, 510 insertions(+), 247 deletions(-)
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index eb7470eaa64..883eaabba1f 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -900,224 +900,6 @@ pipeline {
} //script
} //steps
} //stage ('yetus jdk17 hadoop3 backwards compatibility checks')
-
- // This is meant to mimic what a release manager will do to create RCs.
- // See http://hbase.apache.org/book.html#maven.release
- // TODO (HBASE-23870): replace this with invocation of the release tool
- stage ('packaging and integration') {
- agent {
- node {
- label 'hbase'
- }
- }
- environment {
- BASEDIR = "${env.WORKSPACE}/component"
- BRANCH = "${env.BRANCH_NAME}"
- }
- steps {
- dir('component') {
- checkout scm
- }
- sh '''#!/bin/bash -e
- echo "Setting up directories"
- rm -rf "output-srctarball" && mkdir "output-srctarball"
- rm -rf "output-integration" && mkdir "output-integration"
"output-integration/hadoop-2" "output-integration/hadoop-3"
"output-integration/hadoop-3-shaded"
- rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
- rm -rf "hbase-install" && mkdir "hbase-install"
- rm -rf "hbase-client" && mkdir "hbase-client"
- rm -rf "hbase-hadoop3-install"
- rm -rf "hbase-hadoop3-client"
- rm -rf "hadoop-2" && mkdir "hadoop-2"
- rm -rf "hadoop-3" && mkdir "hadoop-3"
- rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
- rm -rf ".m2-for-src" && mkdir ".m2-for-src"
- # remove old hadoop tarballs in workspace
- rm -rf hadoop-2*.tar.gz
- rm -rf hadoop-3*.tar.gz
- rm -f "output-integration/commentfile"
- '''
- sh '''#!/usr/bin/env bash
- set -e
- rm -rf "output-srctarball/machine" && mkdir
"output-srctarball/machine"
- "${BASEDIR}/dev-support/gather_machine_environment.sh"
"output-srctarball/machine"
- echo "got the following saved stats in
'output-srctarball/machine'"
- ls -lh "output-srctarball/machine"
- '''
- sh '''#!/bin/bash -e
- echo "Checking the steps for an RM to make a source artifact,
then a binary artifact."
- docker build -t hbase-integration-test -f
"${BASEDIR}/dev-support/docker/Dockerfile" .
- docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
- -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17"
--workdir=/hbase hbase-integration-test \
- "component/dev-support/hbase_nightly_source-artifact.sh" \
- --intermediate-file-dir output-srctarball \
- --unpack-temp-dir unpacked_src_tarball \
- --maven-m2-initial .m2-for-repo \
- --maven-m2-src-build .m2-for-src \
- --clean-source-checkout \
- component
- if [ $? -eq 0 ]; then
- echo '(/) {color:green}+1 source release artifact{color}\n--
See build output for details.' >output-srctarball/commentfile
- else
- echo '(x) {color:red}-1 source release artifact{color}\n-- See
build output for details.' >output-srctarball/commentfile
- exit 1
- fi
- '''
- echo "unpacking the hbase bin tarball into 'hbase-install' and the
client tarball into 'hbase-client'"
- sh '''#!/bin/bash -e
- if [ 2 -ne $(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz |
grep -v hadoop3 | wc -l) ]; then
- echo '(x) {color:red}-1 testing binary artifact{color}\n--
source tarball did not produce the expected binaries.'
>>output-srctarball/commentfile
- exit 1
- fi
- install_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz |
grep -v client-bin | grep -v hadoop3)
- tar --strip-component=1 -xzf "${install_artifact}" -C
"hbase-install"
- client_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-client-bin.tar.gz
| grep -v hadoop3)
- tar --strip-component=1 -xzf "${client_artifact}" -C
"hbase-client"
- if [ 2 -eq $(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz
| wc -l) ]; then
- echo "hadoop3 artifacts available, unpacking the hbase hadoop3
bin tarball into 'hbase-hadoop3-install' and the client hadoop3 tarball into
'hbase-hadoop3-client'"
- mkdir hbase-hadoop3-install
- mkdir hbase-hadoop3-client
- hadoop3_install_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz
| grep -v client-bin)
- tar --strip-component=1 -xzf "${hadoop3_install_artifact}" -C
"hbase-hadoop3-install"
- hadoop3_client_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-client-bin.tar.gz)
- tar --strip-component=1 -xzf "${hadoop3_client_artifact}" -C
"hbase-hadoop3-client"
- fi
- '''
- unstash 'hadoop-2'
- sh '''#!/bin/bash -xe
- if [[ "${BRANCH}" == *"branch-2"* ]]; then
- echo "Attempting to use run an instance on top of Hadoop 2."
- artifact=$(ls -1 "${WORKSPACE}"/hadoop-2*.tar.gz | head -n 1)
- tar --strip-components=1 -xzf "${artifact}" -C "hadoop-2"
- docker build -t hbase-integration-test -f
"${BASEDIR}/dev-support/docker/Dockerfile" .
- docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
- -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-8"
--workdir=/hbase hbase-integration-test \
-
component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
- --single-process \
- --working-dir output-integration/hadoop-2 \
- --hbase-client-install "hbase-client" \
- hbase-install \
- hadoop-2/bin/hadoop \
- hadoop-2/share/hadoop/yarn/timelineservice \
-
hadoop-2/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-
hadoop-2/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
- hadoop-2/bin/mapred \
- >output-integration/hadoop-2.log 2>&1
- if [ $? -ne 0 ]; then
- echo "(x) {color:red}-1 client integration
test{color}\n--Failed when running client tests on top of Hadoop 2. [see log
for details|${BUILD_URL}/artifact/output-integration/hadoop-2.log]. (note that
this means we didn't run on Hadoop 3)" >output-integration/commentfile
- exit 2
- fi
- echo "(/) {color:green}+1 client integration test for HBase 2
{color}" >output-integration/commentfile
- else
- echo "Skipping to run against Hadoop 2 for branch ${BRANCH}"
- fi
- '''
- script {
- for (hadoop3_version in hadoop3_versions) {
- env.HADOOP3_VERSION = hadoop3_version;
- echo "env.HADOOP3_VERSION" + env.hadoop3_version;
- stage ("packaging and integration Hadoop 3 inner stage ") {
- unstash "hadoop-" + env.HADOOP3_VERSION
- sh '''#!/bin/bash -e
- echo "Attempting to use run an instance on top of Hadoop
${HADOOP3_VERSION}."
- # Clean up any previous tested Hadoop3 files before
unpacking the current one
- rm -rf hadoop-3/*
- # Create working dir
- rm -rf "output-integration/hadoop-${HADOOP3_VERSION}" &&
mkdir "output-integration/hadoop-${HADOOP3_VERSION}"
- rm -rf
"output-integration/hadoop-${HADOOP3_VERSION}-shaded" && mkdir
"output-integration/hadoop-${HADOOP3_VERSION}-shaded"
- artifact=$(ls -1
"${WORKSPACE}"/hadoop-${HADOOP3_VERSION}-bin.tar.gz | head -n 1)
- tar --strip-components=1 -xzf "${artifact}" -C "hadoop-3"
- # we need to patch some files otherwise minicluster will
fail to start, see MAPREDUCE-7471
- ${BASEDIR}/dev-support/patch-hadoop3.sh hadoop-3
- hbase_install_dir="hbase-install"
- hbase_client_dir="hbase-client"
- if [ -d "hbase-hadoop3-install" ]; then
- echo "run hadoop3 client integration test against hbase
hadoop3 binaries"
- hbase_install_dir="hbase-hadoop3-install"
- hbase_client_dir="hbase-hadoop3-client"
- fi
- docker build -t hbase-integration-test -f
"${BASEDIR}/dev-support/docker/Dockerfile" .
- docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
- -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17"
\
- -e HADOOP_OPTS="--add-opens
java.base/java.lang=ALL-UNNAMED" \
- --workdir=/hbase hbase-integration-test \
-
component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
- --single-process \
- --working-dir
output-integration/hadoop-${HADOOP3_VERSION} \
- --hbase-client-install ${hbase_client_dir} \
- ${hbase_install_dir} \
- hadoop-3/bin/hadoop \
- hadoop-3/share/hadoop/yarn/timelineservice \
-
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
- hadoop-3/bin/mapred \
- >output-integration/hadoop-${HADOOP3_VERSION}.log 2>&1
- if [ $? -ne 0 ]; then
- echo "(x) {color:red}-1 client integration
test{color}\n--Failed when running client tests on top of Hadoop
${HADOOP3_VERSION}. [see log for
details|${BUILD_URL}/artifact/output-integration/hadoop-${HADOOP3_VERSION}.log].
(note that this means we didn't check the Hadoop ${HADOOP3_VERSION} shaded
client)" >> output-integration/commentfile
- exit 2
- fi
- echo "Attempting to use run an instance on top of Hadoop
${HADOOP3_VERSION}, relying on the Hadoop client artifacts for the example
client program."
- docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
- -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17"
\
- -e HADOOP_OPTS="--add-opens
java.base/java.lang=ALL-UNNAMED" \
- --workdir=/hbase hbase-integration-test \
-
component/dev-support/hbase_nightly_pseudo-distributed-test.sh \
- --single-process \
- --hadoop-client-classpath
hadoop-3/share/hadoop/client/hadoop-client-api-*.jar:hadoop-3/share/hadoop/client/hadoop-client-runtime-*.jar
\
- --working-dir
output-integration/hadoop-${HADOOP3_VERSION}-shaded \
- --hbase-client-install ${hbase_client_dir} \
- ${hbase_install_dir} \
- hadoop-3/bin/hadoop \
- hadoop-3/share/hadoop/yarn/timelineservice \
-
hadoop-3/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
-
hadoop-3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar \
- hadoop-3/bin/mapred \
-
>output-integration/hadoop-${HADOOP3_VERSION}-shaded.log 2>&1
- if [ $? -ne 0 ]; then
- echo "(x) {color:red}-1 client integration
test{color}\n--Failed when running client tests on top of Hadoop
${HADOOP3_VERSION} using Hadoop's shaded client. [see log for
details|${BUILD_URL}/artifact/output-integration/hadoop-${HADOOP3_VERSION}-shaded.log]."
>> output-integration/commentfile
- exit 2
- fi
- echo "(/) {color:green}+1 client integration test for
${HADOOP3_VERSION} {color}" >> output-integration/commentfile
- '''
- } //stage ("packaging and integration Hadoop 3 inner stage ")
- } //for
- } // script
- } //steps
- post {
- always {
- sh '''#!/bin/bash -e
- if [ ! -f "output-integration/commentfile" ]; then
- echo "(x) {color:red}-1 source release artifact{color}\n--
Something went wrong with this stage, [check relevant console
output|${BUILD_URL}/console]." >output-srctarball/commentfile
- echo "(x) {color:red}-1 client integration test{color}\n--
Something went wrong with this stage, [check relevant console
output|${BUILD_URL}/console]." >output-integration/commentfile
- fi
- '''
- stash name: 'srctarball-result', includes:
"output-srctarball/commentfile,output-integration/commentfile"
- sshPublisher(publishers: [
- sshPublisherDesc(configName: 'Nightlies',
- transfers: [
- sshTransfer(remoteDirectory:
"hbase/${JOB_NAME}/${BUILD_NUMBER}",
- sourceFiles: "output-srctarball/hbase-src.tar.gz"
- )
- ]
- )
- ])
- // remove the big src tarball, store the nightlies url in
hbase-src.html
- sh '''#!/bin/bash -e
- SRC_TAR="${WORKSPACE}/output-srctarball/hbase-src.tar.gz"
- if [ -f "${SRC_TAR}" ]; then
- echo "Remove ${SRC_TAR} for saving space"
- rm -rf "${SRC_TAR}"
- python3 ${BASEDIR}/dev-support/gen_redirect_html.py
"${ASF_NIGHTLIES_BASE}/output-srctarball" >
"${WORKSPACE}/output-srctarball/hbase-src.html"
- else
- echo "No hbase-src.tar.gz, skipping"
- fi
- '''
- archiveArtifacts artifacts: 'output-srctarball/*'
- archiveArtifacts artifacts: 'output-srctarball/**/*'
- archiveArtifacts artifacts: 'output-integration/*'
- archiveArtifacts artifacts: 'output-integration/**/*'
- } //always
- } //post
- } //stage packaging
} // parallel
} //stage:_health checks
} //stages
@@ -1135,15 +917,12 @@ pipeline {
rm -rf ${OUTPUT_DIR_RELATIVE_JDK11_HADOOP3}
rm -rf ${OUTPUT_DIR_RELATIVE_JDK17_HADOOP3}
rm -rf ${OUTPUT_DIR_RELATIVE_JDK17_HADOOP3_BACKWARDS}-*
- rm -rf output-srctarball
- rm -rf output-integration
'''
unstash 'general-result'
unstash 'jdk8-hadoop2-result'
unstash 'jdk8-hadoop3-result'
unstash 'jdk11-hadoop3-result'
unstash 'jdk17-hadoop3-result'
- unstash 'srctarball-result'
def results = ["${env.OUTPUT_DIR_RELATIVE_GENERAL}/commentfile",
"${env.OUTPUT_DIR_RELATIVE_JDK8_HADOOP2}/commentfile",
@@ -1158,8 +937,6 @@ pipeline {
unstash("jdk17-hadoop3-backwards-result-${hadoop3_version}")
results.add("${env.OUTPUT_DIR_RELATIVE_JDK17_HADOOP3_BACKWARDS}-${hadoop3_version}/commentfile")
}
- results.add('output-srctarball/commentfile')
- results.add('output-integration/commentfile')
echo env.BRANCH_NAME
echo env.BUILD_URL
echo currentBuild.result
diff --git a/dev-support/integration-test/integration-test.Jenkinsfile
b/dev-support/integration-test/integration-test.Jenkinsfile
new file mode 100644
index 00000000000..5407ecda19a
--- /dev/null
+++ b/dev-support/integration-test/integration-test.Jenkinsfile
@@ -0,0 +1,397 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+pipeline {
+ agent {
+ node {
+ label 'hbase'
+ }
+ }
+ triggers {
+ cron('@daily')
+ }
+ options {
+ buildDiscarder(logRotator(numToKeepStr: '20'))
+ timeout (time: 16, unit: 'HOURS')
+ timestamps()
+ skipDefaultCheckout()
+ disableConcurrentBuilds()
+ }
+ environment {
+ HADOOP_VERSIONS = "2.10.2,3.2.4,3.3.5,3.3.6,3.4.0,3.4.1,3.4.2,3.4.3"
+ BASEDIR = "${env.WORKSPACE}/component"
+ }
+ parameters {
+ booleanParam(name: 'DEBUG', defaultValue: false, description: 'Produce a
lot more meta-information.')
+ }
+ stages {
+ stage('scm-checkout') {
+ steps {
+ dir('component') {
+ checkout scm
+ }
+ }
+ }
+ // This is meant to mimic what a release manager will do to create RCs.
+ // See http://hbase.apache.org/book.html#maven.release
+ // TODO (HBASE-23870): replace this with invocation of the release tool
+ stage ('packaging test') {
+ steps {
+ sh '''#!/bin/bash -e
+ echo "Setting up directories"
+ rm -rf "output-srctarball" && mkdir "output-srctarball"
+ rm -rf "unpacked_src_tarball" && mkdir "unpacked_src_tarball"
+ rm -rf ".m2-for-repo" && mkdir ".m2-for-repo"
+ rm -rf ".m2-for-src" && mkdir ".m2-for-src"
+ '''
+ sh '''#!/bin/bash -e
+ rm -rf "output-srctarball/machine" && mkdir
"output-srctarball/machine"
+ "${BASEDIR}/dev-support/gather_machine_environment.sh"
"output-srctarball/machine"
+ echo "got the following saved stats in 'output-srctarball/machine'"
+ ls -lh "output-srctarball/machine"
+ '''
+ sh '''#!/bin/bash -e
+ echo "Checking the steps for an RM to make a source artifact, then a
binary artifact."
+ docker build -t hbase-integration-test -f
"${BASEDIR}/dev-support/docker/Dockerfile" .
+ docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+ -u `id -u`:`id -g` -e JAVA_HOME="/usr/lib/jvm/java-17"
--workdir=/hbase hbase-integration-test \
+ "component/dev-support/integration-test/source-artifact.sh" \
+ --intermediate-file-dir output-srctarball \
+ --unpack-temp-dir unpacked_src_tarball \
+ --maven-m2-initial .m2-for-repo \
+ --maven-m2-src-build .m2-for-src \
+ --clean-source-checkout \
+ component
+ if [ $? -eq 0 ]; then
+ echo '(/) {color:green}+1 source release artifact{color}\n-- See
build output for details.' >output-srctarball/commentfile
+ else
+ echo '(x) {color:red}-1 source release artifact{color}\n-- See
build output for details.' >output-srctarball/commentfile
+ exit 1
+ fi
+ '''
+ echo "make sure we have proper hbase tarballs under hbase-assembly"
+ sh '''#!/bin/bash -e
+ if [ 2 -ne $(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz |
grep -v hadoop3 | wc -l) ]; then
+ echo '(x) {color:red}-1 testing binary artifact{color}\n-- source
tarball did not produce the expected binaries.' >>output-srctarball/commentfile
+ exit 1
+ fi
+ if [[ "${BRANCH_NAME}" == *"branch-2"* ]]; then
+ if [ 2 -ne $(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz
| wc -l) ]; then
+ echo '(x) {color:red}-1 testing binary artifact{color}\n--
source tarball did not produce the expected hadoop3 binaries.'
>>output-srctarball/commentfile
+ exit 1
+ fi
+ fi
+ '''
+ stash name: 'hbase-install', includes:
"unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz"
+ } // steps
+ post {
+ always {
+ script {
+ def srcFile = "${env.WORKSPACE}/output-srctarball/hbase-src.tar.gz"
+ if (fileExists(srcFile)) {
+ echo "upload hbase-src.tar.gz to nightlies"
+ sshPublisher(publishers: [
+ sshPublisherDesc(configName: 'Nightlies',
+ transfers: [
+ sshTransfer(remoteDirectory:
"hbase/${JOB_NAME}/${BUILD_NUMBER}",
+ sourceFiles: srcFile
+ )
+ ]
+ )
+ ])
+ // remove the big src tarball, store the nightlies url in
hbase-src.html
+ sh '''#!/bin/bash -e
+ SRC_TAR="${WORKSPACE}/output-srctarball/hbase-src.tar.gz"
+ echo "Remove ${SRC_TAR} for saving space"
+ rm -rf "${SRC_TAR}"
+ python3 ${BASEDIR}/dev-support/gen_redirect_html.py
"${ASF_NIGHTLIES_BASE}/output-srctarball" >
"${WORKSPACE}/output-srctarball/hbase-src.html"
+ '''
+ }
+ }
+ archiveArtifacts artifacts: 'output-srctarball/*'
+ archiveArtifacts artifacts: 'output-srctarball/**/*'
+ }
+ }
+ } // packaging test
+ stage ('integration test matrix') {
+ matrix {
+ agent {
+ node {
+ label 'hbase'
+ }
+ }
+ axes {
+ axis {
+ name 'HADOOP_VERSION'
+            // matrix does not support dynamic axis values, so here we need to keep aligned with the
+ // above environment
+ values
"2.10.2","3.2.4","3.3.5","3.3.6","3.4.0","3.4.1","3.4.2","3.4.3"
+ }
+ }
+ environment {
+ BASEDIR = "${env.WORKSPACE}/component"
+ OUTPUT_DIR = "output-integration-hadoop-${env.HADOOP_VERSION}"
+ }
+ when {
+ expression {
+ if (HADOOP_VERSION == '2.10.2') {
+              // only branch-2/branch-2.x need to run against hadoop2, here we also include
+ // HBASE-XXXXX-branch-2 feature branch
+ return env.BRANCH_NAME.contains('branch-2')
+ }
+ if (HADOOP_VERSION == '3.2.4') {
+              // only branch-2.5 needs to run against hadoop 3.2.4, here we also include
+ // HBASE-XXXXX-branch-2.5 feature branch
+ return env.BRANCH_NAME.contains('branch-2.5')
+ }
+ return true
+ }
+ }
+ stages {
+ stage('scm-checkout') {
+ steps {
+ sh '''#!/bin/bash -e
+ echo "Setting up directories"
+ rm -rf "${OUTPUT_DIR}" && mkdir "${OUTPUT_DIR}"
+ echo "(x) {color:red}-1 client integration test for
${HADOOP_VERSION}{color}\n-- Something went wrong with this stage, [check
relevant console output|${BUILD_URL}/console]." >${OUTPUT_DIR}/commentfile
+ rm -rf "unpacked_src_tarball"
+ rm -rf "hbase-install" && mkdir "hbase-install"
+ rm -rf "hbase-client" && mkdir "hbase-client"
+ rm -rf "hadoop-install" && mkdir "hadoop-install"
+ rm -rf "hbase-hadoop3-install"
+ rm -rf "hbase-hadoop3-client"
+ # remove old hadoop tarballs in workspace
+ rm -rf hadoop-*.tar.gz
+ '''
+ dir('component') {
+ checkout scm
+ }
+ } // steps
+ } // scm-checkout
+ stage('install hadoop') {
+ steps {
+ dir("downloads-hadoop") {
+ sh '''#!/bin/bash -e
+ echo "Make sure we have a directory for downloading
dependencies: $(pwd)"
+ '''
+ sh '''#!/bin/bash -e
+ echo "Ensure we have a copy of Hadoop ${HADOOP_VERSION}"
+
"${WORKSPACE}/component/dev-support/jenkins-scripts/cache-apache-project-artifact.sh"
\
+ --working-dir "${WORKSPACE}/downloads-hadoop" \
+ --keys 'https://downloads.apache.org/hadoop/common/KEYS' \
+ --verify-tar-gz \
+ "${WORKSPACE}/hadoop-${HADOOP_VERSION}-bin.tar.gz" \
+
"hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz"
+ for stale in $(ls -1 "${WORKSPACE}"/hadoop-*.tar.gz | grep
-v ${HADOOP_VERSION}); do
+ echo "Delete stale hadoop cache ${stale}"
+ rm -rf $stale
+ done
+ artifact=$(ls -1
"${WORKSPACE}"/hadoop-${HADOOP_VERSION}-bin.tar.gz | head -n 1)
+ tar --strip-components=1 -xzf "${artifact}" -C
"${WORKSPACE}/hadoop-install"
+ if [[ ${HADOOP_VERSION} == 3.* ]]; then
+ # we need to patch some files otherwise minicluster will
fail to start, see MAPREDUCE-7471
+ ${BASEDIR}/dev-support/integration-test/patch-hadoop3.sh
"${WORKSPACE}/hadoop-install"
+ fi
+ '''
+ } // dir
+ } // steps
+ } // install hadoop
+ stage('install hbase') {
+ steps {
+ unstash 'hbase-install'
+ sh'''#!/bin/bash -e
+ install_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-bin.tar.gz |
grep -v client-bin | grep -v hadoop3)
+ tar --strip-component=1 -xzf "${install_artifact}" -C
"hbase-install"
+ client_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-client-bin.tar.gz
| grep -v hadoop3)
+ tar --strip-component=1 -xzf "${client_artifact}" -C
"hbase-client"
+ if ls
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz
&>/dev/null; then
+ echo "hadoop3 artifacts available, unpacking the hbase
hadoop3 bin tarball into 'hbase-hadoop3-install' and the client hadoop3 tarball
into 'hbase-hadoop3-client'"
+ mkdir hbase-hadoop3-install
+ mkdir hbase-hadoop3-client
+ hadoop3_install_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-bin.tar.gz
| grep -v client-bin)
+ tar --strip-component=1 -xzf "${hadoop3_install_artifact}"
-C "hbase-hadoop3-install"
+ hadoop3_client_artifact=$(ls -1
"${WORKSPACE}"/unpacked_src_tarball/hbase-assembly/target/hbase-*-hadoop3-*-client-bin.tar.gz)
+ tar --strip-component=1 -xzf "${hadoop3_client_artifact}" -C
"hbase-hadoop3-client"
+ fi
+ '''
+ } // steps
+ }
+ stage('integration test ') {
+ steps {
+ sh '''#!/bin/bash -e
+ hbase_install_dir="hbase-install"
+ hbase_client_dir="hbase-client"
+ if [[ ${HADOOP_VERSION} == 3.* ]] && [[ -d
"hbase-hadoop3-install" ]]; then
+ echo "run hadoop3 client integration test against hbase
hadoop3 binaries"
+ hbase_install_dir="hbase-hadoop3-install"
+ hbase_client_dir="hbase-hadoop3-client"
+ fi
+ java_home="/usr/lib/jvm/java-17"
+ hadoop_opts="--add-opens java.base/java.lang=ALL-UNNAMED"
+ if [[ ${HADOOP_VERSION} == 2.* ]]; then
+ java_home="/usr/lib/jvm/java-8"
+ hadoop_opts=""
+ fi
+ echo "Attempting to run an instance on top of Hadoop
${HADOOP_VERSION}."
+ # Create working dir
+ rm -rf "${OUTPUT_DIR}/non-shaded" && mkdir
"${OUTPUT_DIR}/non-shaded"
+ docker build -t hbase-integration-test -f
"${BASEDIR}/dev-support/docker/Dockerfile" .
+ docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+ -u `id -u`:`id -g` -e JAVA_HOME="${java_home}" \
+ -e HADOOP_OPTS="${hadoop_opts}" \
+ --workdir=/hbase hbase-integration-test \
+
component/dev-support/integration-test/pseudo-distributed-test.sh \
+ --single-process \
+ --working-dir ${OUTPUT_DIR}/non-shaded \
+ --hbase-client-install ${hbase_client_dir} \
+ ${hbase_install_dir} \
+ hadoop-install/bin/hadoop \
+ hadoop-install/share/hadoop/yarn/timelineservice \
+
hadoop-install/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+
hadoop-install/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar
\
+ hadoop-install/bin/mapred \
+ >${OUTPUT_DIR}/hadoop.log 2>&1
+ if [ $? -ne 0 ]; then
+ echo "(x) {color:red}-1 client integration test for
${HADOOP_VERSION}{color}\n--Failed when running client tests on top of Hadoop
${HADOOP_VERSION}. [see log for
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop.log]. (note that this means
we didn't check the Hadoop ${HADOOP_VERSION} shaded client)"
>${OUTPUT_DIR}/commentfile
+ exit 2
+ fi
+ echo "(/) {color:green}+1 client integration test for
${HADOOP_VERSION} {color}" >${OUTPUT_DIR}/commentfile
+ if [[ ${HADOOP_VERSION} == 2.* ]] || [[ ${HADOOP_VERSION} ==
3.2.* ]]; then
+ echo "skip running shaded hadoop client test for
${HADOOP_VERSION}"
+ exit 0
+ fi
+ # Create working dir
+ rm -rf "${OUTPUT_DIR}/shaded" && mkdir "${OUTPUT_DIR}/shaded"
+ echo "Attempting to run an instance on top of Hadoop
${HADOOP_VERSION}, relying on the Hadoop client artifacts for the example
client program."
+ docker run --rm -v "${WORKSPACE}":/hbase -v
/etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro \
+ -u `id -u`:`id -g` -e JAVA_HOME="${java_home}" \
+ -e HADOOP_OPTS="${hadoop_opts}" \
+ --workdir=/hbase hbase-integration-test \
+
component/dev-support/integration-test/pseudo-distributed-test.sh \
+ --single-process \
+ --hadoop-client-classpath
hadoop-install/share/hadoop/client/hadoop-client-api-*.jar:hadoop-install/share/hadoop/client/hadoop-client-runtime-*.jar
\
+ --working-dir ${OUTPUT_DIR}/shaded \
+ --hbase-client-install ${hbase_client_dir} \
+ ${hbase_install_dir} \
+ hadoop-install/bin/hadoop \
+ hadoop-install/share/hadoop/yarn/timelineservice \
+
hadoop-install/share/hadoop/yarn/test/hadoop-yarn-server-tests-*-tests.jar \
+
hadoop-install/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-*-tests.jar
\
+ hadoop-install/bin/mapred \
+ >${OUTPUT_DIR}/hadoop-shaded.log 2>&1
+ if [ $? -ne 0 ]; then
+                  echo "(x) {color:red}-1 client integration test for
${HADOOP_VERSION}{color}\n--Failed when running client tests on top of Hadoop
${HADOOP_VERSION} using Hadoop's shaded client. [see log for
details|${BUILD_URL}/artifact/${OUTPUT_DIR}/hadoop-shaded.log]." >>
${OUTPUT_DIR}/commentfile
+ exit 2
+ fi
+ echo "(/) {color:green}+1 client integration test for
${HADOOP_VERSION} with shaded hadoop client{color}" >> ${OUTPUT_DIR}/commentfile
+ '''
+ } // steps
+ post {
+ always {
+ stash name: "test-result-${env.HADOOP_VERSION}", includes:
"${env.OUTPUT_DIR}/commentfile"
+ archiveArtifacts artifacts: "${env.OUTPUT_DIR}/*"
+ archiveArtifacts artifacts: "${env.OUTPUT_DIR}/**/*"
+ } // always
+ } // post
+ } // integration test
+ } // stages
+ } // matrix
+ } // integration test matrix
+ } // stages
+ post {
+ always {
+ script {
+ def results = []
+ results.add('output-srctarball/commentfile')
+ for (hadoopVersion in getHadoopVersions(env.HADOOP_VERSIONS)) {
+ try {
+ unstash "test-result-${hadoopVersion}"
+
results.add("output-integration-hadoop-${hadoopVersion}/commentfile")
+ } catch (e) {
+ echo "unstash ${hadoopVersion} failed, ignore"
+ }
+ }
+ echo env.BRANCH_NAME
+ echo env.BUILD_URL
+ echo currentBuild.result
+ echo currentBuild.durationString
+ def comment = "Results for branch ${env.BRANCH_NAME}\n"
+ comment += "\t[build ${currentBuild.displayName} on
builds.a.o|${env.BUILD_URL}]: "
+ if (currentBuild.result == null || currentBuild.result == "SUCCESS") {
+ comment += "(/) *{color:green}+1 overall{color}*\n"
+ } else {
+ comment += "(x) *{color:red}-1 overall{color}*\n"
+        // Ideally get the committer out of the change and @ mention them in
the per-jira comment
+ }
+ comment += "----\ndetails (if available):\n\n"
+ echo ""
+ echo "[DEBUG] trying to aggregate step-wise results"
+ comment += results.collect { fileExists(file: it) ? readFile(file: it)
: "" }.join("\n\n")
+ echo "[INFO] Comment:"
+ echo comment
+ echo ""
+ echo "[DEBUG] checking to see if feature branch"
+ def jiras = getJirasToComment(env.BRANCH_NAME, [])
+ if (jiras.isEmpty()) {
+ echo "[DEBUG] non-feature branch, checking change messages for jira
keys."
+ echo "[INFO] There are ${currentBuild.changeSets.size()} change
sets."
+ jiras = getJirasToCommentFromChangesets(currentBuild)
+ }
+ jiras.each { currentIssue ->
+ jiraComment issueKey: currentIssue, body: comment
+ }
+ } // script
+ } // always
+ } // post
+}
+
+@NonCPS
+List<String> getHadoopVersions(String versions) {
+ return versions.split(',').collect { it.trim() }.findAll { it } as String[]
+}
+
+import org.jenkinsci.plugins.workflow.support.steps.build.RunWrapper
+@NonCPS
+List<String> getJirasToCommentFromChangesets(RunWrapper thisBuild) {
+ def seenJiras = []
+ thisBuild.changeSets.each { cs ->
+ cs.getItems().each { change ->
+ CharSequence msg = change.msg
+ echo "change: ${change}"
+ echo " ${msg}"
+ echo " ${change.commitId}"
+ echo " ${change.author}"
+ echo ""
+ seenJiras = getJirasToComment(msg, seenJiras)
+ }
+ }
+ return seenJiras
+}
+
+@NonCPS
+List<String> getJirasToComment(CharSequence source, List<String> seen) {
+ source.eachMatch("HBASE-[0-9]+") { currentIssue ->
+ echo "[DEBUG] found jira key: ${currentIssue}"
+ if (currentIssue in seen) {
+ echo "[DEBUG] already commented on ${currentIssue}."
+ } else {
+ echo "[INFO] commenting on ${currentIssue}."
+ seen << currentIssue
+ }
+ }
+ return seen
+}
+
diff --git a/dev-support/integration-test/patch-hadoop3.sh
b/dev-support/integration-test/patch-hadoop3.sh
new file mode 100755
index 00000000000..b4c51ca9487
--- /dev/null
+++ b/dev-support/integration-test/patch-hadoop3.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+hadoop_dir=$1
+
+sed -i
"s/HADOOP_TOOLS_DIR=\${HADOOP_TOOLS_DIR:-\"share\/hadoop\/tools\"}/HADOOP_TOOLS_DIR=\${HADOOP_TOOLS_DIR:-\"\$HADOOP_TOOLS_HOME\/share\/hadoop\/tools\"}/g"
"$hadoop_dir/libexec/hadoop-functions.sh"
+sed -i
"/HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager/a
mockitojar=\$(echo
\"\${HADOOP_TOOLS_LIB_JARS_DIR}\"\/mockito-core-[0-9]*.jar)\nhadoop_add_classpath
\"\${mockitojar}\"" "$hadoop_dir/bin/mapred"
+curl
https://repo1.maven.org/maven2/org/mockito/mockito-core/2.28.2/mockito-core-2.28.2.jar
-o "$hadoop_dir/share/hadoop/tools/lib/mockito-core-2.28.2.jar"
diff --git a/dev-support/hbase_nightly_pseudo-distributed-test.sh
b/dev-support/integration-test/pseudo-distributed-test.sh
similarity index 93%
rename from dev-support/hbase_nightly_pseudo-distributed-test.sh
rename to dev-support/integration-test/pseudo-distributed-test.sh
index 923341ab43e..d9da9a38ef0 100755
--- a/dev-support/hbase_nightly_pseudo-distributed-test.sh
+++ b/dev-support/integration-test/pseudo-distributed-test.sh
@@ -308,7 +308,11 @@ fi
if [ "${hadoop_version%.*.*}" -gt 2 ]; then
echo "Verifying configs"
- "${hadoop_exec}" --config "${working_dir}/hbase-conf/" conftest
+ hadoop_conf_files=""
+ for f in "${working_dir}"/hbase-conf/*-site.xml; do
+ hadoop_conf_files="$hadoop_conf_files -conffile $f"
+ done
+ "${hadoop_exec}" --config "${working_dir}/hbase-conf/" conftest
$hadoop_conf_files
fi
if [ -n "${clean}" ]; then
@@ -322,8 +326,16 @@ echo "Listing HDFS contents"
redirect_and_run "${working_dir}/hadoop_cluster_smoke" \
"${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -ls -R /
+if [ "${hadoop_version%.*.*}" -gt 2 ]; then
+ # for now, all hbase branches are compiled with hadoop 3.4.x when using
hadoop-3.0 profile, where
+ # the protobuf library has been shaded and relocated, so we always need to
use ProtobufRpcEngine2
+ # at hbase side, even if the hadoop server side uses ProtobufRpcEngine, so
here we will remove the
+ # config value to let the hadoop code pick the right rpc engine
+ sed -i "/<property>.*ProtobufRpcEngine.*<\/property>/d"
"${working_dir}/hbase-conf/core-site.xml"
+fi
+
echo "Starting up HBase"
-HBASE_CONF_DIR="${working_dir}/hbase-conf/"
"${component_install}/bin/start-hbase.sh"
+HBASE_CONF_DIR="${working_dir}/hbase-conf/" HBASE_LOG_DIR="${working_dir}"
"${component_install}/bin/start-hbase.sh"
sleep_time=2
until "${component_install}/bin/hbase" --config "${working_dir}/hbase-conf/"
shell --noninteractive >"${working_dir}/waiting_hbase_startup.log" 2>&1 <<EOF
@@ -413,7 +425,7 @@ HADOOP_CLASSPATH="${hbase_dep_classpath}" redirect_and_run
"${working_dir}/mr-im
EOF
echo "Verifying row count from import."
-import_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase"
--config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | tail
-n 1)
+import_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase"
--config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | grep
"row(s)" | awk '{print $1}')
if [ ! "${import_rowcount}" -eq 48 ]; then
echo "ERROR: Instead of finding 48 rows, we found ${import_rowcount}."
exit 2
@@ -512,11 +524,11 @@ public class HBaseClientReadWriteExample {
}
EOF
redirect_and_run "${working_dir}/hbase-shaded-client-compile" \
- javac -cp
"${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hadoop_jars}"
"${working_dir}/HBaseClientReadWriteExample.java"
+ $JAVA_HOME/bin/javac -cp
"${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hadoop_jars}"
"${working_dir}/HBaseClientReadWriteExample.java"
echo "Running shaded client example. It'll fetch the set of regions,
round-trip them to a file in HDFS, then write them one-per-row into the test
table."
# The order of classpath entries here is important. if we're using non-shaded
Hadoop 3 / 2.9.0 jars, we have to work around YARN-2190.
redirect_and_run "${working_dir}/hbase-shaded-client-example" \
- java -cp
"${working_dir}/hbase-conf/:${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hbase_dep_classpath}:${working_dir}:${hadoop_jars}"
HBaseClientReadWriteExample
+ $JAVA_HOME/bin/java -cp
"${working_dir}/hbase-conf/:${hbase_client}/lib/shaded-clients/hbase-shaded-client-byo-hadoop-${hbase_version}.jar:${hbase_dep_classpath}:${working_dir}:${hadoop_jars}"
HBaseClientReadWriteExample
echo "Checking on results of example program."
"${hadoop_exec}" --config "${working_dir}/hbase-conf/" fs -copyToLocal
"example-region-listing.data" "${working_dir}/example-region-listing.data"
@@ -526,7 +538,7 @@ echo "Checking on results of example program."
EOF
echo "Verifying row count from example."
-example_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase"
--config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | tail
-n 1)
+example_rowcount=$(echo 'count "test:example"' | "${hbase_client}/bin/hbase"
--config "${working_dir}/hbase-conf/" shell --noninteractive 2>/dev/null | grep
"row(s)" | awk '{print $1}')
if [ "${example_rowcount}" -gt "1050" ]; then
echo "Found ${example_rowcount} rows, which is enough to cover 48 for
import, 1000 example's use of user table regions, 2 for example's use of
meta/namespace regions, and 1 for example's count record"
else
diff --git a/dev-support/hbase_nightly_source-artifact.sh
b/dev-support/integration-test/source-artifact.sh
similarity index 70%
rename from dev-support/hbase_nightly_source-artifact.sh
rename to dev-support/integration-test/source-artifact.sh
index 5d9902e5f04..da48ad07de2 100755
--- a/dev-support/hbase_nightly_source-artifact.sh
+++ b/dev-support/integration-test/source-artifact.sh
@@ -16,7 +16,6 @@
# specific language governing permissions and limitations
# under the License.
-set -e
function usage {
echo "Usage: ${0} [options] /path/to/component/checkout"
echo ""
@@ -34,6 +33,13 @@ function usage {
echo " a git checkout, including
ignored files."
exit 1
}
+
+set -e
+
+MVN="mvn"
+if ! command -v mvn &>/dev/null; then
+ MVN=$MAVEN_HOME/bin/mvn
+fi
# if no args specified, show usage
if [ $# -lt 1 ]; then
usage
@@ -125,7 +131,7 @@ fi
# See http://hbase.apache.org/book.html#maven.release
echo "Maven details, in case our JDK doesn't match expectations:"
-mvn --version --offline | tee "${working_dir}/maven_version"
+${MVN} --version --offline | tee "${working_dir}/maven_version"
echo "Do a clean building of the source artifact using code in
${component_dir}"
cd "${component_dir}"
@@ -154,7 +160,6 @@ echo "Checking against things we don't expect to include in
the source tarball (
# e.g. prior to HBASE-19152 we'd have the following lines (ignoring the bash
comment marker):
#Only in .: .gitattributes
#Only in .: .gitignore
-#Only in .: hbase-native-client
cat >known_excluded <<END
Only in .: .git
END
@@ -170,20 +175,68 @@ else
echo "Everything looks as expected."
fi
+function get_hadoop3_version {
+ local version="$1"
+ if [[ "${version}" =~ -SNAPSHOT$ ]]; then
+ echo "${version/-SNAPSHOT/-hadoop3-SNAPSHOT}"
+ else
+ echo "${version}-hadoop3"
+ fi
+}
+
+function build_tarball {
+ local build_hadoop3=$1
+ local mvn_extra_args=""
+ local build_log="srctarball_install.log"
+ local tarball_glob="hbase-*-bin.tar.gz"
+ if [ $build_hadoop3 -ne 0 ]; then
+ local version=$(${MVN} -Dmaven.repo.local="${m2_tarbuild}" help:evaluate
-Dexpression=project.version -q -DforceStdout)
+ local hadoop3_version=$(get_hadoop3_version $version)
+ mvn_extra_args="-Drevision=${hadoop3_version} -Dhadoop.profile=3.0"
+ build_log="hadoop3_srctarball_install.log"
+ tarball_glob="hbase-*-hadoop3-*-bin.tar.gz"
+ echo "Follow the ref guide section on making a RC: Step 8 Build the
hadoop3 binary tarball."
+ else
+ echo "Follow the ref guide section on making a RC: Step 7 Build the binary
tarball."
+ fi
+ if ${MVN} --threads=2 -DskipTests -Prelease --batch-mode
-Dmaven.repo.local="${m2_tarbuild}" ${mvn_extra_args} clean install \
+ assembly:single >"${working_dir}/${build_log}" 2>&1; then
+ for artifact in "${unpack_dir}"/hbase-assembly/target/${tarball_glob}; do
+ if [ -f "${artifact}" ]; then
+ # TODO check the layout of the binary artifact we just made.
+ echo "Building a binary tarball from the source tarball succeeded."
+ return 0
+ fi
+ done
+ fi
+
+ echo "Building a binary tarball from the source tarball failed. see
${working_dir}/${build_log} for details."
+ # Copy up the rat.txt to the working dir so available in build archive in
case rat complaints.
+ # rat.txt can be under any module target dir... copy them all up renaming
them to include parent dir as we go.
+ find ${unpack_dir} -name rat.txt -type f | while IFS= read -r NAME; do cp -v
"$NAME" "${working_dir}/${NAME//\//_}"; done
+ return 1
+}
+
cd "${unpack_dir}"
-echo "Follow the ref guide section on making a RC: Step 8 Build the binary
tarball."
-if mvn --threads=2 -DskipTests -Prelease --batch-mode
-Dmaven.repo.local="${m2_tarbuild}" clean install \
- assembly:single >"${working_dir}/srctarball_install.log" 2>&1; then
- for artifact in "${unpack_dir}"/hbase-assembly/target/hbase-*-bin.tar.gz; do
- if [ -f "${artifact}" ]; then
- # TODO check the layout of the binary artifact we just made.
- echo "Building a binary tarball from the source tarball succeeded."
- exit 0
- fi
- done
+
+if ${MVN} -Dmaven.repo.local="${m2_tarbuild}" help:active-profiles | grep -q
hadoop-3.0; then
+ echo "The hadoop-3.0 profile is activated by default, build a default
tarball."
+ build_tarball 0
+else
+ echo "The hadoop-3.0 profile is not activated by default, build a default
tarball first."
+ # use java 8 to build with hadoop2
+ JAVA_HOME="/usr/lib/jvm/java-8" build_tarball 0
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+
+ # move the previous tarballs out, so it will not be cleaned while building
against hadoop3
+ mv "${unpack_dir}"/hbase-assembly/target/hbase-*-bin.tar.gz "${unpack_dir}"/
+ echo "build a hadoop3 tarball."
+ build_tarball 1
+ if [ $? -ne 0 ]; then
+ exit 1
+ fi
+ # move tarballs back
+ mv "${unpack_dir}"/hbase-*-bin.tar.gz "${unpack_dir}"/hbase-assembly/target/
fi
-echo "Building a binary tarball from the source tarball failed. see
${working_dir}/srctarball_install.log for details."
-# Copy up the rat.txt to the working dir so available in build archive in case
rat complaints.
-# rat.txt can be under any module target dir... copy them all up renaming them
to include parent dir as we go.
-find ${unpack_dir} -name rat.txt -type f | while IFS= read -r NAME; do cp -v
"$NAME" "${working_dir}/${NAME//\//_}"; done
-exit 1