[ 
https://issues.apache.org/jira/browse/BEAM-3214?focusedWorklogId=109529&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-109529
 ]

ASF GitHub Bot logged work on BEAM-3214:
----------------------------------------

                Author: ASF GitHub Bot
            Created on: 06/Jun/18 20:06
            Start Date: 06/Jun/18 20:06
    Worklog Time Spent: 10m 
      Work Description: iemejia closed pull request #5499: [BEAM-3214] Add integration test for HBaseIO.
URL: https://github.com/apache/beam/pull/5499
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:

diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster-for-local-dev.yml b/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster-for-local-dev.yml
new file mode 100644
index 00000000000..52433802734
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster-for-local-dev.yml
@@ -0,0 +1,37 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# This manifest exposes the HBase cluster to the DirectRunner and to any
+# other external application that uses the HBase client API.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: hbase-external
+  labels:
+    name: hbase-external
+spec:
+  ports:
+    - name: zookeeper
+      port: 2181
+    - name: hbase-master
+      port: 16000
+    - name: web
+      port: 16010
+    - name: region
+      port: 16201
+  selector:
+    name: hbase
+  type: LoadBalancer
\ No newline at end of file
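
For reference, once this manifest is applied, the LoadBalancer's external IP
can be fetched with the same jsonpath expression the setup script below uses
(a minimal sketch, assuming kubectl is pointed at the test cluster):

  kubectl apply -f hbase-single-node-cluster-for-local-dev.yml
  # Wait for the cloud provider to assign an external IP, then print it.
  kubectl get svc hbase-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}'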
diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster.yml b/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster.yml
new file mode 100644
index 00000000000..0c27f426bc9
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/hbase-single-node-cluster.yml
@@ -0,0 +1,100 @@
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# This script creates a single-node HBase cluster: all three services
+# (Zookeeper, the HBase master, and the regionserver) run on the same Kubernetes pod.
+# Service "hbase" allows reaching the HBase pod via its FQDN.
+# StatefulSet "hbase" creates one replica of a fully configured HBase node.
+# To access the cluster from outside of GCP, also apply the configuration file:
+# hbase-single-node-cluster-for-local-dev.yml
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: hbase
+  labels:
+    name: hbase
+spec:
+  clusterIP: None
+  ports:
+    - name: zookeeper
+      port: 2181
+      targetPort: 2181
+    - name: rest
+      port: 8080
+      targetPort: 8080
+    - name: rest-info
+      port: 8085
+      targetPort: 8085
+    - name: thrift
+      port: 9090
+      targetPort: 9090
+    - name: thrift-info
+      port: 9095
+      targetPort: 9095
+    - name: hbase-master
+      port: 16000
+      targetPort: 16000
+    - name: web
+      port: 16010
+      targetPort: 16010
+    - name: region
+      port: 16201
+      targetPort: 16201
+    - name: region-info
+      port: 16301
+      targetPort: 16301
+  selector:
+    name: hbase
+
+---
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hbase
+spec:
+  selector:
+    matchLabels:
+      name: hbase
+  serviceName: "hbase"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        name: hbase
+    spec:
+      containers:
+        - name: hbase
+          image: harisekhon/hbase:1.2
+          ports:
+            - name: zookeeper
+              containerPort: 2181
+            - name: rest
+              containerPort: 8080
+            - name: rest-info
+              containerPort: 8085
+            - name: thrift
+              containerPort: 9090
+            - name: thrift-info
+              containerPort: 9095
+            - name: hbase-master
+              containerPort: 16000
+            - name: web
+              containerPort: 16010
+            - name: region
+              containerPort: 16201
+            - name: region-info
+              containerPort: 16301
\ No newline at end of file
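
Because the "hbase" Service above is headless (clusterIP: None), the
StatefulSet pod gets a stable DNS name of the form
<pod>.hbase.<namespace>.svc.cluster.local. A quick in-cluster check (a sketch,
assuming the default namespace and that the busybox image is pullable):

  # hbase-0 is the first (and only) replica created by the StatefulSet.
  kubectl run dns-check --image=busybox --restart=Never --rm -it -- \
    nslookup hbase-0.hbase.default.svc.cluster.local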
diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/setup-all.sh b/.test-infra/kubernetes/hbase/SmallITCluster/setup-all.sh
new file mode 100755
index 00000000000..a364f2b822c
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/setup-all.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# This script starts the HBase cluster and the hbase-external service that
+# makes the cluster reachable from a developer's machine. Once the cluster is
+# up, the script waits until the external endpoint becomes available, then
+# prints the line that should be added to /etc/hosts to work with the cluster.
+set -e
+
+kubectl create -f hbase-single-node-cluster.yml
+kubectl create -f hbase-single-node-cluster-for-local-dev.yml
+
+external_ip="$(kubectl get svc hbase-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+
+echo "Waiting for the HBase service to come up ........"
+while [ -z "$external_ip" ]
+do
+   sleep 10s
+   external_ip="$(kubectl get svc hbase-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+   echo "."
+done
+
+hbase_master_pod_name="$(kubectl get pods --selector=name=hbase -o jsonpath='{.items[*].metadata.name}')"
+hbase_master_namespace="$(kubectl get pods --selector=name=hbase -o jsonpath='{.items[*].metadata.namespace}')"
+
+echo "For local tests please add the following entry to /etc/hosts file"
+printf "%s\\t%s.hbase.%s.svc.cluster.local\n" "${external_ip}" "${hbase_master_pod_name}" "${hbase_master_namespace}"
diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/setup.sh b/.test-infra/kubernetes/hbase/SmallITCluster/setup.sh
new file mode 100755
index 00000000000..1b4acd286bb
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/setup.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# Start HBase cluster.
+set -e
+
+kubectl create -f hbase-single-node-cluster.yml
\ No newline at end of file
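
After running setup.sh, pod status can be followed with the same selector the
other scripts use (assuming the kubectl context is already set):

  kubectl get pods --selector=name=hbase --watch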
diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/teardown-all.sh b/.test-infra/kubernetes/hbase/SmallITCluster/teardown-all.sh
new file mode 100755
index 00000000000..162e92bdbc9
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/teardown-all.sh
@@ -0,0 +1,29 @@
+#!/bin/sh
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# This script terminates the HBase cluster and the hbase-external service. It
+# checks the /etc/hosts file for stale entries and notifies the user about them.
+set -e
+
+external_ip="$(kubectl get svc hbase-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+hbase_master_pod_name="$(kubectl get pods --selector=name=hbase -o jsonpath='{.items[*].metadata.name}')"
+
+kubectl delete -f hbase-single-node-cluster.yml
+kubectl delete -f hbase-single-node-cluster-for-local-dev.yml
+
+if grep "${external_ip}\\|${hbase_master_pod_name}" /etc/hosts ; then
+    echo "Remove entry from /etc/hosts."
+fi
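
If the grep above reports a leftover entry, it has to be removed by hand; one
possible way to do that (an illustration only; the pattern assumes the entry
added during setup ends with the in-cluster DNS suffix):

  # Delete any /etc/hosts line with the HBase service DNS suffix; keep a backup.
  sudo sed -i.bak '/\.hbase\..*\.svc\.cluster\.local/d' /etc/hosts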
diff --git a/.test-infra/kubernetes/hbase/SmallITCluster/teardown.sh b/.test-infra/kubernetes/hbase/SmallITCluster/teardown.sh
new file mode 100755
index 00000000000..b35d4b4eb39
--- /dev/null
+++ b/.test-infra/kubernetes/hbase/SmallITCluster/teardown.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+# HBase cluster termination script.
+set -e
+
+kubectl delete -f hbase-single-node-cluster.yml
\ No newline at end of file
diff --git a/sdks/java/io/hbase/build.gradle b/sdks/java/io/hbase/build.gradle
index 001030d3fc2..e3230525bac 100644
--- a/sdks/java/io/hbase/build.gradle
+++ b/sdks/java/io/hbase/build.gradle
@@ -18,6 +18,9 @@
 
 apply from: project(":").file("build_rules.gradle")
 applyJavaNature(failOnWarning: true)
+provideIntegrationTestingDependencies()
+enableJavaPerformanceTesting()
+
 
 description = "Apache Beam :: SDKs :: Java :: IO :: HBase"
 ext.summary = "Library to read and write from/to HBase"
@@ -41,6 +44,7 @@ dependencies {
   shadow library.java.findbugs_jsr305
   shadow library.java.slf4j_api
   shadow "org.apache.hbase:hbase-shaded-client:$hbase_version"
+  testCompile project(path: ":beam-sdks-java-io-common", configuration: "shadowTest")
  testCompile project(path: ":beam-sdks-java-core", configuration: "shadowTest")
  testCompile project(path: ":beam-runners-direct-java", configuration: "shadow")
   testCompile library.java.commons_lang3
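
With provideIntegrationTestingDependencies() and enableJavaPerformanceTesting()
applied, the test can be launched from Gradle with the command documented in
the HBaseIOIT Javadoc further below (the host value is a placeholder):

  ./gradlew clean integrationTest -p sdks/java/io/hbase/ \
    -DintegrationTestPipelineOptions='["--hbaseServerName=1.2.3.4"]' \
    -DintegrationTestRunner=direct \
    --tests org.apache.beam.sdk.io.hbase.HBaseIOIT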
diff --git a/sdks/java/io/hbase/pom.xml b/sdks/java/io/hbase/pom.xml
index 5aa0e86b3a3..2a256b54ba0 100644
--- a/sdks/java/io/hbase/pom.xml
+++ b/sdks/java/io/hbase/pom.xml
@@ -179,6 +179,17 @@
       <artifactId>hamcrest-library</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.beam</groupId>
+      <artifactId>beam-sdks-java-io-common</artifactId>
+      <scope>test</scope>
+      <classifier>tests</classifier>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.beam</groupId>
+      <artifactId>beam-sdks-java-io-common</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
 </project>
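
For the Maven build, an equivalent invocation would go through the
failsafe-based io-it setup used by other Beam IO modules (an assumption: the
profile and flag names below follow that convention and are not confirmed by
this diff):

  mvn clean verify -pl sdks/java/io/hbase -Pio-it \
    -DintegrationTestPipelineOptions='["--hbaseServerName=1.2.3.4"]'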
diff --git a/sdks/java/io/hbase/src/test/java/org/apache/beam/sdk/io/hbase/HBaseIOIT.java b/sdks/java/io/hbase/src/test/java/org/apache/beam/sdk/io/hbase/HBaseIOIT.java
new file mode 100644
index 00000000000..3b7787f7242
--- /dev/null
+++ b/sdks/java/io/hbase/src/test/java/org/apache/beam/sdk/io/hbase/HBaseIOIT.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.beam.sdk.io.hbase;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import org.apache.beam.sdk.io.GenerateSequence;
+import org.apache.beam.sdk.io.common.HashingFn;
+import org.apache.beam.sdk.io.common.IOTestPipelineOptions;
+import org.apache.beam.sdk.io.common.TestRow;
+import org.apache.beam.sdk.options.Default;
+import org.apache.beam.sdk.options.Description;
+import org.apache.beam.sdk.options.PipelineOptionsFactory;
+import org.apache.beam.sdk.testing.PAssert;
+import org.apache.beam.sdk.testing.TestPipeline;
+import org.apache.beam.sdk.transforms.Combine;
+import org.apache.beam.sdk.transforms.Count;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.values.PCollection;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.JUnit4;
+
+/**
+ * A test of {@link org.apache.beam.sdk.io.hbase.HBaseIO} on an independent HBase instance.
+ *
+ * <p>This test requires a running instance of HBase. Pass in connection information using
+ * PipelineOptions:
+ *
+ * <pre>
+ *
+ *  ./gradlew clean integrationTest -p sdks/java/io/hbase/ -DintegrationTestPipelineOptions='[
+ *  "--hbaseServerName=1.2.3.4"]' -DintegrationTestRunner=direct
+ *  --tests org.apache.beam.sdk.io.hbase.HBaseIOIT
+ *
+ * </pre>
+ */
+@RunWith(JUnit4.class)
+public class HBaseIOIT {
+
+  /** HBaseIOIT options. */
+  public interface HBasePipelineOptions extends IOTestPipelineOptions {
+    @Description("HBase host")
+    @Default.String("HBase-host")
+    String getHbaseServerName();
+
+    void setHbaseServerName(String host);
+  }
+
+  private static int numberOfRows;
+  private static final Configuration conf = HBaseConfiguration.create();
+  private static final String TABLE_NAME = "IOTesting";
+  private static final byte[] COLUMN_FAMILY = Bytes.toBytes("TestData");
+  private static final byte[] COLUMN_HASH = Bytes.toBytes("hash");
+  private static Admin admin;
+  private static HBasePipelineOptions options;
+
+  @Rule public TestPipeline pipelineWrite = TestPipeline.create();
+  @Rule public TestPipeline pipelineRead = TestPipeline.create();
+
+  @BeforeClass
+  public static void setup() throws IOException {
+    PipelineOptionsFactory.register(HBasePipelineOptions.class);
+    options = TestPipeline.testingPipelineOptions().as(HBasePipelineOptions.class);
+
+    numberOfRows = options.getNumberOfRecords();
+
+    conf.setStrings("hbase.zookeeper.quorum", options.getHbaseServerName());
+    conf.setStrings("hbase.cluster.distributed", "true");
+    conf.setStrings("hbase.client.retries.number", "1");
+
+    Connection connection = ConnectionFactory.createConnection(conf);
+
+    admin = connection.getAdmin();
+    HTableDescriptor testTable =
+        new HTableDescriptor(TableName.valueOf(TABLE_NAME))
+            .addFamily(new HColumnDescriptor(COLUMN_FAMILY));
+    admin.createTable(testTable);
+  }
+
+  @AfterClass
+  public static void tearDown() throws IOException {
+    admin.disableTable(TableName.valueOf(TABLE_NAME));
+    admin.deleteTable(TableName.valueOf(TABLE_NAME));
+  }
+
+  /** Tests writing then reading data for an HBase database. */
+  @Test
+  public void testWriteThenRead() {
+    runWrite();
+    runRead();
+  }
+
+  /** Writes the test dataset to HBase. */
+  private void runWrite() {
+    pipelineWrite
+        .apply("Generate Sequence", GenerateSequence.from(0).to((long) 
numberOfRows))
+        .apply("Prepare TestRows", ParDo.of(new 
TestRow.DeterministicallyConstructTestRowFn()))
+        .apply("Prepare mutations", ParDo.of(new ConstructMutations()))
+        .apply("Write to HBase", 
HBaseIO.write().withConfiguration(conf).withTableId(TABLE_NAME));
+
+    pipelineWrite.run().waitUntilFinish();
+  }
+
+  /** Reads the test dataset from HBase and validates its contents. */
+  private void runRead() {
+    PCollection<Result> tableRows =
+        pipelineRead.apply(HBaseIO.read().withConfiguration(conf).withTableId(TABLE_NAME));
+
+    PAssert.thatSingleton(tableRows.apply("Count All", Count.<Result>globally()))
+        .isEqualTo((long) numberOfRows);
+
+    PCollection<String> consolidatedHashcode =
+        tableRows
+            .apply(ParDo.of(new SelectNameFn()))
+            .apply("Hash row contents", Combine.globally(new 
HashingFn()).withoutDefaults());
+
+    PAssert.that(consolidatedHashcode)
+        .containsInAnyOrder(TestRow.getExpectedHashForRowCount(numberOfRows));
+
+    pipelineRead.run().waitUntilFinish();
+  }
+
+  /** Produces HBase {@link Mutation}s from the generated test rows. */
+  private static class ConstructMutations extends DoFn<TestRow, Mutation> {
+    @ProcessElement
+    public void processElement(ProcessContext c) {
+      c.output(
+          new Put(c.element().id().toString().getBytes(StandardCharsets.UTF_8))
+              .addColumn(COLUMN_FAMILY, COLUMN_HASH, Bytes.toBytes(c.element().name())));
+    }
+  }
+
+  /** Reads the stored value back from each table {@link Result}. */
+  private static class SelectNameFn extends DoFn<Result, String> {
+    @ProcessElement
+    public void processElement(ProcessContext c) {
+      c.output(
+          new String(c.element().getValue(COLUMN_FAMILY, COLUMN_HASH), StandardCharsets.UTF_8));
+    }
+  }
+}
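
Since HBasePipelineOptions extends IOTestPipelineOptions, the row count read by
setup() via getNumberOfRecords() can also be set on the command line; for
example (a sketch, with placeholder values):

  ./gradlew integrationTest -p sdks/java/io/hbase/ \
    -DintegrationTestPipelineOptions='["--hbaseServerName=1.2.3.4","--numberOfRecords=1000"]' \
    -DintegrationTestRunner=direct \
    --tests org.apache.beam.sdk.io.hbase.HBaseIOIT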


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Issue Time Tracking
-------------------

    Worklog Id:     (was: 109529)
    Time Spent: 5h 50m  (was: 5h 40m)

> Add an integration test for HBaseIO Read/Write transforms
> ---------------------------------------------------------
>
>                 Key: BEAM-3214
>                 URL: https://issues.apache.org/jira/browse/BEAM-3214
>             Project: Beam
>          Issue Type: Test
>          Components: io-java-hbase
>            Reporter: Chamikara Jayalath
>            Assignee: Kamil Szewczyk
>            Priority: Major
>             Fix For: 2.5.0
>
>          Time Spent: 5h 50m
>  Remaining Estimate: 0h
>
> We should add a small-scale integration test for HBaseIO that can be run as 
> part of the 'beam_PostCommit_Java_MavenInstall' and 
> 'beam_PostCommit_Java_ValidatesRunner*' Jenkins test suites.



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
