Repository: hbase
Updated Branches:
  refs/heads/master 68b2f5502 -> a6eeb26cc


http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
index cb9e0c7..b5143de 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -32,11 +31,13 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * This is a simple example of getting records in HBase
  * with the bulkGet function.
  */
+@InterfaceAudience.Private
 final public class JavaHBaseBulkGetExample {
 
   private JavaHBaseBulkGetExample() {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
index f0f3e79..6738059 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -33,6 +32,7 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Run this example using command below:
@@ -45,6 +45,7 @@ import org.apache.spark.api.java.function.Function;
  * 'hbase org.apache.hadoop.hbase.tool.LoadIncrementalHFiles' to load the 
HFiles into table to
  * verify this example.
  */
+@InterfaceAudience.Private
 final public class JavaHBaseBulkLoadExample {
   private JavaHBaseBulkLoadExample() {}
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
index 5821c19..4a80b96 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
 
 import java.util.ArrayList;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
@@ -29,11 +28,13 @@ import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * This is a simple example of putting records in HBase
  * with the bulkPut function.
  */
+@InterfaceAudience.Private
 final public class JavaHBaseBulkPutExample {
 
   private JavaHBaseBulkPutExample() {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
index 8d4c092..0d4f680 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseDistributedScan.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.hbase.spark.example.hbasecontext;
 
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
@@ -29,14 +28,15 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
-
 import org.apache.spark.api.java.function.Function;
+import org.apache.yetus.audience.InterfaceAudience;
 import scala.Tuple2;
 
 /**
  * This is a simple example of scanning records from HBase
  * with the hbaseRDD function.
  */
+@InterfaceAudience.Private
 final public class JavaHBaseDistributedScan {
 
   private JavaHBaseDistributedScan() {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
index 9a1259e..a55d853 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.TableName;
@@ -36,13 +35,14 @@ import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.Function;
 import org.apache.spark.api.java.function.VoidFunction;
-
+import org.apache.yetus.audience.InterfaceAudience;
 import scala.Tuple2;
 
 /**
  * This is a simple example of using the foreachPartition
  * method with a HBase connection
  */
+@InterfaceAudience.Private
 final public class JavaHBaseMapGetPutExample {
 
   private JavaHBaseMapGetPutExample() {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
index cd4cf24..74fadc6 100644
--- 
a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
+++ 
b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseStreamingBulkPutExample.java
@@ -28,10 +28,12 @@ import org.apache.spark.api.java.function.Function;
 import org.apache.spark.streaming.Duration;
 import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
 import org.apache.spark.streaming.api.java.JavaStreamingContext;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * This is a simple example of BulkPut with Spark Streaming
  */
+@InterfaceAudience.Private
 final public class JavaHBaseStreamingBulkPutExample {
 
   private JavaHBaseStreamingBulkPutExample() {}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
index 8eb4dd9..1fc92c0 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCache.scala
@@ -18,16 +18,22 @@
 package org.apache.hadoop.hbase.spark
 
 import java.io.IOException
-
 import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.hbase.client.{Admin, Connection, ConnectionFactory, 
RegionLocator, Table}
+import org.apache.hadoop.hbase.client.Admin
+import org.apache.hadoop.hbase.client.Connection
+import org.apache.hadoop.hbase.client.ConnectionFactory
+import org.apache.hadoop.hbase.client.RegionLocator
+import org.apache.hadoop.hbase.client.Table
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory
-import org.apache.hadoop.hbase.security.{User, UserProvider}
+import org.apache.hadoop.hbase.security.User
+import org.apache.hadoop.hbase.security.UserProvider
 import org.apache.hadoop.hbase.spark.datasources.HBaseSparkConf
-import org.apache.hadoop.hbase.{HConstants, TableName}
-
+import org.apache.hadoop.hbase.HConstants
+import org.apache.hadoop.hbase.TableName
+import org.apache.yetus.audience.InterfaceAudience
 import scala.collection.mutable
 
+@InterfaceAudience.Private
 private[spark] object HBaseConnectionCache extends Logging {
 
   // A hashmap of Spark-HBase connections. Key is HBaseConnectionKey.
@@ -130,6 +136,7 @@ private[spark] object HBaseConnectionCache extends Logging {
   }
 }
 
+@InterfaceAudience.Private
 private[hbase] case class SmartConnection (
     connection: Connection, var refCount: Int = 0, var timestamp: Long = 0) {
   def getTable(tableName: TableName): Table = connection.getTable(tableName)
@@ -153,6 +160,7 @@ private[hbase] case class SmartConnection (
  * that may be used in the process of establishing a connection.
  *
  */
+@InterfaceAudience.Private
 class HBaseConnectionKey(c: Configuration) extends Logging {
   val conf: Configuration = c
   val CONNECTION_PROPERTIES: Array[String] = Array[String](
@@ -256,6 +264,7 @@ class HBaseConnectionKey(c: Configuration) extends Logging {
  * @param numActualConnectionsCreated number of actual HBase connections the 
cache ever created
  * @param numActiveConnections number of current alive HBase connections the 
cache is holding
  */
+@InterfaceAudience.Private
 case class HBaseConnectionCacheStat(var numTotalRequests: Long,
                                     var numActualConnectionsCreated: Long,
                                     var numActiveConnections: Long)

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index 0156e70..d376bf2 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -1112,6 +1112,7 @@ class HBaseContext(@transient val sc: SparkContext,
   class WriterLength(var written:Long, val writer:StoreFileWriter)
 }
 
+@InterfaceAudience.Private
 object LatestHBaseContextCache {
   var latest:HBaseContext = null
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
index 9a67477..a92f4e0 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/Logging.scala
@@ -17,9 +17,10 @@
 
 package org.apache.hadoop.hbase.spark
 
-import org.apache.log4j.LogManager
-import org.slf4j.{Logger, LoggerFactory}
+import org.apache.yetus.audience.InterfaceAudience
 import org.slf4j.impl.StaticLoggerBinder
+import org.slf4j.Logger
+import org.slf4j.LoggerFactory
 
 /**
  * Utility trait for classes that want to log data. Creates a SLF4J logger for 
the class and allows
@@ -28,6 +29,7 @@ import org.slf4j.impl.StaticLoggerBinder
  * Logging is private in Spark 2.0
  * This is to isolate incompatibilties across Spark releases.
  */
+@InterfaceAudience.Private
 trait Logging {
 
   // Make the log field transient so that objects with Logging can

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
index efeaa7c..6a65667 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
@@ -266,6 +266,7 @@ class HBaseTableScanRDD(relation: HBaseRelation,
   }
 }
 
+@InterfaceAudience.Private
 case class SerializedFilter(b: Option[Array[Byte]])
 
 object SerializedFilter {
@@ -278,13 +279,14 @@ object SerializedFilter {
   }
 }
 
+@InterfaceAudience.Private
 private[hbase] case class HBaseRegion(
     override val index: Int,
     val start: Option[HBaseType] = None,
     val end: Option[HBaseType] = None,
     val server: Option[String] = None) extends Partition
 
-
+@InterfaceAudience.Private
 private[hbase] case class HBaseScanPartition(
     override val index: Int,
     val regions: HBaseRegion,
@@ -292,6 +294,7 @@ private[hbase] case class HBaseScanPartition(
     val points: Seq[Array[Byte]],
     val sf: SerializedFilter) extends Partition
 
+@InterfaceAudience.Private
 case class RDDResources(set: mutable.HashSet[Resource]) {
   def addResource(s: Resource) {
     set += s

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
index 98cc871..fc0e4d0 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala
@@ -17,26 +17,18 @@
 
 package org.apache.hadoop.hbase.spark.datasources
 
-import java.io.ByteArrayInputStream
-
-import org.apache.avro.Schema
-import org.apache.avro.Schema.Type._
-import org.apache.avro.generic.GenericDatumReader
-import org.apache.avro.generic.GenericDatumWriter
-import org.apache.avro.generic.GenericRecord
-import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, 
GenericRecord}
-import org.apache.avro.io._
-import org.apache.commons.io.output.ByteArrayOutputStream
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.sql.types._
+import org.apache.yetus.audience.InterfaceAudience
 
 // TODO: This is not really used in code.
+@InterfaceAudience.Private
 trait SerDes {
   def serialize(value: Any): Array[Byte]
   def deserialize(bytes: Array[Byte], start: Int, end: Int): Any
 }
 
 // TODO: This is not really used in code.
+@InterfaceAudience.Private
 class DoubleSerDes extends SerDes {
   override def serialize(value: Any): Array[Byte] = 
Bytes.toBytes(value.asInstanceOf[Double])
   override def deserialize(bytes: Array[Byte], start: Int, end: Int): Any = {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
index ce7b55a..8f1f15c 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/package.scala
@@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.util.Bytes
 
 import scala.math.Ordering
 
+// TODO: add @InterfaceAudience.Private if 
https://issues.scala-lang.org/browse/SI-3600 is resolved
 package object hbase {
   type HBaseType = Array[Byte]
   def bytesMin = new Array[Byte](0)

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
index c09e99d..fda3c78 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
@@ -21,16 +21,20 @@ import org.apache.avro.Schema
 import org.apache.avro.generic.GenericData
 import org.apache.hadoop.hbase.spark.AvroSerdes
 import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * @param col0 Column #0, Type is String
  * @param col1 Column #1, Type is Array[Byte]
  */
+@InterfaceAudience.Private
 case class AvroHBaseRecord(col0: String,
                            col1: Array[Byte])
-
+@InterfaceAudience.Private
 object AvroHBaseRecord {
   val schemaString =
     s"""{"namespace": "example.avro",
@@ -58,7 +62,7 @@ object AvroHBaseRecord {
     favoriteArray.add(s"number${i}")
     favoriteArray.add(s"number${i+1}")
     user.put("favorite_array", favoriteArray)
-    import collection.JavaConverters._
+    import scala.collection.JavaConverters._
     val favoriteMap = Map[String, Int](("key1" -> i), ("key2" -> (i+1))).asJava
     user.put("favorite_map", favoriteMap)
     val avroByte = AvroSerdes.serialize(user, avroSchema)
@@ -66,6 +70,7 @@ object AvroHBaseRecord {
   }
 }
 
+@InterfaceAudience.Private
 object AvroSource {
   def catalog = s"""{
                     |"table":{"namespace":"default", 
"name":"ExampleAvrotable"},

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
index 96c6d6e..0630371 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
@@ -17,13 +17,18 @@
 
 package org.apache.hadoop.hbase.spark.example.datasources
 
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkContext, SparkConf}
 import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
+@InterfaceAudience.Private
 class UserCustomizedSampleException(message: String = null, cause: Throwable = 
null) extends
   RuntimeException(UserCustomizedSampleException.message(message, cause), 
cause)
 
+@InterfaceAudience.Private
 object UserCustomizedSampleException {
   def message(message: String, cause: Throwable) =
     if (message != null) message
@@ -31,6 +36,7 @@ object UserCustomizedSampleException {
     else null
 }
 
+@InterfaceAudience.Private
 case class IntKeyRecord(
   col0: Integer,
   col1: Boolean,
@@ -56,6 +62,7 @@ object IntKeyRecord {
   }
 }
 
+@InterfaceAudience.Private
 object DataType {
   val cat = s"""{
                 |"table":{"namespace":"default", 
"name":"DataTypeExampleTable"},

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
index 056c071..b414a37 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
@@ -17,10 +17,14 @@
 
 package org.apache.hadoop.hbase.spark.example.datasources
 
-import org.apache.spark.sql.{DataFrame, SQLContext}
-import org.apache.spark.{SparkContext, SparkConf}
 import org.apache.spark.sql.datasources.hbase.HBaseTableCatalog
+import org.apache.spark.sql.DataFrame
+import org.apache.spark.sql.SQLContext
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
+@InterfaceAudience.Private
 case class HBaseRecord(
   col0: String,
   col1: Boolean,
@@ -32,6 +36,7 @@ case class HBaseRecord(
   col7: String,
   col8: Byte)
 
+@InterfaceAudience.Private
 object HBaseRecord {
   def apply(i: Int): HBaseRecord = {
     val s = s"""row${"%03d".format(i)}"""
@@ -47,6 +52,7 @@ object HBaseRecord {
   }
 }
 
+@InterfaceAudience.Private
 object HBaseSource {
   val cat = s"""{
                 |"table":{"namespace":"default", 
"name":"HBaseSourceExampleTable"},

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
index 46135a5..506fd22 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkDeleteExample.scala
@@ -17,17 +17,20 @@
 
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
+import org.apache.hadoop.hbase.client.Delete
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Delete
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of deleting records in HBase
  * with the bulkDelete function.
  */
+@InterfaceAudience.Private
 object HBaseBulkDeleteExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
index 1bdc90d..58bc1d4 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkGetExample.scala
@@ -17,18 +17,22 @@
 
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
-import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{CellUtil, TableName, HBaseConfiguration}
-import org.apache.hadoop.hbase.util.Bytes
 import org.apache.hadoop.hbase.client.Get
 import org.apache.hadoop.hbase.client.Result
+import org.apache.hadoop.hbase.spark.HBaseContext
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.CellUtil
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of getting records from HBase
  * with the bulkGet function.
  */
+@InterfaceAudience.Private
 object HBaseBulkGetExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
index 063f2c2..0a6f379 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExample.scala
@@ -17,17 +17,20 @@
 
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
+import org.apache.hadoop.hbase.client.Put
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of putting records in HBase
  * with the bulkPut function.
  */
+@InterfaceAudience.Private
 object HBaseBulkPutExample {
   def main(args: Array[String]) {
     if (args.length < 2) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
index 37a0358..51ff0da 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutExampleFromFile.scala
@@ -17,21 +17,24 @@
 
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
+import org.apache.hadoop.hbase.client.Put
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
-import org.apache.hadoop.mapred.TextInputFormat
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.hadoop.io.LongWritable
 import org.apache.hadoop.io.Text
+import org.apache.hadoop.mapred.TextInputFormat
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of putting records in HBase
  * with the bulkPut function.  In this example we are
  * getting the put information from a file
  */
+@InterfaceAudience.Private
 object HBaseBulkPutExampleFromFile {
   def main(args: Array[String]) {
     if (args.length < 3) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
index fa78216..9bfcc2c 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseBulkPutTimestampExample.scala
@@ -19,16 +19,18 @@ package org.apache.hadoop.hbase.spark.example.hbasecontext
 
 import org.apache.hadoop.hbase.spark.HBaseContext
 import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
+import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
 import org.apache.hadoop.hbase.util.Bytes
 import org.apache.hadoop.hbase.client.Put
 import org.apache.spark.SparkConf
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of putting records in HBase
  * with the bulkPut function.  In this example we are
  * also setting the timestamp in the put
  */
+@InterfaceAudience.Private
 object HBaseBulkPutTimestampExample {
   def main(args: Array[String]) {
     if (args.length < 2) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
index bb2e79d..7d8643a 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseDistributedScanExample.scala
@@ -16,16 +16,19 @@
  */
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
+import org.apache.hadoop.hbase.client.Scan
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Scan
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 /**
  * This is a simple example of scanning records from HBase
  * with the hbaseRDD function in Distributed fashion.
  */
[email protected]
 object HBaseDistributedScanExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
index 8ac93ef..20a22f7 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/hbasecontext/HBaseStreamingBulkPutExample.scala
@@ -17,18 +17,21 @@
 
 package org.apache.hadoop.hbase.spark.example.hbasecontext
 
+import org.apache.hadoop.hbase.client.Put
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.spark.SparkContext
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.client.Put
-import org.apache.spark.streaming.StreamingContext
-import org.apache.spark.streaming.Seconds
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.spark.streaming.Seconds
+import org.apache.spark.streaming.StreamingContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of BulkPut with Spark Streaming
  */
[email protected]
 object HBaseStreamingBulkPutExample {
   def main(args: Array[String]) {
     if (args.length < 4) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
index 83d3f9e..0ba4d1c 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkDeleteExample.scala
@@ -17,17 +17,20 @@
 package org.apache.hadoop.hbase.spark.example.rdd
 
 import org.apache.hadoop.hbase.client.Delete
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.spark.HBaseContext
 import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
 import org.apache.hadoop.hbase.util.Bytes
-
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of deleting records in HBase
  * with the bulkDelete function.
  */
[email protected]
 object HBaseBulkDeleteExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
index eedabc3..0736f6e 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkGetExample.scala
@@ -16,17 +16,23 @@
  */
 package org.apache.hadoop.hbase.spark.example.rdd
 
-import org.apache.hadoop.hbase.client.{Result, Get}
-import org.apache.hadoop.hbase.{CellUtil, TableName, HBaseConfiguration}
+import org.apache.hadoop.hbase.client.Get
+import org.apache.hadoop.hbase.client.Result
 import org.apache.hadoop.hbase.spark.HBaseContext
-import org.apache.hadoop.hbase.util.Bytes
 import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.util.Bytes
+import org.apache.hadoop.hbase.CellUtil
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of getting records from HBase
  * with the bulkGet function.
  */
[email protected]
 object HBaseBulkGetExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
index 28711b8..9f5885f 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseBulkPutExample.scala
@@ -21,13 +21,17 @@ import org.apache.hadoop.hbase.client.Put
 import org.apache.hadoop.hbase.spark.HBaseContext
 import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
-import org.apache.spark.{SparkConf, SparkContext}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of putting records in HBase
  * with the bulkPut function.
  */
[email protected]
 object HBaseBulkPutExample {
    def main(args: Array[String]) {
      if (args.length < 2) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
index 8dfefc2..be257ee 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseForeachPartitionExample.scala
@@ -17,17 +17,21 @@
 
 package org.apache.hadoop.hbase.spark.example.rdd
 
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
 import org.apache.hadoop.hbase.client.Put
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.spark.HBaseContext
 import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of using the foreachPartition
  * method with a HBase connection
  */
[email protected]
 object HBaseForeachPartitionExample {
   def main(args: Array[String]) {
     if (args.length < 2) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
index 0d0b314..0793524 100644
--- 
a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/rdd/HBaseMapPartitionExample.scala
@@ -18,16 +18,20 @@
 package org.apache.hadoop.hbase.spark.example.rdd
 
 import org.apache.hadoop.hbase.client.Get
-import org.apache.hadoop.hbase.{TableName, HBaseConfiguration}
 import org.apache.hadoop.hbase.spark.HBaseContext
 import org.apache.hadoop.hbase.spark.HBaseRDDFunctions._
 import org.apache.hadoop.hbase.util.Bytes
-import org.apache.spark.{SparkContext, SparkConf}
+import org.apache.hadoop.hbase.HBaseConfiguration
+import org.apache.hadoop.hbase.TableName
+import org.apache.spark.SparkConf
+import org.apache.spark.SparkContext
+import org.apache.yetus.audience.InterfaceAudience
 
 /**
  * This is a simple example of using the mapPartitions
  * method with a HBase connection
  */
[email protected]
 object HBaseMapPartitionExample {
   def main(args: Array[String]) {
     if (args.length < 1) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
----------------------------------------------------------------------
diff --git 
a/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
 
b/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
index 37ee346..4307852 100644
--- 
a/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
+++ 
b/hbase-spark/src/main/scala/org/apache/spark/sql/datasources/hbase/DataTypeParserWrapper.scala
@@ -19,11 +19,14 @@ package org.apache.spark.sql.datasources.hbase
 
 import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
 import org.apache.spark.sql.types.DataType
+import org.apache.yetus.audience.InterfaceAudience
 
[email protected]
 trait DataTypeParser {
   def parse(dataTypeString: String): DataType
 }
 
[email protected]
 object DataTypeParserWrapper extends DataTypeParser{
  def parse(dataTypeString: String): DataType = CatalystSqlParser.parseDataType(dataTypeString)
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-thrift/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index a991953..f1624df 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -145,6 +145,10 @@
           <failOnViolation>true</failOnViolation>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>warbucks-maven-plugin</artifactId>
+      </plugin>
     </plugins>
     <pluginManagement>
       <plugins>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
----------------------------------------------------------------------
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
index eab5307..b75b433 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/HttpAuthenticationException.java
@@ -18,6 +18,9 @@
  */
 package org.apache.hadoop.hbase.thrift;
 
+import org.apache.yetus.audience.InterfaceAudience;
+
[email protected]
 public class HttpAuthenticationException extends Exception {
   private static final long serialVersionUID = 0;
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
----------------------------------------------------------------------
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
index 3dad28a..e36d639 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java
@@ -31,7 +31,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
-
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.thrift.ThriftServerRunner.HBaseHandler;
@@ -40,6 +39,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.thrift.TException;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -52,6 +52,7 @@ import org.slf4j.LoggerFactory;
  * thrift server dies or is shut down before everything in the queue is drained.
  *
  */
[email protected]
 public class IncrementCoalescer implements IncrementCoalescerMBean {
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
----------------------------------------------------------------------
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
index 604fa97..06cf193 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescerMBean.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hbase.thrift;
 
+import org.apache.yetus.audience.InterfaceAudience;
+
[email protected]
 public interface IncrementCoalescerMBean {
   int getQueueSize();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
----------------------------------------------------------------------
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
index 973cad7..c86f476 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/THBaseThreadPoolExecutor.java
@@ -22,12 +22,14 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * A ThreadPoolExecutor customized for working with HBase thrift to update metrics before and
  * after the execution of a task.
  */
 
[email protected]
 public class THBaseThreadPoolExecutor extends ThreadPoolExecutor {
 
   private ThriftMetrics metrics;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-zookeeper/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-zookeeper/pom.xml b/hbase-zookeeper/pom.xml
index d4a47e2..a7cde51 100644
--- a/hbase-zookeeper/pom.xml
+++ b/hbase-zookeeper/pom.xml
@@ -91,6 +91,10 @@
         <groupId>org.codehaus.mojo</groupId>
         <artifactId>findbugs-maven-plugin</artifactId>
       </plugin>
+      <plugin>
+        <groupId>net.revelc.code</groupId>
+        <artifactId>warbucks-maven-plugin</artifactId>
+      </plugin>
     </plugins>
     <!-- General Resources -->
     <pluginManagement>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
----------------------------------------------------------------------
diff --git 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
index a50ce4c..7413879 100644
--- 
a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
+++ 
b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionNormalizerTracker.java
@@ -19,10 +19,10 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionNormalizerProtos;
 /**
  * Tracks region normalizer state up in ZK
  */
[email protected]
 public class RegionNormalizerTracker extends ZKNodeTracker {
   private static final Logger LOG = 
LoggerFactory.getLogger(RegionNormalizerTracker.class);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a6eeb26c/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 27d786b..de4bda3 100755
--- a/pom.xml
+++ b/pom.xml
@@ -984,6 +984,34 @@
             <includeTestSourceDirectory>true</includeTestSourceDirectory>
           </configuration>
         </plugin>
+        <plugin>
+          <groupId>net.revelc.code</groupId>
+          <artifactId>warbucks-maven-plugin</artifactId>
+          <version>${maven.warbucks.version}</version>
+          <configuration>
+            <ignoreRuleFailures>false</ignoreRuleFailures>
+            <rules>
+              <rule>
+                <!-- exclude the generated java files -->
+                <classPattern>(?!.*(.generated.|.tmpl.|\$)).*</classPattern>
+                <includeTestClasses>false</includeTestClasses>
+                <includePublicClasses>true</includePublicClasses>
+                <includePackagePrivateClasses>false</includePackagePrivateClasses>
+                <includeProtectedClasses>false</includeProtectedClasses>
+                <includePrivateClasses>false</includePrivateClasses>
+                <classAnnotationPattern>org[.]apache[.]yetus[.]audience[.]InterfaceAudience.*</classAnnotationPattern>
+              </rule>
+            </rules>
+          </configuration>
+          <executions>
+            <execution>
+              <id>run-warbucks</id>
+              <goals>
+                <goal>check</goal><!-- runs at process-test-classes phase -->
+              </goals>
+            </execution>
+          </executions>
+        </plugin>
       </plugins>
     </pluginManagement>
     <plugins>
@@ -1489,6 +1517,7 @@
     <maven.shade.version>3.0.0</maven.shade.version>
     <maven.site.version>3.4</maven.site.version>
     <maven.source.version>3.0.1</maven.source.version>
+    <maven.warbucks.version>1.1.0</maven.warbucks.version>
     <os.maven.version>1.5.0.Final</os.maven.version>
     <plexus.errorprone.javac.version>2.8.2</plexus.errorprone.javac.version>
     <scala.maven.version>3.2.2</scala.maven.version>

Reply via email to