[37/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteString.java
new file mode 100644
index 000..f673ee3
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ByteString.java
@@ -0,0 +1,1565 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
+import java.io.OutputStream;
+import java.io.Serializable;
+import java.io.UnsupportedEncodingException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.nio.charset.UnsupportedCharsetException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NoSuchElementException;
+
+/**
+ * Immutable sequence of bytes. Substring is supported by sharing the reference to the immutable
+ * underlying bytes. Concatenation is likewise supported without copying (long strings) by building
+ * a tree of pieces in {@link RopeByteString}.
+ *
+ * Like {@link String}, the contents of a {@link ByteString} can never be observed to change, not
+ * even in the presence of a data race or incorrect API usage in the client code.
+ *
+ * @author crazy...@google.com Bob Lee
+ * @author ken...@google.com Kenton Varda
+ * @author carlan...@google.com Carl Haverl
+ * @author marti...@google.com Martin Buchholz
+ */
+public abstract class ByteString implements Iterable<Byte>, Serializable {
+
+  /**
+   * When two strings to be concatenated have a combined length shorter than
+   * this, we just copy their bytes on {@link #concat(ByteString)}.
+   * The trade-off is copy size versus the overhead of creating tree nodes
+   * in {@link RopeByteString}.
+   */
+  static final int CONCATENATE_BY_COPY_SIZE = 128;
+
+  /**
+   * When copying an InputStream into a ByteString with .readFrom(),
+   * the chunks in the underlying rope start at 256 bytes, but double
+   * each iteration up to 8192 bytes.
+   */
+  static final int MIN_READ_FROM_CHUNK_SIZE = 0x100;  // 256b
+  static final int MAX_READ_FROM_CHUNK_SIZE = 0x2000;  // 8k
+
+  /**
+   * Empty {@code ByteString}.
+   */
+  public static final ByteString EMPTY = new LiteralByteString(Internal.EMPTY_BYTE_ARRAY);
+
+  /**
+   * An interface to efficiently copy {@code byte[]}.
+   *
+   * One of the noticeable costs of copying a byte[] into a new array using
+   * {@code System.arraycopy} is nullification of a new buffer before the copy. It has been shown
+   * the Hotspot VM is capable of intrinsifying the {@code Arrays.copyOfRange} operation to avoid
+   * this expensive nullification and provide a substantial performance gain. Unfortunately this
+   * does not hold on Android
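
A minimal sketch of the semantics described in the class javadoc above (substring sharing and copy-free concatenation), written against the standard unshaded com.google.protobuf.ByteString API rather than the shaded copy in this patch:

    import com.google.protobuf.ByteString;

    public class ByteStringDemo {
      public static void main(String[] args) {
        ByteString a = ByteString.copyFromUtf8("hello, ");
        ByteString b = ByteString.copyFromUtf8("world");
        // Small inputs (combined length below CONCATENATE_BY_COPY_SIZE) are
        // flattened into one literal string; larger ones become a rope tree node.
        ByteString c = a.concat(b);
        // substring() shares the underlying bytes rather than copying them.
        ByteString hello = c.substring(0, 5);
        System.out.println(c.toStringUtf8());     // hello, world
        System.out.println(hello.toStringUtf8()); // hello
      }
    }

The readFrom() constants above follow the same cheap-until-proven-large philosophy: chunks start at 256 bytes and double per read up to 8192.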

[31/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
new file mode 100644
index 000..61f1574
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Empty.java
@@ -0,0 +1,386 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/empty.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * A generic empty message that you can re-use to avoid defining duplicated
+ * empty messages in your APIs. A typical example is to use it as the request
+ * or the response type of an API method. For instance:
+ * service Foo {
+ *   rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ * }
+ * The JSON representation for `Empty` is empty JSON object `{}`.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Empty}
+ */
+public  final class Empty extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.Empty)
+    EmptyOrBuilder {
+  // Use Empty.newBuilder() to construct.
+  private Empty(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private Empty() {
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+      getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Empty(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          default: {
+            if (!input.skipField(tag)) {
+              done = true;
+            }
+            break;
+          }
+        }
+      }
+    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      makeExtensionsImmutable();
+    }
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.EmptyProto.internal_static_google_protobuf_Empty_descriptor;
+  }
+
+  protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.EmptyProto.internal_static_google_protobuf_Empty_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty.Builder.class);
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+      throws java.io.IOException {
+  }
+
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    memoizedSize = size;
+    return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty)) {
+      return super.equals(obj);
+    }
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.Empty) obj;
+
+    boolean result = true;
+    return result;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
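
Since Empty has no fields, its wire form is zero bytes, writeTo() is a no-op, and isInitialized() memoizes its tri-state byte (-1 unknown, 0 false, 1 true) on first call. A usage sketch against the unshaded com.google.protobuf.Empty (an assumption for readability; the class above lives under the shaded package):

    import com.google.protobuf.Empty;

    public class EmptyDemo {
      public static void main(String[] args) throws Exception {
        Empty e = Empty.getDefaultInstance();
        byte[] wire = e.toByteArray();         // zero-length: no fields to write
        System.out.println(wire.length);       // 0
        Empty parsed = Empty.parseFrom(wire);  // the parse loop above just hits tag 0
        System.out.println(parsed.equals(e));  // true
      }
    }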

[57/57] [abbrv] hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks.

2017-07-07 Thread busbey
HBASE-18147 POC jenkinsfile for nightly checks.

* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile
* TODO branch with multiple jdk versions
* TODO verify flaky list is in use


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59ec4088
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59ec4088
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59ec4088

Branch: refs/heads/HBASE-18147
Commit: 59ec408823435b2744c2113952f0047cfa59ec2e
Parents: 30d06df
Author: Sean Busbey 
Authored: Tue Jul 4 15:12:38 2017 -0400
Committer: Sean Busbey 
Committed: Fri Jul 7 23:33:42 2017 -0400

--
 dev-support/Jenkinsfile   | 142 +
 dev-support/docker/Dockerfile |  29 
 2 files changed, 171 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59ec4088/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..a3f7f3b
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,142 @@
+pipeline {
+  agent {
+    node {
+      label 'Hadoop'
+    }
+  }
+  // work around for YETUS-508, requires maven outside of the dockerfile
+  tools {
+    maven 'Maven (latest)'
+  }
+  triggers {
+    cron('@daily')
+  }
+  options {
+    timeout (time: 6, unit: 'HOURS')
+    timestamps()
+  }
+  environment {
+    TOOLS = "${env.WORKSPACE}/tools"
+    BASEDIR = "${env.WORKSPACE}/component"
+    YETUS_RELEASE = '0.4.0'
+  }
+  parameters {
+    booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+  }
+  stages {
+    stage ('yetus check') {
+      environment {
+        OUTPUTDIR = "${env.WORKSPACE}/artifacts"
+        PROJECT = 'hbase'
+        PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+        AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc'
+        WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+        TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+        BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+      }
+      steps {
+        // TODO we can move the yetus install into a different stage and then use stash to deploy it.
+        sh  '''#!/usr/bin/env bash
+printenv
+echo "Ensure we have a copy of Apache Yetus."
+if [[ true != "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+    echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+    rm -rf "${WORKSPACE}/.gpg"
+    mkdir -p "${WORKSPACE}/.gpg"
+    chmod -R 700 "${WORKSPACE}/.gpg"
+
+    echo "install yetus project KEYS"
+    curl -L --fail -o "${WORKSPACE}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS
+    gpg --homedir "${WORKSPACE}/.gpg" --import "${WORKSPACE}/KEYS_YETUS"
+
+    echo "download yetus release ${YETUS_RELEASE}"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    echo "verifying yetus release"
+    gpg --homedir "${WORKSPACE}/.gpg" --verify "yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    mv "yetus-${YETUS_RELEASE}-bin.tar.gz" yetus.tar.gz
+  else
+    echo "Reusing cached download of Apache Yetus version ${YETUS_RELEASE}."
+  fi
+else
+  YETUS_DIR="${WORKSPACE}/yetus-git"
+  rm -rf "${YETUS_DIR}"
+  echo "downloading from github"
+  curl -L --fail https://api.github.com/repos/apache/yetus/tarball/HEAD -o yetus.tar.gz
+fi
+if [ ! -d "${YETUS_DIR}" ]; then
+  echo "unpacking yetus into '${YETUS_DIR}'"
+  mkdir -p "${YETUS_DIR}"
+  gunzip -c yetus.tar.gz | tar xpf - -C "${YETUS_DIR}" --strip-components 1
+fi
+        '''
+        // TODO we can move the personality install into a different stage and then use stash to deploy it.
+        dir ("${env.TOOLS}") {
+          sh """#!/usr/bin/env bash
+echo "Downloading Project personality."
+curl -L  -o personality.sh "${env.PROJET_PERSONALITY}"
+          """
+        }
+        sh '''#!/usr/bin/env bash
+YETUS_ARGS=()
+YETUS_ARGS=("--personality=${TOOLS}/personality.sh" "${YETUS_ARGS[@]}")
+YETUS_ARGS=("--basedir=${BASEDIR}" "${YETUS_ARGS[@]}")
[41/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractMessage.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractMessage.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractMessage.java
new file mode 100644
index 000..4a6fefa
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/AbstractMessage.java
@@ -0,0 +1,646 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLite;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A partial implementation of the {@link Message} interface which implements
+ * as many methods of that interface as possible in terms of other methods.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class AbstractMessage
+    // TODO(dweis): Update GeneratedMessage to parameterize with MessageType and BuilderType.
+    extends AbstractMessageLite
+    implements Message {
+
+  @Override
+  public boolean isInitialized() {
+return MessageReflection.isInitialized(this);
+  }
+
+  /**
+   * Interface for the parent of a Builder that allows the builder to
+   * communicate invalidations back to the parent for use when using nested
+   * builders.
+   */
+  protected interface BuilderParent {
+
+/**
+ * A builder becomes dirty whenever a field is modified -- including fields
+ * in nested builders -- and becomes clean when build() is called.  Thus,
+ * when a builder becomes dirty, all its parents become dirty as well, and
+ * when it becomes clean, all its children become clean.  The dirtiness
+ * state is used to invalidate certain cached values.
+ * 
+ * To this end, a builder calls markDirty() on its parent whenever it
+ * transitions from clean to dirty.  The parent must propagate this call to
+ * its own parent, unless it was already dirty, in which case the
+ * grandparent must necessarily already be dirty as well.  The parent can
+ * only transition back to "clean" after calling build() on all children.
+ */
+void markDirty();
+  }
+
+  /** Create a nested builder. */
+  protected Message.Builder newBuilderForType(BuilderParent parent) {
+    throw new UnsupportedOperationException("Nested builder is not supported for this type.");
+  }
+
+
+  @Override
+  public List<String> findInitializationErrors() {
+    return MessageReflection.findMissingFields(this);
+  }
+
+  @Override
+  public String getInitializationErrorString() {
+return 
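
A minimal, self-contained sketch of the markDirty() contract described in the BuilderParent javadoc above. The Node class here is hypothetical, purely for illustration, and is not part of the protobuf API:

    interface BuilderParent {
      void markDirty();
    }

    // Dirtiness propagates upward only on the clean->dirty edge.
    class Node implements BuilderParent {
      private final BuilderParent parent;  // null for the root builder
      private boolean dirty;

      Node(BuilderParent parent) { this.parent = parent; }

      @Override
      public void markDirty() {
        if (!dirty) {          // if already dirty, ancestors are already dirty too
          dirty = true;
          if (parent != null) {
            parent.markDirty();
          }
        }
      }

      void build() { dirty = false; }  // builder transitions back to clean on build()
    }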

[46/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
new file mode 100644
index 000..a011b30
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/DummyRegionServerEndpointProtos.java
@@ -0,0 +1,1225 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: DummyRegionServerEndpoint.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class DummyRegionServerEndpointProtos {
+  private DummyRegionServerEndpointProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface DummyRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.test.pb.DummyRequest}
+   */
+  public static final class DummyRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements DummyRequestOrBuilder {
+    // Use DummyRequest.newBuilder() to construct.
+    private DummyRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private DummyRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final DummyRequest defaultInstance;
+public static DummyRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public DummyRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DummyRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.internal_static_hbase_test_pb_DummyRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.internal_static_hbase_test_pb_DummyRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<DummyRequest> PARSER =
+        new com.google.protobuf.AbstractParser<DummyRequest>() {
+      public DummyRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new DummyRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<DummyRequest> getParserForType() {
+      return PARSER;
+    }
+
+private void initFields() {
+}
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = 
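
A usage sketch for the static PARSER above, assuming the generated classes are on the classpath; parseFrom(byte[]) is inherited from AbstractParser and drives the tag-reading constructor shown earlier:

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.coprocessor.protobuf.generated.DummyRegionServerEndpointProtos.DummyRequest;

    public class DummyRequestDemo {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // DummyRequest has no fields, so its serialized form is empty.
        byte[] wire = DummyRequest.getDefaultInstance().toByteArray();
        DummyRequest req = DummyRequest.PARSER.parseFrom(wire);
        System.out.println(req.isInitialized());  // true
      }
    }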

[49/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
new file mode 100644
index 000..61b47ff
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationProtos.java
@@ -0,0 +1,1277 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationProtos {
+  private ColumnAggregationProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface SumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code SumRequest}
+   */
+  public static final class SumRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements SumRequestOrBuilder {
+    // Use SumRequest.newBuilder() to construct.
+    private SumRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SumRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SumRequest defaultInstance;
+public static SumRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SumRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SumRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.internal_static_SumRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.internal_static_SumRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumRequest.class, org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationProtos.SumRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SumRequest> PARSER =
+        new
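
The case 10 and case 18 branches in the parsing loop above follow the protobuf wire format, where each tag packs the field number and wire type into one varint. A quick sketch of the arithmetic:

    public class TagDemo {
      public static void main(String[] args) {
        final int LENGTH_DELIMITED = 2;  // wire type for bytes/string fields
        // tag = (field_number << 3) | wire_type
        int familyTag    = (1 << 3) | LENGTH_DELIMITED;  // field 1 -> 10
        int qualifierTag = (2 << 3) | LENGTH_DELIMITED;  // field 2 -> 18
        System.out.println(familyTag + " " + qualifierTag);  // 10 18
      }
    }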

[53/57] [abbrv] hbase git commit: HBASE-18319 Implement getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp methods

2017-07-07 Thread busbey
HBASE-18319 Implement 
getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp
 methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0a5fa0c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0a5fa0c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0a5fa0c

Branch: refs/heads/HBASE-18147
Commit: b0a5fa0c2a119168c4272e5efba16a3ef9e9c329
Parents: 4fe7385
Author: Guanghao Zhang 
Authored: Wed Jul 5 18:33:57 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Jul 7 16:21:45 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  95 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  44 
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 219 ++-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  16 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  | 132 +++
 .../hbase/client/TestAsyncRegionAdminApi.java   |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  81 ++-
 8 files changed, 591 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b0a5fa0c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ff35d46..8ade209 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Collection;
 import java.util.Map;
@@ -24,8 +25,10 @@ import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -332,6 +335,11 @@ public interface AsyncAdmin {
   CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName);
 
   /**
+   * Get the regions of a given table.
+   */
+  CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName);
+
+  /**
* Flush a table.
* @param tableName table to flush
*/
@@ -796,4 +804,91 @@ public interface AsyncAdmin {
* @return procedure list wrapped by {@link CompletableFuture}
*/
  CompletableFuture<List<ProcedureInfo>> listProcedures();
+
+  /**
+   * @return cluster status wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<ClusterStatus> getClusterStatus();
+
+  /**
+   * @return current master server name wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<ServerName> getMaster() {
+    return getClusterStatus().thenApply(ClusterStatus::getMaster);
+  }
+
+  /**
+   * @return current backup master list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getBackupMasters() {
+    return getClusterStatus().thenApply(ClusterStatus::getBackupMasters);
+  }
+
+  /**
+   * @return current live region servers list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getRegionServers() {
+    return getClusterStatus().thenApply(ClusterStatus::getServers);
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server.
+   * @param serverName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
+    return getRegionLoads(serverName, Optional.empty());
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server for a table.
+   * @param serverName
+   * @param tableName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
+      Optional<TableName> tableName);
+
+  /**
+   * Check whether master is in maintenance mode.
+   * @return true if master is in maintenance mode, false otherwise. The return value will be
+   * wrapped by a {@link CompletableFuture}
+   */
+  CompletableFuture<Boolean> isMasterInMaintenanceMode();
+
+  /**
+   * Get the current compaction state of a table. It could be in a major compaction, a minor
+   * compaction, both, or none.
+   * @param tableName table to examine
+   * @return the current compaction state wrapped by a {@link
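
A usage sketch for the new cluster-status methods, assuming an already-established AsyncConnection (connection setup omitted; the exact surrounding types are an assumption based on this patch):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;

    public class ClusterStatusDemo {
      static void printLoads(AsyncConnection conn, ServerName server) {
        AsyncAdmin admin = conn.getAdmin();
        // getMaster() is a default method that derives its answer from getClusterStatus().
        admin.getMaster().thenAccept(master -> System.out.println("master: " + master));
        admin.getRegionLoads(server).thenAccept(loads ->
            loads.forEach(load -> System.out.println(load)));
      }
    }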

[38/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ApiOrBuilder.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ApiOrBuilder.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ApiOrBuilder.java
new file mode 100644
index 000..b8b7030
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/ApiOrBuilder.java
@@ -0,0 +1,258 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public interface ApiOrBuilder extends
+// @@protoc_insertion_point(interface_extends:google.protobuf.Api)
+org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * 
+   * The fully qualified name of this api, including package name
+   * followed by the api's simple name.
+   * 
+   *
+   * string name = 1;
+   */
+  java.lang.String getName();
+  /**
+   * 
+   * The fully qualified name of this api, including package name
+   * followed by the api's simple name.
+   * 
+   *
+   * string name = 1;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+  getNameBytes();
+
+  /**
+   * 
+   * The methods of this api, in unspecified order.
+   * 
+   *
+   * repeated .google.protobuf.Method methods = 2;
+   */
+  java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.Method>
+  getMethodsList();
+  /**
+   * 
+   * The methods of this api, in unspecified order.
+   * 
+   *
+   * repeated .google.protobuf.Method methods = 2;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Method getMethods(int index);
+  /**
+   * 
+   * The methods of this api, in unspecified order.
+   * 
+   *
+   * repeated .google.protobuf.Method methods = 2;
+   */
+  int getMethodsCount();
+  /**
+   * 
+   * The methods of this api, in unspecified order.
+   * 
+   *
+   * repeated .google.protobuf.Method methods = 2;
+   */
+  java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.MethodOrBuilder>
+  getMethodsOrBuilderList();
+  /**
+   * 
+   * The methods of this api, in unspecified order.
+   * 
+   *
+   * repeated .google.protobuf.Method methods = 2;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MethodOrBuilder getMethodsOrBuilder(
+      int index);
+
+  /**
+   * 
+   * Any metadata attached to the API.
+   * 
+   *
+   * repeated .google.protobuf.Option options = 3;
+   */
+  java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.Option>
+  getOptionsList();
+  /**
+   * 
+   * Any metadata attached to the API.
+   * 
+   *
+   * repeated .google.protobuf.Option options = 3;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.Option getOptions(int index);
+  /**
+   * 
+   * Any metadata attached to the API.
+   * 
+   *
+   * repeated .google.protobuf.Option options = 3;
+   */
+  int getOptionsCount();
+  /**
+   * 
+   * Any metadata attached to the API.
+   * 
+   *
+   * repeated .google.protobuf.Option options = 3;
+   */
+  java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.OptionOrBuilder>
+  getOptionsOrBuilderList();
+  /**
+   * 
+   * Any metadata attached to the API.
+   * 
+   *
+   * repeated .google.protobuf.Option options = 3;
+   */
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.OptionOrBuilder getOptionsOrBuilder(
+      int index);
+
+  /**
+   * 
+   * A version string for this api. If specified, must have the form
+   * `major-version.minor-version`, as in `1.10`. If the minor version
+   * is omitted, it defaults to zero. If the entire version field is
+   * empty, the major version is derived from the package name, as
+   * outlined below. If the field is not empty, the version in the
+   * package name will be verified to be consistent with what is
+   * provided here.
+   * The versioning schema uses [semantic
+   * versioning](http://semver.org) where the major version number
+   * indicates a breaking change and the minor version an additive,
+   * non-breaking change. Both version numbers are signals to users
+   * what to expect from different versions, and should be carefully
+   * chosen based on the product plan.
+   * The major version is also reflected in the package name of the
+   * API, which must end in `vmajor-version`, as in
+   * `google.feature.v1`. For major versions 0 and 1, the suffix can
+   * be omitted. Zero major versions must only be used for
+   * experimental, non-GA apis.
+   * 
+   *
+   * string version = 4;
+   */
+  java.lang.String getVersion();
+  /**
+   * 
+   * A version string for this api. If specified, must have the form
+   * `major-version.minor-version`, as in `1.10`. If the minor version
+   * is omitted, it defaults to zero. If the entire version field is
+   * empty, the major version is derived from the package name, as
+   * outlined below. If the field is not empty, the version in the
+   * package name will be verified to be 
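
A small sketch of the version rule quoted above, where `major-version.minor-version` allows the minor version to be omitted and default to zero (plain Java for illustration, not part of the generated API):

    public class ApiVersionDemo {
      // Parses "1.10" -> major 1, minor 10; "2" -> major 2, minor 0.
      static int[] parse(String version) {
        String[] parts = version.split("\\.");
        int major = Integer.parseInt(parts[0]);
        int minor = parts.length > 1 ? Integer.parseInt(parts[1]) : 0; // minor defaults to zero
        return new int[] { major, minor };
      }

      public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(parse("1.10"))); // [1, 10]
        System.out.println(java.util.Arrays.toString(parse("2")));    // [2, 0]
      }
    }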

[42/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-examples/README.txt
--
diff --git a/hbase-examples/README.txt b/hbase-examples/README.txt
index 22d1103..c47ed4f 100644
--- a/hbase-examples/README.txt
+++ b/hbase-examples/README.txt
@@ -63,8 +63,27 @@ Example code.
   3. Execute {./DemoClient}.
 
 ON PROTOBUFS
-This maven module has core protobuf definition files ('.protos') used by hbase
-examples.
+This maven module has protobuf definition files ('.protos') used by hbase
+Coprocessor Endpoints examples including tests. Coprocessor
+Endpoints are meant to be standalone, independent code not reliant on hbase
+internals. They define their Service using protobuf. The protobuf version
+they use can be distinct from that used by HBase internally since HBase started
+shading its protobuf references. Endpoints have no access to the shaded protobuf
+hbase uses. They do have access to the content of hbase-protocol -- the
+.protos found in here -- but avoid using as much of this as you can as it is
+liable to change.
 
-Generation of java files from protobuf .proto files included here is done as
-part of the build.
+Generation of java files from protobuf .proto files included here is done apart
+from the build. Run the generation whenever you make changes to the .protos files
+and then check in the produced java. (The reasoning is that change is infrequent,
+so why pay the price of generating files anew on each build?)
+
+To generate java files from protos run:
+
+ $ mvn compile -Dcompile-protobuf
+or
+ $ mvn compile -Pcompile-protobuf
+
+After you've done the above, check the result and then check in the changes (or
+post a patch on a JIRA with your definition file changes and the generated files).
+Be careful to notice new files and files removed and do appropriate git rm/adds.

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 5c6acfa..3a30add 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -29,7 +29,7 @@
   hbase-examples
   Apache HBase - Examples
   Examples of HBase usage
-  
+
   
 
   
@@ -46,33 +46,20 @@
   true
 
   
-  
-maven-surefire-plugin
-${surefire.version}
-
-  
-  ${surefire.firstPartGroups}
-
-  
-  
-  
-org.apache.maven.plugins
-maven-source-plugin
-  
-  
-org.xolstice.maven.plugins
-protobuf-maven-plugin
-
-  
-compile-protoc
-generate-sources
-
-  compile
-
-  
-
-  
+${surefire.firstPartGroups}
+
+
+
+
+  org.apache.maven.plugins
+  maven-source-plugin
+
 
 
   
@@ -94,7 +81,7 @@
 
   
   
-
+
   
 
 
@@ -107,7 +94,7 @@
 
   
   
-
+
   
 
   
@@ -125,16 +112,16 @@
   test
 
 
-  org.apache.hbase
-  hbase-common
+org.apache.hbase
+hbase-common
 
 
-  org.apache.hbase
-  hbase-protocol
+org.apache.hbase
+hbase-protocol
 
 
-  org.apache.hbase
-  hbase-client
+org.apache.hbase
+hbase-client
 
 
   org.apache.hbase
@@ -149,10 +136,11 @@
   hbase-thrift
 
 
-  org.apache.hbase
-  hbase-testing-util
-  test
+org.apache.hbase
+hbase-testing-util
+test
 
+
 
   org.apache.thrift
   libthrift
@@ -169,126 +157,152 @@
   com.google.protobuf
   protobuf-java
 
-  
-  
-
-
-  skipExamplesTests
-  
-
-  skipExamplesTests
-
-  
-  
-true
-true
-  
-
-
-
+ 
+ skipExamplesTests
+ 
+ 
+ skipExamplesTests
+ 
+ 
+ 
+ true
+ true
+ 
+ 
+
+  compile-protobuf
+  
+
+  compile-protobuf
+
+  
+  
+
+  
+org.xolstice.maven.plugins
+protobuf-maven-plugin
+
+  
+compile-protoc
+generate-sources
+
+  compile
+
+  
+
+  
+
+  
+
+
+ 
+ 
-
-
-  

[43/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
new file mode 100644
index 000..86c88c6
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/SecureBulkLoadProtos.java
@@ -0,0 +1,2088 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: SecureBulkLoad.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class SecureBulkLoadProtos {
+  private SecureBulkLoadProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface SecureBulkLoadHFilesRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+/**
+ * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 
1;
+ */
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath>
+    getFamilyPathList();
+    /**
+     * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath getFamilyPath(int index);
+    /**
+     * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+     */
+    int getFamilyPathCount();
+    /**
+     * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPathOrBuilder>
+    getFamilyPathOrBuilderList();
+    /**
+     * repeated .hbase.pb.BulkLoadHFileRequest.FamilyPath family_path = 1;
+     */
+    org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPathOrBuilder getFamilyPathOrBuilder(
+        int index);
+
+// optional bool assign_seq_num = 2;
+/**
+ * optional bool assign_seq_num = 2;
+ */
+boolean hasAssignSeqNum();
+/**
+ * optional bool assign_seq_num = 2;
+ */
+boolean getAssignSeqNum();
+
+// required .hbase.pb.DelegationToken fs_token = 3;
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+boolean hasFsToken();
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken 
getFsToken();
+/**
+ * required .hbase.pb.DelegationToken fs_token = 3;
+ */
+
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationTokenOrBuilder
 getFsTokenOrBuilder();
+
+// required string bulk_token = 4;
+/**
+ * required string bulk_token = 4;
+ */
+boolean hasBulkToken();
+/**
+ * required string bulk_token = 4;
+ */
+java.lang.String getBulkToken();
+/**
+ * required string bulk_token = 4;
+ */
+com.google.protobuf.ByteString
+getBulkTokenBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SecureBulkLoadHFilesRequest}
+   */
+  public static final class SecureBulkLoadHFilesRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements SecureBulkLoadHFilesRequestOrBuilder {
+    // Use SecureBulkLoadHFilesRequest.newBuilder() to construct.
+    private SecureBulkLoadHFilesRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SecureBulkLoadHFilesRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final SecureBulkLoadHFilesRequest defaultInstance;
+public static SecureBulkLoadHFilesRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public SecureBulkLoadHFilesRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+    private SecureBulkLoadHFilesRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+
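
A small sketch of the proto2 has/get contract visible in the interface above (optional assign_seq_num, required fs_token and bulk_token), assuming the generated SecureBulkLoadProtos classes are available:

    import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.SecureBulkLoadHFilesRequest;

    public class HasGetDemo {
      // For proto2, check hasX() before getX() on optional fields; required fields
      // must be set or isInitialized() returns false.
      static void describe(SecureBulkLoadHFilesRequest req) {
        if (req.hasAssignSeqNum()) {  // optional bool assign_seq_num = 2
          System.out.println("assign_seq_num = " + req.getAssignSeqNum());
        }
        System.out.println("bulk_token = " + req.getBulkToken());  // required string bulk_token = 4
        System.out.println("families = " + req.getFamilyPathCount());
      }
    }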

[48/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
new file mode 100644
index 000..64cf82e
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithErrorsProtos.java
@@ -0,0 +1,1290 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationWithErrorsProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationWithErrorsProtos {
+  private ColumnAggregationWithErrorsProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ColumnAggregationWithErrorsSumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code ColumnAggregationWithErrorsSumRequest}
+   *
+   * 
+   * use unique names for messages in ColumnAggregationXXX.protos due to a bug 
in
+   * protoc or hadoop's protoc compiler.
+   * 
+   */
+  public static final class ColumnAggregationWithErrorsSumRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements ColumnAggregationWithErrorsSumRequestOrBuilder {
+    // Use ColumnAggregationWithErrorsSumRequest.newBuilder() to construct.
+    private ColumnAggregationWithErrorsSumRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ColumnAggregationWithErrorsSumRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ColumnAggregationWithErrorsSumRequest defaultInstance;
+public static ColumnAggregationWithErrorsSumRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ColumnAggregationWithErrorsSumRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+    private ColumnAggregationWithErrorsSumRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.coprocessor.protobuf.generated.ColumnAggregationWithErrorsProtos.internal_static_ColumnAggregationWithErrorsSumRequest_descriptor;
+    }
+
+protected 

[55/57] [abbrv] hbase git commit: HBASE-18317 Implement async admin operations for Normalizer/CleanerChore/CatalogJanitor

2017-07-07 Thread busbey
HBASE-18317 Implement async admin operations for 
Normalizer/CleanerChore/CatalogJanitor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f937297
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f937297
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f937297

Branch: refs/heads/HBASE-18147
Commit: 7f937297821f5fb27c5ed32a4c0d4e5bc4a18956
Parents: 48d28c7
Author: Guanghao Zhang 
Authored: Fri Jul 7 21:13:38 2017 +0800
Committer: Guanghao Zhang 
Committed: Sat Jul 8 10:55:10 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  | 131 +
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  75 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 190 +++
 .../hbase/client/TestAsyncBalancerAdminApi.java |  54 --
 .../hbase/client/TestAsyncToolAdminApi.java | 127 +
 5 files changed, 441 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f937297/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 8ade209..8411a5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -284,40 +284,6 @@ public interface AsyncAdmin {
   CompletableFuture<List<NamespaceDescriptor>> listNamespaceDescriptors();
 
   /**
-   * Turn the load balancer on or off.
-   * @param on
-   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture<Boolean> setBalancerOn(boolean on);
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
-   * reassignments. Can NOT run for various reasons. Check logs.
-   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  default CompletableFuture<Boolean> balance() {
-    return balance(false);
-  }
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
-   * reassignments. If there is region in transition, force parameter of true would still run
-   * balancer. Can *not* run for other reasons. Check logs.
-   * @param forcible whether we should force balance even if there is region in transition.
-   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture<Boolean> balance(boolean forcible);
-
-  /**
-   * Query the current state of the balancer.
-   * @return true if the balance switch is on, false otherwise The return value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture<Boolean> isBalancerOn();
-
-  /**
   * Close a region. For expert-admins. Runs close on the regionserver. The master will not be
   * informed of the close.
   * @param regionName region name to close
@@ -891,4 +857,101 @@
   * @return the last major compaction timestamp wrapped by a {@link CompletableFuture}
   */
   CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName);
+
+  /**
+   * Turn the load balancer on or off.
+   * @param on
+   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> setBalancerOn(boolean on);
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
+   * reassignments. Can NOT run for various reasons. Check logs.
+   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
+   * {@link CompletableFuture}.
+   */
+  default CompletableFuture<Boolean> balance() {
+    return balance(false);
+  }
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
+   * reassignments. If there is region in transition, force parameter of true would still run
+   * balancer. Can *not* run for other reasons. Check logs.
+   * @param forcible whether we should force balance even if there is region in transition.
+   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
+   * {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> balance(boolean forcible);
+
+  /**
+   * Query the current state of the balancer.
+   * @return true if the balance switch is on, false otherwise. The return value will be wrapped by a
+   * {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean>
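
A usage sketch of the relocated balancer methods, assuming an AsyncAdmin instance is at hand (obtaining it is omitted):

    import org.apache.hadoop.hbase.client.AsyncAdmin;

    public class BalancerDemo {
      static void runBalancer(AsyncAdmin admin) {
        admin.isBalancerOn().thenCompose(on -> {
          if (!on) {
            // setBalancerOn returns the previous switch value.
            return admin.setBalancerOn(true).thenCompose(prev -> admin.balance());
          }
          return admin.balance();  // equivalent to balance(false)
        }).thenAccept(ran -> System.out.println("balancer ran: " + ran));
      }
    }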

[45/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
new file mode 100644
index 000..7ba5b8e
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/IncrementCounterProcessorTestProtos.java
@@ -0,0 +1,4059 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: IncrementCounterProcessor.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class IncrementCounterProcessorTestProtos {
+  private IncrementCounterProcessorTestProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface IncCounterProcessorRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes row = 1;
+/**
+ * required bytes row = 1;
+ */
+boolean hasRow();
+/**
+ * required bytes row = 1;
+ */
+com.google.protobuf.ByteString getRow();
+
+// required int32 counter = 2;
+/**
+ * required int32 counter = 2;
+ */
+boolean hasCounter();
+/**
+ * required int32 counter = 2;
+ */
+int getCounter();
+  }
+  /**
+   * Protobuf type {@code IncCounterProcessorRequest}
+   */
+  public static final class IncCounterProcessorRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements IncCounterProcessorRequestOrBuilder {
+// Use IncCounterProcessorRequest.newBuilder() to construct.
+private 
IncCounterProcessorRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private IncCounterProcessorRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final IncCounterProcessorRequest defaultInstance;
+public static IncCounterProcessorRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public IncCounterProcessorRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private IncCounterProcessorRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  row_ = input.readBytes();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  counter_ = input.readInt32();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.internal_static_IncCounterProcessorRequest_descriptor;
+}
+
+protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.coprocessor.protobuf.generated.IncrementCounterProcessorTestProtos.internal_static_IncCounterProcessorRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
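Every generated parser in these files spins on input.readTag(); the tag is a varint packing (field_number << 3) | wire_type, which is why bytes field 1 matches case 10 and int32 field 2 matches case 16 above. A standalone illustration of that arithmetic:

// Minimal sketch of protobuf tag arithmetic, matching the switch labels above:
// field 1 (bytes row) with wire type 2 (length-delimited) gives tag 10;
// field 2 (int32 counter) with wire type 0 (varint) gives tag 16.
final class TagSketch {
  static int makeTag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }
  public static void main(String[] args) {
    System.out.println(makeTag(1, 2)); // 10 -> case 10: row
    System.out.println(makeTag(2, 0)); // 16 -> case 16: counter
  }
}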

[50/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
new file mode 100644
index 000..373e036
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/example/generated/BulkDeleteProtos.java
@@ -0,0 +1,1792 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: BulkDelete.proto
+
+package org.apache.hadoop.hbase.coprocessor.example.generated;
+
+public final class BulkDeleteProtos {
+  private BulkDeleteProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface BulkDeleteRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required .hbase.pb.Scan scan = 1;
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+boolean hasScan();
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan();
+/**
+ * required .hbase.pb.Scan scan = 1;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder 
getScanOrBuilder();
+
+// required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 2;
+/**
+ * required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 
2;
+ */
+boolean hasDeleteType();
+/**
+ * required .hbase.pb.BulkDeleteRequest.DeleteType deleteType = 
2;
+ */
+
org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType
 getDeleteType();
+
+// optional uint64 timestamp = 3;
+/**
+ * optional uint64 timestamp = 3;
+ */
+boolean hasTimestamp();
+/**
+ * optional uint64 timestamp = 3;
+ */
+long getTimestamp();
+
+// required uint32 rowBatchSize = 4;
+/**
+ * required uint32 rowBatchSize = 4;
+ */
+boolean hasRowBatchSize();
+/**
+ * required uint32 rowBatchSize = 4;
+ */
+int getRowBatchSize();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.BulkDeleteRequest}
+   */
+  public static final class BulkDeleteRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements BulkDeleteRequestOrBuilder {
+// Use BulkDeleteRequest.newBuilder() to construct.
+private BulkDeleteRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private BulkDeleteRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final BulkDeleteRequest defaultInstance;
+public static BulkDeleteRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public BulkDeleteRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private BulkDeleteRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.Builder subBuilder 
= null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = scan_.toBuilder();
+  }
+  scan_ = 
input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.PARSER,
 extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(scan_);
+scan_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 16: {
+  int rawValue = input.readEnum();
+  
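For orientation, a hedged sketch of the client-side builder calls that produce the message this parser consumes; the DeleteType constant is assumed from BulkDelete.proto and the empty Scan is purely illustrative:

import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

// Hedged sketch: builds the message parsed above. A real caller would convert
// a client Scan via ProtobufUtil; the default instance is used for brevity.
final class BulkDeleteRequestSketch {
  static BulkDeleteRequest sample() {
    return BulkDeleteRequest.newBuilder()
        .setScan(ClientProtos.Scan.getDefaultInstance())  // required field 1
        .setDeleteType(BulkDeleteRequest.DeleteType.ROW)  // required field 2, assumed enum constant
        .setRowBatchSize(100)                             // required field 4, illustrative value
        .build();
  }
}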

[54/57] [abbrv] hbase git commit: HBASE-18335 configuration guide fixes

2017-07-07 Thread busbey
HBASE-18335 configuration guide fixes

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48d28c7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48d28c7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48d28c7a

Branch: refs/heads/HBASE-18147
Commit: 48d28c7a248d0f8c19b18c20d4484864ac67b549
Parents: b0a5fa0
Author: Artem Ervits 
Authored: Fri Jul 7 10:52:06 2017 -0400
Committer: tedyu 
Committed: Fri Jul 7 14:37:11 2017 -0700

--
 src/main/asciidoc/_chapters/configuration.adoc | 63 ++---
 src/main/asciidoc/_chapters/hbase-default.adoc | 40 ++---
 2 files changed, 51 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/48d28c7a/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 2113b2f..bf14d11 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -79,11 +79,10 @@ To check for well-formedness and only print output if errors exist, use the comm
 .Keep Configuration In Sync Across the Cluster
 [WARNING]
 
-When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the content of the _conf/_ directory to all nodes of the cluster.
+When running in distributed mode, after you make an edit to an HBase configuration, make sure you copy the contents of the _conf/_ directory to all nodes of the cluster.
 HBase will not do this for you.
 Use `rsync`, `scp`, or another secure mechanism for copying the configuration files to your nodes.
-For most configuration, a restart is needed for servers to pick up changes An exception is dynamic configuration.
-to be described later below.
+For most configurations, a restart is needed for servers to pick up changes. Dynamic configuration is an exception to this, to be described later below.
 
 
 [[basic.prerequisites]]
@@ -131,11 +130,11 @@ DNS::
   HBase uses the local hostname to self-report its IP address. Both forward and reverse DNS resolving must work in versions of HBase previous to 0.92.0. The link:https://github.com/sujee/hadoop-dns-checker[hadoop-dns-checker] tool can be used to verify DNS is working correctly on the cluster. The project `README` file provides detailed instructions on usage.
 
 Loopback IP::
-  Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer to `localhost`, and this could not be configured.
+  Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer to `localhost`, and this was not configurable.
   See <> for more details.
 
 NTP::
-  The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism, on your cluster, and that all nodes look to the same service for time synchronization. See the link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up NTP.
+  The clocks on cluster nodes should be synchronized. A small amount of variation is acceptable, but larger amounts of skew can cause erratic and unexpected behavior. Time synchronization is one of the first things to check if you see unexplained problems in your cluster. It is recommended that you run a Network Time Protocol (NTP) service, or another time-synchronization mechanism on your cluster and that all nodes look to the same service for time synchronization. See the link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up NTP.
 
 [[ulimit]]
 Limits on Number of Files and Processes (ulimit)::
@@ -176,8 +175,8 @@ Linux Shell::
   All of the shell scripts that come with HBase rely on the link:http://www.gnu.org/software/bash[GNU Bash] shell.
 
 Windows::
-  Prior to HBase 0.96, testing for running HBase on Microsoft Windows was limited.
-  Running a on Windows nodes is not recommended for production systems.
+  Prior to HBase 0.96, running HBase on Microsoft Windows was limited to testing purposes only.
+  Running production systems on Windows machines is not recommended.
 
 
 [[hadoop]]
@@ -261,8 +260,8 @@ Because HBase depends on Hadoop, it 

[40/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Any.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Any.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Any.java
new file mode 100644
index 000..bf8e852
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Any.java
@@ -0,0 +1,899 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/any.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * `Any` contains an arbitrary serialized protocol buffer message along with a
+ * URL that describes the type of the serialized message.
+ * Protobuf library provides support to pack/unpack Any values in the form
+ * of utility functions or additional generated methods of the Any type.
+ * Example 1: Pack and unpack a message in C++.
+ * Foo foo = ...;
+ * Any any;
+ * any.PackFrom(foo);
+ * ...
+ * if (any.UnpackTo(foo)) {
+ *   ...
+ * }
+ * Example 2: Pack and unpack a message in Java.
+ * Foo foo = ...;
+ * Any any = Any.pack(foo);
+ * ...
+ * if (any.is(Foo.class)) {
+ *   foo = any.unpack(Foo.class);
+ * }
+ *  Example 3: Pack and unpack a message in Python.
+ * foo = Foo(...)
+ * any = Any()
+ * any.Pack(foo)
+ * ...
+ * if any.Is(Foo.DESCRIPTOR):
+ *   any.Unpack(foo)
+ *   ...
+ * The pack methods provided by protobuf library will by default use
+ * 'type.googleapis.com/full.type.name' as the type URL and the unpack
+ * methods only use the fully qualified type name after the last '/'
+ * in the type URL, for example "foo.bar.com/x/y.z" will yield type
+ * name "y.z".
+ * JSON
+ * 
+ * The JSON representation of an `Any` value uses the regular
+ * representation of the deserialized, embedded message, with an
+ * additional field `type` which contains the type URL. Example:
+ * package google.profile;
+ * message Person {
+ *   string first_name = 1;
+ *   string last_name = 2;
+ * }
+ * {
+ *   "type": "type.googleapis.com/google.profile.Person",
+ *   "firstName": string,
+ *   "lastName": string
+ * }
+ * If the embedded message type is well-known and has a custom JSON
+ * representation, that representation will be embedded adding a field
+ * `value` which holds the custom JSON in addition to the `type`
+ * field. Example (for message [google.protobuf.Duration][]):
+ * {
+ *   "type": 
"type.googleapis.org.apache.hadoop.hbase.shaded.com.google.protobuf.Duration",
+ *   "value": "1.212s"
+ * }
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Any}
+ */
+public  final class Any extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Any)
+AnyOrBuilder {
+  // Use Any.newBuilder() to construct.
+  private 
Any(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private Any() {
+typeUrl_ = "";
+value_ = 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.EMPTY;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Any(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+typeUrl_ = s;
+break;
+  }
+  case 18: {
+
+value_ = input.readBytes();
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  
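The javadoc above spells out the pack/unpack contract; a small sketch against the shaded API, using the well-known Duration type that this same commit restores:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.Any;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Duration;

// Sketch of the Any contract described in the javadoc above.
final class AnySketch {
  public static void main(String[] args) throws Exception {
    Duration d = Duration.newBuilder().setSeconds(1).setNanos(212000000).build();
    Any any = Any.pack(d);         // type URL becomes type.googleapis.com/google.protobuf.Duration
    if (any.is(Duration.class)) {  // compares the type name after the last '/'
      Duration back = any.unpack(Duration.class);
      System.out.println(back.getSeconds() + "s");
    }
  }
}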

[44/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
new file mode 100644
index 000..508790c
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AggregateProtos.java
@@ -0,0 +1,2375 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Aggregate.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class AggregateProtos {
+  private AggregateProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface AggregateRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required string interpreter_class_name = 1;
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+boolean hasInterpreterClassName();
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+java.lang.String getInterpreterClassName();
+/**
+ * required string interpreter_class_name = 1;
+ *
+ * 
+ ** The request passed to the AggregateService consists of three parts
+ *  (1) the (canonical) classname of the ColumnInterpreter implementation
+ *  (2) the Scan query
+ *  (3) any bytes required to construct the ColumnInterpreter object
+ *  properly
+ * 
+ */
+com.google.protobuf.ByteString
+getInterpreterClassNameBytes();
+
+// required .hbase.pb.Scan scan = 2;
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+boolean hasScan();
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan getScan();
+/**
+ * required .hbase.pb.Scan scan = 2;
+ */
+org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanOrBuilder 
getScanOrBuilder();
+
+// optional bytes interpreter_specific_bytes = 3;
+/**
+ * optional bytes interpreter_specific_bytes = 3;
+ */
+boolean hasInterpreterSpecificBytes();
+/**
+ * optional bytes interpreter_specific_bytes = 3;
+ */
+com.google.protobuf.ByteString getInterpreterSpecificBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.AggregateRequest}
+   */
+  public static final class AggregateRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements AggregateRequestOrBuilder {
+// Use AggregateRequest.newBuilder() to construct.
+private AggregateRequest(com.google.protobuf.GeneratedMessage.Builder 
builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private AggregateRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final AggregateRequest defaultInstance;
+public static AggregateRequest getDefaultInstance() {
+  return defaultInstance;
+}
+
+public AggregateRequest getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private AggregateRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+  
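Matching the three request parts enumerated in the javadoc above, a hedged sketch of assembling an AggregateRequest; the stock LongColumnInterpreter classname and the empty Scan are illustrative choices, not part of this diff:

import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

// Hedged sketch: the three parts named in the javadoc above.
final class AggregateRequestSketch {
  static AggregateRequest sample() {
    return AggregateRequest.newBuilder()
        // (1) canonical classname of the ColumnInterpreter implementation
        .setInterpreterClassName(
            "org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter")
        // (2) the Scan query; empty default instance for brevity
        .setScan(ClientProtos.Scan.getDefaultInstance())
        // (3) interpreter-specific bytes are optional and omitted here
        .build();
  }
}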

[39/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
new file mode 100644
index 000..3265158
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Api.java
@@ -0,0 +1,2473 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Api is a light-weight descriptor for a protocol buffer service.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Api}
+ */
+public  final class Api extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Api)
+ApiOrBuilder {
+  // Use Api.newBuilder() to construct.
+  private 
Api(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private Api() {
+name_ = "";
+methods_ = java.util.Collections.emptyList();
+options_ = java.util.Collections.emptyList();
+version_ = "";
+mixins_ = java.util.Collections.emptyList();
+syntax_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Api(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 18: {
+if (!((mutable_bitField0_ & 0x0002) == 0x0002)) {
+  methods_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0002;
+}
+methods_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.parser(),
 extensionRegistry));
+break;
+  }
+  case 26: {
+if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+  options_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0004;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+version_ = s;
+break;
+  }
+  case 42: {
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.Builder 
subBuilder = null;
+if (sourceContext_ != null) {
+  subBuilder = sourceContext_.toBuilder();
+}
+sourceContext_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.parser(),
 extensionRegistry);
+if (subBuilder != null) {
+  subBuilder.mergeFrom(sourceContext_);
+  sourceContext_ = subBuilder.buildPartial();
+}
+
+break;
+  }
+  case 50: {
+if (!((mutable_bitField0_ & 0x0020) == 0x0020)) {
+  mixins_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0020;
+}
+mixins_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Mixin.parser(),
 extensionRegistry));
+break;
+  }
+  case 56: {
+int rawValue = input.readEnum();
+
+syntax_ = rawValue;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  

[47/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
--
diff --git 
a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
new file mode 100644
index 000..b25f7aa
--- /dev/null
+++ 
b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/protobuf/generated/ColumnAggregationWithNullResponseProtos.java
@@ -0,0 +1,1283 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: ColumnAggregationNullResponseProtocol.proto
+
+package org.apache.hadoop.hbase.coprocessor.protobuf.generated;
+
+public final class ColumnAggregationWithNullResponseProtos {
+  private ColumnAggregationWithNullResponseProtos() {}
+  public static void registerAllExtensions(
+  com.google.protobuf.ExtensionRegistry registry) {
+  }
+  public interface ColumnAggregationNullResponseSumRequestOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// optional bytes qualifier = 2;
+/**
+ * optional bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * optional bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code ColumnAggregationNullResponseSumRequest}
+   *
+   * 
+   * use unique names for messages in ColumnAggregationXXX.protos due to a bug 
in
+   * protoc or hadoop's protoc compiler.
+   * 
+   */
+  public static final class ColumnAggregationNullResponseSumRequest extends
+  com.google.protobuf.GeneratedMessage
+  implements ColumnAggregationNullResponseSumRequestOrBuilder {
+// Use ColumnAggregationNullResponseSumRequest.newBuilder() to construct.
+private 
ColumnAggregationNullResponseSumRequest(com.google.protobuf.GeneratedMessage.Builder
 builder) {
+  super(builder);
+  this.unknownFields = builder.getUnknownFields();
+}
+private ColumnAggregationNullResponseSumRequest(boolean noInit) { 
this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ColumnAggregationNullResponseSumRequest 
defaultInstance;
+public static ColumnAggregationNullResponseSumRequest getDefaultInstance() 
{
+  return defaultInstance;
+}
+
+public ColumnAggregationNullResponseSumRequest getDefaultInstanceForType() 
{
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ColumnAggregationNullResponseSumRequest(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
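These proto2-style messages record field presence in bit masks (the bitField0_ |= 0x0001 writes in the parse loop above). A compact standalone sketch of that idiom:

// Standalone sketch of the presence-bit idiom used by the parser above:
// one bit per field, set when the field is seen on the wire.
final class PresenceBits {
  private static final int FAMILY_BIT = 0x0001;    // field 1
  private static final int QUALIFIER_BIT = 0x0002; // field 2
  private int bitField0_;

  void onFamilyRead()    { bitField0_ |= FAMILY_BIT; }
  void onQualifierRead() { bitField0_ |= QUALIFIER_BIT; }
  boolean hasFamily()    { return (bitField0_ & FAMILY_BIT) == FAMILY_BIT; }
  boolean hasQualifier() { return (bitField0_ & QUALIFIER_BIT) == QUALIFIER_BIT; }
}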

[32/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
new file mode 100644
index 000..88effb2
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DoubleArrayList.java
@@ -0,0 +1,273 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.DoubleList;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.RandomAccess;
+
+/**
+ * An implementation of {@link DoubleList} on top of a primitive array.
+ *
+ * @author dw...@google.com (Daniel Weis)
+ */
+final class DoubleArrayList
+extends AbstractProtobufList
+implements DoubleList, RandomAccess {
+
+  private static final DoubleArrayList EMPTY_LIST = new DoubleArrayList();
+  static {
+EMPTY_LIST.makeImmutable();
+  }
+
+  public static DoubleArrayList emptyList() {
+return EMPTY_LIST;
+  }
+
+  /**
+   * The backing store for the list.
+   */
+  private double[] array;
+
+  /**
+   * The size of the list distinct from the length of the array. That is, it 
is the number of
+   * elements set in the list.
+   */
+  private int size;
+
+  /**
+   * Constructs a new mutable {@code DoubleArrayList} with default capacity.
+   */
+  DoubleArrayList() {
+this(new double[DEFAULT_CAPACITY], 0);
+  }
+
+  /**
+   * Constructs a new mutable {@code DoubleArrayList}
+   * containing the same elements as {@code other}.
+   */
+  private DoubleArrayList(double[] other, int size) {
+array = other;
+this.size = size;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (!(o instanceof DoubleArrayList)) {
+  return super.equals(o);
+}
+DoubleArrayList other = (DoubleArrayList) o;
+if (size != other.size) {
+  return false;
+}
+
+final double[] arr = other.array;
+for (int i = 0; i < size; i++) {
+  if (array[i] != arr[i]) {
+return false;
+  }
+}
+
+return true;
+  }
+
+  @Override
+  public int hashCode() {
+int result = 1;
+for (int i = 0; i < size; i++) {
+  long bits = Double.doubleToLongBits(array[i]);
+  result = (31 * result) + Internal.hashLong(bits);
+}
+return result;
+  }
+
+  @Override
+  public DoubleList mutableCopyWithCapacity(int capacity) {
+if (capacity < size) {
+  throw new IllegalArgumentException();
+}
+return new DoubleArrayList(Arrays.copyOf(array, capacity), size);
+  }
+
+  @Override
+  public Double get(int index) {
+return getDouble(index);
+  }
+
+  @Override
+  public double getDouble(int index) {
+ensureIndexInRange(index);
+return array[index];
+  }
+
+  @Override
+  public int size() {
+return size;
+  }
+
+  @Override
+  public Double set(int index, Double element) {
+return setDouble(index, element);
+  }
+
+  
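DoubleArrayList sidesteps Double boxing by storing a raw double[]; a trimmed standalone sketch of the pattern (the real class layers protobuf's immutability and capacity hooks on top, and the initial capacity of 10 here is illustrative):

import java.util.Arrays;

// Minimal sketch of the primitive-backed list pattern shown above.
final class DoubleListSketch {
  private double[] array = new double[10];
  private int size;

  void addDouble(double value) {
    if (size == array.length) {
      array = Arrays.copyOf(array, size * 2); // grow geometrically
    }
    array[size++] = value;
  }

  double getDouble(int index) {
    if (index < 0 || index >= size) {
      throw new IndexOutOfBoundsException("index " + index + ", size " + size);
    }
    return array[index]; // no boxing on the hot path
  }

  int size() { return size; }
}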

[51/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
Revert "HBASE-17056 Remove checked in PB generated files Selective add of 
dependency on"
Revert for now. Build unstable and some interesting issues around
CLASSPATH

This reverts commit df93c13fd21a3f34aa3851893d715cbc4edb555b.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6786b2b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6786b2b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6786b2b6

Branch: refs/heads/HBASE-18147
Commit: 6786b2b63e9ec7fe45c5ed800a54f4f80358c3c3
Parents: 68436c9
Author: Michael Stack 
Authored: Thu Jul 6 21:58:32 2017 -0700
Committer: Michael Stack 
Committed: Thu Jul 6 21:58:32 2017 -0700

--
 hbase-client/pom.xml| 4 -
 hbase-endpoint/README.txt   |25 +-
 hbase-endpoint/pom.xml  |   259 +-
 .../example/generated/BulkDeleteProtos.java |  1792 +
 .../generated/ColumnAggregationProtos.java  |  1277 +
 .../ColumnAggregationWithErrorsProtos.java  |  1290 +
 ...ColumnAggregationWithNullResponseProtos.java |  1283 +
 .../DummyRegionServerEndpointProtos.java|  1225 +
 .../IncrementCounterProcessorTestProtos.java|  4059 +
 .../protobuf/generated/AggregateProtos.java |  2375 +
 .../generated/SecureBulkLoadProtos.java |  2088 +
 hbase-examples/README.txt   |27 +-
 hbase-examples/pom.xml  |   312 +-
 .../example/generated/ExampleProtos.java|  1149 +
 hbase-procedure/pom.xml | 4 -
 hbase-protocol-shaded/README.txt|49 +-
 hbase-protocol-shaded/pom.xml   |   283 +-
 .../com/google/protobuf/AbstractMessage.java|   646 +
 .../google/protobuf/AbstractMessageLite.java|   383 +
 .../com/google/protobuf/AbstractParser.java |   258 +
 .../google/protobuf/AbstractProtobufList.java   |   180 +
 .../hbase/shaded/com/google/protobuf/Any.java   |   899 +
 .../com/google/protobuf/AnyOrBuilder.java   |70 +
 .../shaded/com/google/protobuf/AnyProto.java|59 +
 .../hbase/shaded/com/google/protobuf/Api.java   |  2473 +
 .../com/google/protobuf/ApiOrBuilder.java   |   258 +
 .../shaded/com/google/protobuf/ApiProto.java|98 +
 .../com/google/protobuf/BlockingRpcChannel.java |51 +
 .../com/google/protobuf/BlockingService.java|64 +
 .../shaded/com/google/protobuf/BoolValue.java   |   452 +
 .../com/google/protobuf/BoolValueOrBuilder.java |18 +
 .../com/google/protobuf/BooleanArrayList.java   |   272 +
 .../com/google/protobuf/ByteBufferWriter.java   |   185 +
 .../shaded/com/google/protobuf/ByteInput.java   |81 +
 .../google/protobuf/ByteInputByteString.java|   251 +
 .../shaded/com/google/protobuf/ByteOutput.java  |   116 +
 .../shaded/com/google/protobuf/ByteString.java  |  1565 +
 .../shaded/com/google/protobuf/BytesValue.java  |   454 +
 .../google/protobuf/BytesValueOrBuilder.java|18 +
 .../com/google/protobuf/CodedInputStream.java   |  3549 +
 .../com/google/protobuf/CodedOutputStream.java  |  3001 +
 .../com/google/protobuf/DescriptorProtos.java   | 39141 
 .../shaded/com/google/protobuf/Descriptors.java |  2547 +
 .../com/google/protobuf/DoubleArrayList.java|   273 +
 .../shaded/com/google/protobuf/DoubleValue.java |   454 +
 .../google/protobuf/DoubleValueOrBuilder.java   |18 +
 .../shaded/com/google/protobuf/Duration.java|   618 +
 .../com/google/protobuf/DurationOrBuilder.java  |33 +
 .../com/google/protobuf/DurationProto.java  |59 +
 .../com/google/protobuf/DynamicMessage.java |   684 +
 .../hbase/shaded/com/google/protobuf/Empty.java |   386 +
 .../com/google/protobuf/EmptyOrBuilder.java | 9 +
 .../shaded/com/google/protobuf/EmptyProto.java  |58 +
 .../hbase/shaded/com/google/protobuf/Enum.java  |  1745 +
 .../com/google/protobuf/EnumOrBuilder.java  |   157 +
 .../shaded/com/google/protobuf/EnumValue.java   |  1044 +
 .../com/google/protobuf/EnumValueOrBuilder.java |80 +
 .../com/google/protobuf/ExperimentalApi.java|66 +
 .../shaded/com/google/protobuf/Extension.java   |86 +
 .../com/google/protobuf/ExtensionLite.java  |63 +
 .../com/google/protobuf/ExtensionRegistry.java  |   396 +
 .../protobuf/ExtensionRegistryFactory.java  |95 +
 .../google/protobuf/ExtensionRegistryLite.java  |   227 +
 .../hbase/shaded/com/google/protobuf/Field.java |  2450 +
 .../shaded/com/google/protobuf/FieldMask.java   |   903 +
 .../com/google/protobuf/FieldMaskOrBuilder.java |44 +
 .../com/google/protobuf/FieldMaskProto.java |59 +
 .../com/google/protobuf/FieldOrBuilder.java |   189 +
 .../shaded/com/google/protobuf/FieldSet.java|   909 +
 .../com/google/protobuf/FloatArrayList.java |   272 +
 

[52/57] [abbrv] hbase git commit: HBASE-18083 Make large/small file clean thread number configurable in HFileCleaner

2017-07-07 Thread busbey
HBASE-18083 Make large/small file clean thread number configurable in HFileCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4fe73857
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4fe73857
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4fe73857

Branch: refs/heads/HBASE-18147
Commit: 4fe73857679ecba89a7edd3c17d9f92e4c0e2164
Parents: 6786b2b
Author: Yu Li 
Authored: Fri Jul 7 14:07:23 2017 +0800
Committer: Yu Li 
Committed: Fri Jul 7 14:07:23 2017 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 154 +--
 .../hbase/master/cleaner/TestHFileCleaner.java  |  13 +-
 2 files changed, 120 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4fe73857/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 8b3515a..6d1fbe3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -67,6 +68,16 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> impleme
   "hbase.regionserver.hfilecleaner.small.queue.size";
   public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
+  // Configuration key for large file delete thread number
+  public final static String LARGE_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.large.thread.count";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER = 1;
+
+  // Configuration key for small file delete thread number
+  public final static String SMALL_HFILE_DELETE_THREAD_NUMBER =
+  "hbase.regionserver.hfilecleaner.small.thread.count";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue largeFileQueue;
@@ -74,11 +85,13 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> impleme
   private int throttlePoint;
   private int largeQueueInitSize;
   private int smallQueueInitSize;
+  private int largeFileDeleteThreadNumber;
+  private int smallFileDeleteThreadNumber;
   private List threads = new ArrayList();
   private boolean running;
 
-  private long deletedLargeFiles = 0L;
-  private long deletedSmallFiles = 0L;
+  private AtomicLong deletedLargeFiles = new AtomicLong();
+  private AtomicLong deletedSmallFiles = new AtomicLong();
 
   /**
* @param period the period of time to sleep between each run
@@ -100,6 +113,10 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> impleme
     conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
     largeFileQueue = new StealJobQueue<>(largeQueueInitSize, smallQueueInitSize);
     smallFileQueue = largeFileQueue.getStealFromQueue();
+    largeFileDeleteThreadNumber =
+        conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
+    smallFileDeleteThreadNumber =
+        conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
     startHFileDeleteThreads();
   }
 
@@ -183,30 +200,34 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> impleme
     final String n = Thread.currentThread().getName();
     running = true;
     // start thread for large file deletion
-    Thread large = new Thread() {
-      @Override
-      public void run() {
-        consumerLoop(largeFileQueue);
-      }
-    };
-    large.setDaemon(true);
-    large.setName(n + "-HFileCleaner.large-" + System.currentTimeMillis());
-    large.start();
-    LOG.debug("Starting hfile cleaner for large files: " + large.getName());
-    threads.add(large);
+    for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
+      Thread large = new Thread() {
+        @Override
+        public void run() {
+          consumerLoop(largeFileQueue);
+        }
+      };
+      large.setDaemon(true);
+      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
+      large.start();
+      LOG.debug("Starting hfile cleaner for large files: " + large.getName());
+      threads.add(large);
+    }
 
     // start thread for small file deletion
-    Thread small = new Thread() {
-      @Override
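A hedged sketch of exercising the new knobs from this commit; only the two property names come from the patch, the values are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative values only; the keys are the ones introduced above.
final class HFileCleanerConfigSketch {
  static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.hfilecleaner.large.thread.count", 4);
    conf.setInt("hbase.regionserver.hfilecleaner.small.thread.count", 2);
    return conf;
  }
}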

[35/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
new file mode 100644
index 000..03871c9
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedOutputStream.java
@@ -0,0 +1,3001 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_32_SIZE;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_64_SIZE;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.MAX_VARINT_SIZE;
+import static java.lang.Math.max;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Utf8.UnpairedSurrogateException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Encodes and writes protocol message fields.
+ *
+ * This class contains two kinds of methods:  methods that write specific
+ * protocol message constructs and field types (e.g. {@link #writeTag} and
+ * {@link #writeInt32}) and methods that write low-level values (e.g.
+ * {@link #writeRawVarint32} and {@link #writeRawBytes}).  If you are
+ * writing encoded protocol messages, you should use the former methods, but if
+ * you are writing some other format of your own design, use the latter.
+ *
+ * This class is totally unsynchronized.
+ */
+public abstract class CodedOutputStream extends ByteOutput {
+  private static final Logger logger = 
Logger.getLogger(CodedOutputStream.class.getName());
+  private static final boolean HAS_UNSAFE_ARRAY_OPERATIONS = 
UnsafeUtil.hasUnsafeArrayOperations();
+  private static final long ARRAY_BASE_OFFSET = 
UnsafeUtil.getArrayBaseOffset();
+
+  /**
+   * @deprecated Use {@link #computeFixed32SizeNoTag(int)} instead.
+   */
+  @Deprecated
+  public static final int LITTLE_ENDIAN_32_SIZE = FIXED_32_SIZE;
+
+  /**
+   * The buffer size used in {@link #newInstance(OutputStream)}.
+   */
+  public static final int DEFAULT_BUFFER_SIZE = 4096;
+
+  /**
+   * Returns the buffer size to efficiently write dataLength bytes to this
+   * CodedOutputStream. Used by AbstractMessageLite.
+   *
+   * @return the buffer size to efficiently write dataLength bytes to this
+   * CodedOutputStream.
+   */
+  static int computePreferredBufferSize(int dataLength) {
+if (dataLength > DEFAULT_BUFFER_SIZE) {
+  return DEFAULT_BUFFER_SIZE;
+}
+return dataLength;
+  }
+
+  /**
+   * Create a new {@code CodedOutputStream} wrapping the given {@code 
OutputStream}.
+   *
+   *  NOTE: The provided {@link OutputStream} MUST NOT 
retain access or
+   * modify the provided byte arrays. Doing so may result in corrupted data, 
which would be
+   * difficult to debug.
+   
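The low-level write methods named above (writeRawVarint32 and friends) emit base-128 varints: seven payload bits per byte, with the high bit set while more bytes follow. A standalone sketch of the encoding:

import java.io.ByteArrayOutputStream;

// Standalone sketch of the base-128 varint encoding behind writeRawVarint32.
final class VarintSketch {
  static byte[] encodeVarint32(int value) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while ((value & ~0x7F) != 0) {
      out.write((value & 0x7F) | 0x80); // low 7 bits, continuation bit set
      value >>>= 7;
    }
    out.write(value); // final byte, continuation bit clear
    return out.toByteArray();
  }
  public static void main(String[] args) {
    // 300 encodes as 0xAC 0x02.
    for (byte b : encodeVarint32(300)) System.out.printf("0x%02X ", b);
  }
}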

[30/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
new file mode 100644
index 000..a0a9d49
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/EnumValue.java
@@ -0,0 +1,1044 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/type.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Enum value definition.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.EnumValue}
+ */
+public  final class EnumValue extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.EnumValue)
+EnumValueOrBuilder {
+  // Use EnumValue.newBuilder() to construct.
+  private 
EnumValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private EnumValue() {
+name_ = "";
+number_ = 0;
+options_ = java.util.Collections.emptyList();
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private EnumValue(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 16: {
+
+number_ = input.readInt32();
+break;
+  }
+  case 26: {
+if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+  options_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0004;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0004) == 0x0004)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_EnumValue_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_EnumValue_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.EnumValue.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.EnumValue.Builder.class);
+  }
+
+  private int bitField0_;
+  public static final int NAME_FIELD_NUMBER = 1;
+  private volatile java.lang.Object name_;
+  /**
+   * 
+   * Enum value name.
+   * 
+   *
+   * string name = 1;
+   */
+  public java.lang.String getName() {
+java.lang.Object ref = name_;
+if (ref instanceof java.lang.String) {
+  return (java.lang.String) ref;
+} else {
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+  (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+  java.lang.String s = bs.toStringUtf8();
+  name_ = s;
+  return s;
+}
+  }
+  /**
+   * 
+   * Enum value name.
+   * 
+   *
+   * string name = 1;
+   
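getName() above shows protobuf's lazy UTF-8 handling: the field holds a ByteString until the first String access, then caches the decoded value. A compact sketch of the idiom, with a plain byte[] standing in for ByteString:

import java.nio.charset.StandardCharsets;

// Sketch of the lazy decode-and-cache idiom in getName() above.
final class LazyName {
  private volatile Object name_; // byte[] until first access, then String

  LazyName(byte[] utf8) { this.name_ = utf8; }

  String getName() {
    Object ref = name_;
    if (ref instanceof String) {
      return (String) ref;
    }
    String s = new String((byte[]) ref, StandardCharsets.UTF_8);
    name_ = s; // cache the decoded form; the decode is idempotent, so a race is benign
    return s;
  }
}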

[03/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
new file mode 100644
index 000..e83a7ac
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AccessControlProtos.java
@@ -0,0 +1,11171 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: AccessControl.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class AccessControlProtos {
+  private AccessControlProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  public interface PermissionOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.Permission)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required .hbase.pb.Permission.Type type = 1;</code>
+     */
+    boolean hasType();
+    /**
+     * <code>required .hbase.pb.Permission.Type type = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type getType();
+
+    /**
+     * <code>optional .hbase.pb.GlobalPermission global_permission = 2;</code>
+     */
+    boolean hasGlobalPermission();
+    /**
+     * <code>optional .hbase.pb.GlobalPermission global_permission = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission getGlobalPermission();
+    /**
+     * <code>optional .hbase.pb.GlobalPermission global_permission = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermissionOrBuilder getGlobalPermissionOrBuilder();
+
+    /**
+     * <code>optional .hbase.pb.NamespacePermission namespace_permission = 3;</code>
+     */
+    boolean hasNamespacePermission();
+    /**
+     * <code>optional .hbase.pb.NamespacePermission namespace_permission = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission getNamespacePermission();
+    /**
+     * <code>optional .hbase.pb.NamespacePermission namespace_permission = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermissionOrBuilder getNamespacePermissionOrBuilder();
+
+    /**
+     * <code>optional .hbase.pb.TablePermission table_permission = 4;</code>
+     */
+    boolean hasTablePermission();
+    /**
+     * <code>optional .hbase.pb.TablePermission table_permission = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission getTablePermission();
+    /**
+     * <code>optional .hbase.pb.TablePermission table_permission = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermissionOrBuilder getTablePermissionOrBuilder();
+  }
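
Because Permission is a proto2-style message, the generated hasX() methods report explicit field presence, and callers are expected to test presence before reading the optional submessages. A hedged usage sketch against the interface above (the describe() helper is hypothetical):

    static void describe(AccessControlProtos.Permission perm) {
      // required field: still guarded here, since a partial parse may leave it unset
      if (perm.hasType()) {
        System.out.println("type = " + perm.getType());
      }
      // optional submessage: absent unless explicitly set by the writer
      if (perm.hasTablePermission()) {
        System.out.println("table permission = " + perm.getTablePermission());
      }
    }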
+  /**
+   * Protobuf type {@code hbase.pb.Permission}
+   */
+  public  static final class Permission extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.Permission)
+      PermissionOrBuilder {
+    // Use Permission.newBuilder() to construct.
+    private Permission(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private Permission() {
+      type_ = 1;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private Permission(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if 
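
The parsing loop that this message truncates dispatches on the raw tag read from the stream; a protobuf tag packs the field number and wire type into a single varint, and tag 0 is invalid, which is why "case 0" ends the loop. A small worked sketch of that encoding (plain Java, not part of the diff):

    // A protobuf tag is (fieldNumber << 3) | wireType.
    static int makeTag(int fieldNumber, int wireType) {
      return (fieldNumber << 3) | wireType;
    }
    // Examples matching cases seen in these generated parsers:
    //   makeTag(1, 0) == 8   -> field 1, varint (e.g. an enum)
    //   makeTag(1, 2) == 10  -> field 1, length-delimited (e.g. a string)
    //   makeTag(2, 1) == 17  -> field 2, 64-bit (e.g. a double)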

[08/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Value.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Value.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Value.java
new file mode 100644
index 000..593043b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Value.java
@@ -0,0 +1,1410 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/struct.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * <pre>
+ * `Value` represents a dynamically typed value which can be either
+ * null, a number, a string, a boolean, a recursive struct value, or a
+ * list of values. A producer of value is expected to set one of those
+ * variants; absence of any variant indicates an error.
+ * The JSON representation for `Value` is JSON value.
+ * </pre>
+ *
+ * Protobuf type {@code google.protobuf.Value}
+ */
+public  final class Value extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.Value)
+    ValueOrBuilder {
+  // Use Value.newBuilder() to construct.
+  private Value(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private Value() {
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Value(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 8: {
+int rawValue = input.readEnum();
+kindCase_ = 1;
+kind_ = rawValue;
+break;
+  }
+  case 17: {
+kindCase_ = 2;
+kind_ = input.readDouble();
+break;
+  }
+  case 26: {
+java.lang.String s = input.readStringRequireUtf8();
+kindCase_ = 3;
+kind_ = s;
+break;
+  }
+  case 32: {
+kindCase_ = 4;
+kind_ = input.readBool();
+break;
+  }
+          case 42: {
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.Struct.Builder subBuilder = null;
+            if (kindCase_ == 5) {
+              subBuilder = ((org.apache.hadoop.hbase.shaded.com.google.protobuf.Struct) kind_).toBuilder();
+            }
+            kind_ = input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Struct.parser(), extensionRegistry);
+            if (subBuilder != null) {
+              subBuilder.mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.Struct) kind_);
+              kind_ = subBuilder.buildPartial();
+            }
+            kindCase_ = 5;
+            break;
+          }
+          case 50: {
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ListValue.Builder subBuilder = null;
+            if (kindCase_ == 6) {
+              subBuilder = ((org.apache.hadoop.hbase.shaded.com.google.protobuf.ListValue) kind_).toBuilder();
+            }
+            kind_ = input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.ListValue.parser(), extensionRegistry);
+            if (subBuilder != null) {
+              subBuilder.mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.ListValue) kind_);
+              kind_ = subBuilder.buildPartial();
+            }
+            kindCase_ = 6;
+            break;
+          }
+        }
+      }
+    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      makeExtensionsImmutable();
+    }
+  }
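
The constructor above records which member of the kind oneof was read by setting kindCase_, and only one variant is ever populated. A hedged sketch of reading that oneof through the standard generated accessors (assuming the shaded Value class is imported):

    static String describeKind(Value v) {
      switch (v.getKindCase()) {
        case NUMBER_VALUE: return "number: " + v.getNumberValue();  // kindCase_ == 2
        case STRING_VALUE: return "string: " + v.getStringValue();  // kindCase_ == 3
        case KIND_NOT_SET: return "unset (an error, per the comment above)";
        default:           return v.getKindCase().toString();
      }
    }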
+  public static final 

[15/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
new file mode 100644
index 000..f3e2ef6
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/StringValue.java
@@ -0,0 +1,532 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * <pre>
+ * Wrapper message for `string`.
+ * The JSON representation for `StringValue` is JSON string.
+ * </pre>
+ *
+ * Protobuf type {@code google.protobuf.StringValue}
+ */
+public  final class StringValue extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.StringValue)
+    StringValueOrBuilder {
+  // Use StringValue.newBuilder() to construct.
+  private StringValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private StringValue() {
+    value_ = "";
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private StringValue(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    int mutable_bitField0_ = 0;
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          default: {
+            if (!input.skipField(tag)) {
+              done = true;
+            }
+            break;
+          }
+          case 10: {
+            java.lang.String s = input.readStringRequireUtf8();
+
+            value_ = s;
+            break;
+          }
+        }
+      }
+    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      makeExtensionsImmutable();
+    }
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_StringValue_descriptor;
+  }
+
+  protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_StringValue_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.StringValue.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.StringValue.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private volatile java.lang.Object value_;
+  /**
+   * <pre>
+   * The string value.
+   * </pre>
+   *
+   * <code>string value = 1;</code>
+   */
+  public java.lang.String getValue() {
+    java.lang.Object ref = value_;
+    if (ref instanceof java.lang.String) {
+      return (java.lang.String) ref;
+    } else {
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+          (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      java.lang.String s = bs.toStringUtf8();
+      value_ = s;
+      return s;
+    }
+  }
+  /**
+   * <pre>
+   * The string value.
+   * </pre>
+   *
+   * <code>string value = 1;</code>
+   */
+  public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+      getValueBytes() {
+    java.lang.Object ref = value_;
+    if (ref instanceof java.lang.String) {
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+              (java.lang.String) ref);
+      value_ = b;
+      return b;
+    } else {
+      return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+    }
+  }
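
Wrapper types such as StringValue exist so a message can distinguish "field not set" from "field set to its default"; a bare proto3 string cannot make that distinction. A hedged sketch, assuming a hypothetical message Profile declaring `google.protobuf.StringValue nickname = 1;` (message-typed fields get a generated hasX() even in proto3):

    static String nicknameOrNull(Profile p) {
      // "" and unset are now distinct states
      return p.hasNickname() ? p.getNickname().getValue() : null;
    }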
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean 

[11/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UInt32Value.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UInt32Value.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UInt32Value.java
new file mode 100644
index 000..7b4dae9
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UInt32Value.java
@@ -0,0 +1,451 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * <pre>
+ * Wrapper message for `uint32`.
+ * The JSON representation for `UInt32Value` is JSON number.
+ * </pre>
+ *
+ * Protobuf type {@code google.protobuf.UInt32Value}
+ */
+public  final class UInt32Value extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.UInt32Value)
+    UInt32ValueOrBuilder {
+  // Use UInt32Value.newBuilder() to construct.
+  private UInt32Value(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private UInt32Value() {
+    value_ = 0;
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private UInt32Value(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    int mutable_bitField0_ = 0;
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          default: {
+            if (!input.skipField(tag)) {
+              done = true;
+            }
+            break;
+          }
+          case 8: {
+
+            value_ = input.readUInt32();
+            break;
+          }
+        }
+      }
+    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      makeExtensionsImmutable();
+    }
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_UInt32Value_descriptor;
+  }
+
+  protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_UInt32Value_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.UInt32Value.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.UInt32Value.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private int value_;
+  /**
+   * <pre>
+   * The uint32 value.
+   * </pre>
+   *
+   * <code>uint32 value = 1;</code>
+   */
+  public int getValue() {
+return value_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+      throws java.io.IOException {
+    if (value_ != 0) {
+      output.writeUInt32(1, value_);
+    }
+  }
+
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (value_ != 0) {
+      size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(1, value_);
+    }
+    memoizedSize = size;
+    return size;
+  }
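
writeTo and getSerializedSize above skip the field entirely when value_ is zero, so a default-valued proto3 scalar costs nothing on the wire; a non-zero value costs one tag byte plus a varint. A standalone sketch of the varint-length arithmetic that computeUInt32Size performs for the value part:

    // Unsigned varint length: one byte per 7 payload bits.
    static int varintSize(int unsignedValue) {
      int size = 1;
      long v = unsignedValue & 0xFFFFFFFFL;
      while (v >= 0x80) {
        v >>>= 7;
        size++;
      }
      return size;
    }
    // varintSize(0) == 1 and varintSize(300) == 2, so UInt32Value{value=300}
    // serializes to 1 (tag) + 2 (varint) = 3 bytes, while value=0 serializes to 0 bytes.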
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+    if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.UInt32Value)) {
+      return 

[33/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Descriptors.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Descriptors.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Descriptors.java
new file mode 100644
index 000..62ccd19
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Descriptors.java
@@ -0,0 +1,2547 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.*;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.Syntax;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.WeakHashMap;
+import java.util.logging.Logger;
+
+/**
+ * Contains a collection of classes which describe protocol message types.
+ *
+ * Every message type has a {@link Descriptor}, which lists all
+ * its fields and other information about a type.  You can get a message
+ * type's descriptor by calling {@code MessageType.getDescriptor()}, or
+ * (given a message object of the type) {@code message.getDescriptorForType()}.
+ * Furthermore, each message is associated with a {@link FileDescriptor} for
+ * a relevant {@code .proto} file. You can obtain it by calling
+ * {@code Descriptor.getFile()}. A {@link FileDescriptor} contains descriptors
+ * for all the messages defined in that file, and file descriptors for all the
+ * imported {@code .proto} files.
+ *
+ * Descriptors are built from DescriptorProtos, as defined in
+ * {@code google/protobuf/descriptor.proto}.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public final class Descriptors {
+  private static final Logger logger =
+  Logger.getLogger(Descriptors.class.getName());
+  /**
+   * Describes a {@code .proto} file, including everything defined within.
+   * That includes, in particular, descriptors for all the messages and
+   * file descriptors for all other imported {@code .proto} files
+   * (dependencies).
+   */
+  public static final class FileDescriptor extends GenericDescriptor {
+/** Convert the descriptor to its protocol message representation. */
+@Override
+public FileDescriptorProto toProto() {
+  return proto;
+}
+
+/** Get the file name. */
+@Override
+public String getName() {
+  return proto.getName();
+}
+
+/** Returns this object. */
+@Override
+public FileDescriptor getFile() {
+  return this;
+}
+
+/** Returns the same as getName(). */
+@Override
+public String getFullName() {
+  return proto.getName();
+}
+
+/**
+ * Get the proto package name.  This is the package name given by the
+ * {@code package} statement in the {@code .proto} file, which differs
+ * from the Java package.
+ */
+public String getPackage() { return 
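
The javadoc above describes the descriptor graph; a hedged usage sketch of those lookups (using EnumValue from earlier in this patch series as an arbitrary generated type, shaded imports assumed):

    static void dumpDescriptor() {
      Descriptors.Descriptor d = EnumValue.getDescriptor();      // MessageType.getDescriptor()
      Descriptors.FileDescriptor file = d.getFile();             // owning .proto file
      System.out.println(file.getName());                        // e.g. "google/protobuf/type.proto"
      for (Descriptors.FieldDescriptor f : d.getFields()) {
        System.out.println(f.getNumber() + ": " + f.getName());  // field numbers and names
      }
    }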

[10/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UnknownFieldSetLite.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UnknownFieldSetLite.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UnknownFieldSetLite.java
new file mode 100644
index 000..faa4c30
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/UnknownFieldSetLite.java
@@ -0,0 +1,432 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * {@code UnknownFieldSetLite} is used to keep track of fields which were seen
+ * when parsing a protocol message but whose field numbers or types are
+ * unrecognized. This most frequently occurs when new fields are added to a
+ * message type and then messages containing those fields are read by old
+ * software that was compiled before the new types were added.
+ *
+ * For use by generated code only.
+ *
+ * @author dw...@google.com (Daniel Weis)
+ */
+public final class UnknownFieldSetLite {
+  
+  // Arbitrarily chosen.
+  // TODO(dweis): Tune this number?
+  private static final int MIN_CAPACITY = 8;
+
+  private static final UnknownFieldSetLite DEFAULT_INSTANCE =
+      new UnknownFieldSetLite(0, new int[0], new Object[0], false /* isMutable */);
+
+  /**
+   * Get an empty {@code UnknownFieldSetLite}.
+   *
+   * For use by generated code only.
+   */
+  public static UnknownFieldSetLite getDefaultInstance() {
+return DEFAULT_INSTANCE;
+  }
+  
+  /**
+   * Returns a new mutable instance.
+   */
+  static UnknownFieldSetLite newInstance() {
+return new UnknownFieldSetLite();
+  }
+
+  /**
+   * Returns a mutable {@code UnknownFieldSetLite} that is the composite of {@code first} and
+   * {@code second}.
+   */
+  static UnknownFieldSetLite mutableCopyOf(UnknownFieldSetLite first, UnknownFieldSetLite second) {
+    int count = first.count + second.count;
+    int[] tags = Arrays.copyOf(first.tags, count);
+    System.arraycopy(second.tags, 0, tags, first.count, second.count);
+    Object[] objects = Arrays.copyOf(first.objects, count);
+    System.arraycopy(second.objects, 0, objects, first.count, second.count);
+    return new UnknownFieldSetLite(count, tags, objects, true /* isMutable */);
+  }
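
mutableCopyOf merges two sets by growing the first set's parallel arrays to the combined length and appending the second's. The same merge shape in isolation:

    // Grow `first` to the combined length, then append `second` after it.
    static int[] concat(int[] first, int firstCount, int[] second, int secondCount) {
      int[] merged = java.util.Arrays.copyOf(first, firstCount + secondCount);
      System.arraycopy(second, 0, merged, firstCount, secondCount);
      return merged;
    }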
+  
+  /**
+   * The number of elements in the set.
+   */
+  private int count;
+  
+  /**
+   * The tag numbers for the elements in the set.
+   */
+  private int[] tags;
+  
+  /**
+   * The boxed values of the elements in the set.
+   */
+  private Object[] objects;
+  
+  /**
+   * The lazily computed serialized size of the set.
+   */
+  private int memoizedSerializedSize = -1;
+  
+  /**
+   * Indicates that this object is mutable. 
+   */
+  private boolean isMutable;
+
+  /**
+   * Constructs a mutable {@code UnknownFieldSetLite}.
+   */
+  private UnknownFieldSetLite() {
+    this(0, new int[MIN_CAPACITY], new Object[MIN_CAPACITY], true /* isMutable */);
+  }
+  
+  /**
+   * Constructs the {@code UnknownFieldSetLite}.
+   */
+  private 

[13/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
new file mode 100644
index 000..cd08eab
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
@@ -0,0 +1,226 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+
+/**
+ * Data structure which is populated with the locations of each field value parsed from the text.
+ *
+ * The locations of primary field values are retrieved by {@code getLocation} or
+ * {@code getLocations}.  The locations of sub message values are within nested
+ * {@code TextFormatParseInfoTree}s and are retrieved by {@code getNestedTree} or
+ * {@code getNestedTrees}.
+ *
+ * The {@code TextFormatParseInfoTree} is created by a Builder.
+ */
+public class TextFormatParseInfoTree {
+
+  // Defines a mapping between each field's descriptor to the list of locations where
+  // its value(s) were encountered.
+  private Map<FieldDescriptor, List<TextFormatParseLocation>> locationsFromField;
+
+  // Defines a mapping between a field's descriptor to a list of TextFormatParseInfoTrees for
+  // sub message location information.
+  Map<FieldDescriptor, List<TextFormatParseInfoTree>> subtreesFromField;
+
+  /**
+   * Construct a {@code TextFormatParseInfoTree}.
+   *
+   * @param locationsFromField a map of fields to location in the source code
+   * @param subtreeBuildersFromField a map of fields to parse tree location information builders
+   */
+  private TextFormatParseInfoTree(
+      Map<FieldDescriptor, List<TextFormatParseLocation>> locationsFromField,
+      Map<FieldDescriptor, List<TextFormatParseInfoTree.Builder>> subtreeBuildersFromField) {
+
+    // The maps are unmodifiable.  The values in the maps are unmodifiable.
+    Map<FieldDescriptor, List<TextFormatParseLocation>> locs =
+        new HashMap<FieldDescriptor, List<TextFormatParseLocation>>();
+    for (Entry<FieldDescriptor, List<TextFormatParseLocation>> kv : locationsFromField.entrySet()) {
+      locs.put(kv.getKey(), Collections.unmodifiableList(kv.getValue()));
+    }
+    this.locationsFromField = Collections.unmodifiableMap(locs);
+
+    Map<FieldDescriptor, List<TextFormatParseInfoTree>> subs =
+        new HashMap<FieldDescriptor, List<TextFormatParseInfoTree>>();
+    for (Entry<FieldDescriptor, List<Builder>> kv : subtreeBuildersFromField.entrySet()) {
+      List<TextFormatParseInfoTree> submessagesOfField = new ArrayList<TextFormatParseInfoTree>();
+      for (Builder subBuilder : kv.getValue()) {
+        submessagesOfField.add(subBuilder.build());
+      }
+      subs.put(kv.getKey(), Collections.unmodifiableList(submessagesOfField));
+    }
+    this.subtreesFromField = Collections.unmodifiableMap(subs);
+  }
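
The constructor defensively wraps both maps and every contained list, so the finished tree is deeply immutable even though mutable builders produced it. The same deep-freeze idiom in isolation:

    // Wrap each value list, then the map, so neither level can be mutated
    // through the returned reference.
    static <K, V> Map<K, List<V>> deepFreeze(Map<K, List<V>> source) {
      Map<K, List<V>> copy = new HashMap<K, List<V>>();
      for (Map.Entry<K, List<V>> e : source.entrySet()) {
        copy.put(e.getKey(), Collections.unmodifiableList(new ArrayList<V>(e.getValue())));
      }
      return Collections.unmodifiableMap(copy);
    }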
+
+ /**
+  * Retrieve all the locations of a field.
+  *
+  * @param fieldDescriptor the {@link FieldDescriptor} of the 

[36/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
new file mode 100644
index 000..23cc1a4
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/CodedInputStream.java
@@ -0,0 +1,3549 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EMPTY_BYTE_ARRAY;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EMPTY_BYTE_BUFFER;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.UTF_8;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.checkNotNull;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_32_SIZE;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FIXED_64_SIZE;
+import static org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.MAX_VARINT_SIZE;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Reads and decodes protocol message fields.
+ *
+ * This class contains two kinds of methods: methods that read specific protocol message
+ * constructs and field types (e.g. {@link #readTag()} and {@link #readInt32()}) and methods that
+ * read low-level values (e.g. {@link #readRawVarint32()} and {@link #readRawBytes}). If you are
+ * reading encoded protocol messages, you should use the former methods, but if you are reading
+ * some other format of your own design, use the latter.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class CodedInputStream {
+  private static final int DEFAULT_BUFFER_SIZE = 4096;
+  private static final int DEFAULT_RECURSION_LIMIT = 100;
+  // Integer.MAX_VALUE == 0x7FFFFFFF == INT_MAX from limits.h
+  private static final int DEFAULT_SIZE_LIMIT = Integer.MAX_VALUE;
+
+  /** Visible for subclasses. See setRecursionLimit() */
+  int recursionDepth;
+
+  int recursionLimit = DEFAULT_RECURSION_LIMIT;
+
+  /** Visible for subclasses. See setSizeLimit() */
+  int sizeLimit = DEFAULT_SIZE_LIMIT;
+
+  /** Create a new CodedInputStream wrapping the given InputStream. */
+  public static CodedInputStream newInstance(final InputStream input) {
+    return newInstance(input, DEFAULT_BUFFER_SIZE);
+  }
+
+  /** Create a new CodedInputStream wrapping the given InputStream. */
+  static CodedInputStream newInstance(final InputStream input, int bufferSize) {
+    if (input == null) {
+      // TODO(nathanmittler): Ideally we should throw here. This is done for backward compatibility.
+      return newInstance(EMPTY_BYTE_ARRAY);
+    }
+    return new StreamDecoder(input, bufferSize);
+  }
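
Following the javadoc's split between message-level readers and low-level readers, here is a hedged sketch of hand-decoding a single varint field with the methods named above (shaded imports assumed):

    // Decode field 1 (wire type 0, varint) the way the generated parsers do.
    static long readField1(byte[] bytes) throws java.io.IOException {
      CodedInputStream in =
          CodedInputStream.newInstance(new java.io.ByteArrayInputStream(bytes));
      int tag = in.readTag();      // (fieldNumber << 3) | wireType
      if (tag == (1 << 3)) {
        return in.readInt64();
      }
      in.skipField(tag);           // not the field we wanted: skip per wire type
      return -1L;
    }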
+
+  /** Create a new CodedInputStream wrapping the given 

[06/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProcedureProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProcedureProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProcedureProtos.java
new file mode 100644
index 000..46e9c01
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProcedureProtos.java
@@ -0,0 +1,608 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: TestProcedure.proto
+
+package org.apache.hadoop.hbase.shaded.ipc.protobuf.generated;
+
+public final class TestProcedureProtos {
+  private TestProcedureProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  public interface TestTableDDLStateDataOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:TestTableDDLStateData)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    java.lang.String getTableName();
+    /**
+     * <code>required string table_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getTableNameBytes();
+  }
+  /**
+   * Protobuf type {@code TestTableDDLStateData}
+   */
+  public  static final class TestTableDDLStateData extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:TestTableDDLStateData)
+      TestTableDDLStateDataOrBuilder {
+    // Use TestTableDDLStateData.newBuilder() to construct.
+    private TestTableDDLStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private TestTableDDLStateData() {
+      tableName_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TestTableDDLStateData(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              tableName_ = bs;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
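
Note how the parser sets bit 0 of bitField0_ when tag 10 delivers table_name; because the field is required, isInitialized() later verifies that bit. The bookkeeping in miniature (plain Java, illustrative only):

    // One presence bit per proto2 field.
    static final int TABLE_NAME_BIT = 0x00000001;

    static boolean hasTableName(int bitField0) {
      return (bitField0 & TABLE_NAME_BIT) == TABLE_NAME_BIT;
    }
    // During parsing:  bitField0 |= TABLE_NAME_BIT;  // field seen on the wire
    // isInitialized(): returns false while any required field's bit is clear.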
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos.internal_static_TestTableDDLStateData_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos.internal_static_TestTableDDLStateData_fieldAccessorTable
+

[20/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
new file mode 100644
index 000..eea6db5
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MessageLiteToString.java
@@ -0,0 +1,239 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+
+/**
+ * Helps generate {@link String} representations of {@link MessageLite} protos.
+ */
+// TODO(dweis): Fix map fields.
+final class MessageLiteToString {
+
+  private static final String LIST_SUFFIX = "List";
+  private static final String BUILDER_LIST_SUFFIX = "OrBuilderList";
+  private static final String BYTES_SUFFIX = "Bytes";
+  
+  /**
+   * Returns a {@link String} representation of the {@link MessageLite} object.  The first line of
+   * the {@code String} representation includes a comment string to uniquely identify
+   * the object instance. This acts as an indicator that this should not be relied on for
+   * comparisons.
+   *
+   * For use by generated code only.
+   */
+  static String toString(MessageLite messageLite, String commentString) {
+    StringBuilder buffer = new StringBuilder();
+    buffer.append("# ").append(commentString);
+    reflectivePrintWithIndent(messageLite, buffer, 0);
+    return buffer.toString();
+  }
+
+  /**
+   * Reflectively prints the {@link MessageLite} to the buffer at given {@code indent} level.
+   *
+   * @param buffer the buffer to write to
+   * @param indent the number of spaces to indent the proto by
+   */
+  private static void reflectivePrintWithIndent(
+      MessageLite messageLite, StringBuilder buffer, int indent) {
+    // Build a map of method name to method. We're looking for methods like getFoo(), hasFoo(), and
+    // getFooList() which might be useful for building an object's string representation.
+    Map<String, Method> nameToNoArgMethod = new HashMap<String, Method>();
+    Map<String, Method> nameToMethod = new HashMap<String, Method>();
+    Set<String> getters = new TreeSet<String>();
+    for (Method method : messageLite.getClass().getDeclaredMethods()) {
+      nameToMethod.put(method.getName(), method);
+      if (method.getParameterTypes().length == 0) {
+        nameToNoArgMethod.put(method.getName(), method);
+
+        if (method.getName().startsWith("get")) {
+          getters.add(method.getName());
+        }
+      }
+    }
+
+    for (String getter : getters) {
+      String suffix = getter.replaceFirst("get", "");
+      if (suffix.endsWith(LIST_SUFFIX) && !suffix.endsWith(BUILDER_LIST_SUFFIX)) {
+        String camelCase = suffix.substring(0, 1).toLowerCase()
+            + suffix.substring(1, suffix.length() - LIST_SUFFIX.length());

[21/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MapEntry.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MapEntry.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MapEntry.java
new file mode 100644
index 000..d586cc7
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MapEntry.java
@@ -0,0 +1,449 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * Implements MapEntry messages.
+ *
+ * In reflection API, map fields will be treated as repeated message fields and
+ * each map entry is accessed as a message. This MapEntry class is used to
+ * represent these map entry messages in reflection API.
+ *
+ * Protobuf internal. Users shouldn't use this class.
+ */
+public final class MapEntry<K, V> extends AbstractMessage {
+
+  private static final class Metadata<K, V> extends MapEntryLite.Metadata<K, V> {
+
+    public final Descriptor descriptor;
+    public final Parser<MapEntry<K, V>> parser;
+
+    public Metadata(
+        Descriptor descriptor,
+        MapEntry<K, V> defaultInstance,
+        WireFormat.FieldType keyType,
+        WireFormat.FieldType valueType) {
+      super(keyType, defaultInstance.key, valueType, defaultInstance.value);
+      this.descriptor = descriptor;
+      this.parser = new AbstractParser<MapEntry<K, V>>() {
+
+        @Override
+        public MapEntry<K, V> parsePartialFrom(
+            CodedInputStream input, ExtensionRegistryLite extensionRegistry)
+            throws InvalidProtocolBufferException {
+          return new MapEntry<K, V>(Metadata.this, input, extensionRegistry);
+        }
+      };
+    }
+  }
+  private final K key;
+  private final V value;
+  private final Metadata<K, V> metadata;
+
+  /** Create a default MapEntry instance. */
+  private MapEntry(
+      Descriptor descriptor,
+      WireFormat.FieldType keyType, K defaultKey,
+      WireFormat.FieldType valueType, V defaultValue) {
+    this.key = defaultKey;
+    this.value = defaultValue;
+    this.metadata = new Metadata<K, V>(descriptor, this, keyType, valueType);
+  }
+
+  /** Create a MapEntry with the provided key and value. */
+  private MapEntry(Metadata<K, V> metadata, K key, V value) {
+    this.key = key;
+    this.value = value;
+    this.metadata = metadata;
+  }
+
+  /** Parsing constructor. */
+  private MapEntry(
+      Metadata<K, V> metadata,
+      CodedInputStream input,
+      ExtensionRegistryLite extensionRegistry)
+      throws InvalidProtocolBufferException {
+    try {
+      this.metadata = metadata;
+      Map.Entry<K, V> entry = MapEntryLite.parseEntry(input, metadata, extensionRegistry);
+  

[09/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Utf8.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Utf8.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Utf8.java
new file mode 100644
index 000..b84efd6
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Utf8.java
@@ -0,0 +1,1764 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeUtil.addressOffset;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeUtil.getArrayBaseOffset;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeUtil.hasUnsafeArrayOperations;
+import static 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeUtil.hasUnsafeByteBufferOperations;
+import static java.lang.Character.MAX_SURROGATE;
+import static java.lang.Character.MIN_SURROGATE;
+import static java.lang.Character.isSurrogatePair;
+import static java.lang.Character.toCodePoint;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A set of low-level, high-performance static utility methods related
+ * to the UTF-8 character encoding.  This class has no dependencies
+ * outside of the core JDK libraries.
+ *
+ * There are several variants of UTF-8.  The one implemented by
+ * this class is the restricted definition of UTF-8 introduced in
+ * Unicode 3.1, which mandates the rejection of "overlong" byte
+ * sequences as well as rejection of 3-byte surrogate codepoint byte
+ * sequences.  Note that the UTF-8 decoder included in Oracle's JDK
+ * has been modified to also reject "overlong" byte sequences, but (as
+ * of 2011) still accepts 3-byte surrogate codepoint byte sequences.
+ *
+ * The byte sequences considered valid by this class are exactly
+ * those that can be roundtrip converted to Strings and back to bytes
+ * using the UTF-8 charset, without loss:  {@code
+ * Arrays.equals(bytes, new String(bytes, Internal.UTF_8).getBytes(Internal.UTF_8))
+ * }
+ *
+ * See the Unicode Standard,
+ * Table 3-6. UTF-8 Bit Distribution,
+ * Table 3-7. Well Formed UTF-8 Byte Sequences.
+ *
+ * This class supports decoding of partial byte sequences, so that the
+ * bytes in a complete UTF-8 byte sequences can be stored in multiple
+ * segments.  Methods typically return {@link #MALFORMED} if the partial
+ * byte sequence is definitely not well-formed, {@link #COMPLETE} if it is
+ * well-formed in the absence of additional input, or if the byte sequence
+ * apparently terminated in the middle of a character, an opaque integer
+ * "state" value containing enough information to decode the character when
+ * passed to a subsequent invocation of a partial decoding method.
+ *
+ * @author marti...@google.com (Martin Buchholz)
+ */
+// TODO(nathanmittler): Copy changes in this class back to Guava
+final class Utf8 {
+
+  /**
+   * UTF-8 is a runtime hot spot so we attempt to provide heavily optimized 
implementations
+   * depending on what is available on the platform. The processor is the 

[24/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
new file mode 100644
index 000..cc815fc
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageV3.java
@@ -0,0 +1,2838 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor;
+// In opensource protobuf, we have versioned this GeneratedMessageV3 class to GeneratedMessageV3V3 and
+// in the future may have GeneratedMessageV3V4 etc. This allows us to change some aspects of this
+// class without breaking binary compatibility with old generated code that still subclasses
+// the old GeneratedMessageV3 class. To allow these different GeneratedMessageV3V? classes to
+// interoperate (e.g., a GeneratedMessageV3V3 object has a message extension field whose class
+// type is GeneratedMessageV3V4), these classes still share a common parent class AbstractMessage
+// and are using the same GeneratedMessage.GeneratedExtension class for extension definitions.
+// Since this class becomes GeneratedMessageV3V? in opensource, we have to add an import here
+// to be able to use GeneratedMessage.GeneratedExtension. The GeneratedExtension definition in
+// this file is also excluded from opensource to avoid conflict.
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessage.GeneratedExtension;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * All generated protocol message classes extend this class.  This class
+ * implements most of the Message and Builder interfaces using Java reflection.
+ * Users can ignore this class and pretend that generated messages implement
+ * the Message interface directly.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
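Because every generated message implements the reflective Message interface, fields can also be read generically through descriptors rather than typed accessors. A brief sketch, assuming the shaded DescriptorProtos classes added elsewhere in this commit are on the classpath (ReflectionDemo is a hypothetical name):

import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;

final class ReflectionDemo {
  public static void main(String[] args) {
    Message msg = FileDescriptorProto.newBuilder().setName("test.proto").build();
    // The reflection API this base class implements: look a field up by name
    // in the descriptor, then read it without compile-time knowledge of the type.
    FieldDescriptor nameField = msg.getDescriptorForType().findFieldByName("name");
    System.out.println(msg.getField(nameField)); // prints: test.proto
  }
}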
+public abstract class GeneratedMessageV3 extends AbstractMessage
+    implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * For testing. Allows a test to disable the optimization that avoids using
+   * field builders for nested messages until they are requested. By disabling
+   * this optimization, existing tests can be reused to test the field builders.
+   */

[17/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
new file mode 100644
index 000..8968a70
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RepeatedFieldBuilder.java
@@ -0,0 +1,708 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.util.AbstractList;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * {@code RepeatedFieldBuilder} implements a structure that a protocol
+ * message uses to hold a repeated field of other protocol messages. It supports
+ * the classical use case of adding immutable {@link Message}'s to the
+ * repeated field and is highly optimized around this (no extra memory
+ * allocations and sharing of immutable arrays).
+ * 
+ * It also supports the additional use case of adding a {@link Message.Builder}
+ * to the repeated field and deferring conversion of that {@code Builder}
+ * to an immutable {@code Message}. In this way, it's possible to maintain
+ * a tree of {@code Builder}'s that acts as a fully read/write data
+ * structure.
+ * 
+ * Logically, one can think of a tree of builders as converting the entire tree
+ * to messages when build is called on the root or when any method is called
+ * that desires a Message instead of a Builder. In terms of the implementation,
+ * the {@code SingleFieldBuilder} and {@code RepeatedFieldBuilder}
+ * classes cache messages that were created so that messages only need to be
+ * created when some change occurred in its builder or a builder for one of its
+ * descendants.
+ *
+ * @param <MType> the type of message for the field
+ * @param <BType> the type of builder for the field
+ * @param <IType> the common interface for the message and the builder
+ *
+ * @author j...@google.com (Jon Perlow)
+ */
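Generated builders expose this machinery through their repeated-field accessors, so the deferred Builder-to-Message conversion described above is visible from the public API. A hedged sketch against the shaded DescriptorProtos types added later in this commit:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorSet;

final class RepeatedBuilderDemo {
  public static void main(String[] args) {
    FileDescriptorSet.Builder set = FileDescriptorSet.newBuilder();
    // addFileBuilder() hands back a nested Builder; the RepeatedFieldBuilder
    // keeps it mutable and only converts it to an immutable Message on build().
    FileDescriptorProto.Builder file = set.addFileBuilder();
    file.setName("a.proto");
    file.setPackage("demo");
    FileDescriptorSet built = set.build();
    System.out.println(built.getFile(0).getName()); // a.proto
  }
}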
+public class RepeatedFieldBuilder
+    <MType extends GeneratedMessage,
+     BType extends GeneratedMessage.Builder,
+     IType extends MessageOrBuilder>
+    implements GeneratedMessage.BuilderParent {
+
+  // Parent to send changes to.
+  private GeneratedMessage.BuilderParent parent;
+
+  // List of messages. Never null. It may be immutable, in which case
+  // isMessagesListMutable will be false. See note below.
+  private List<MType> messages;
+
+  // Whether messages is a mutable array that can be modified.
+  private boolean isMessagesListMutable;
+
+  // List of builders. May be null, in which case, no nested builders were
+  // created. If not null, entries represent the builder for that index.
+  private List<SingleFieldBuilder<MType, BType, IType>> builders;
+
+  // Here are the invariants for messages and builders:
+  // 1. messages is never null and its count corresponds to the number of items
+  //in the repeated field.
+  // 2. If builders is non-null, messages and builders MUST always
+  //contain the same number of items.
+  // 3. Entries in either array can be null, but for any index, there MUST be
+  //    either a Message in messages or a builder in builders.

[34/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
new file mode 100644
index 000..0468e6c
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/DescriptorProtos.java
@@ -0,0 +1,39141 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/descriptor.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public final class DescriptorProtos {
+  private DescriptorProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface FileDescriptorSetOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:google.protobuf.FileDescriptorSet)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * repeated .google.protobuf.FileDescriptorProto file = 1;
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>
+        getFileList();
+    /**
+     * repeated .google.protobuf.FileDescriptorProto file = 1;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto getFile(int index);
+    /**
+     * repeated .google.protobuf.FileDescriptorProto file = 1;
+     */
+    int getFileCount();
+    /**
+     * repeated .google.protobuf.FileDescriptorProto file = 1;
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder>
+        getFileOrBuilderList();
+    /**
+     * repeated .google.protobuf.FileDescriptorProto file = 1;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProtoOrBuilder getFileOrBuilder(
+        int index);
+  }
+  /**
+   * 
+   * The protocol compiler can output a FileDescriptorSet containing the .proto
+   * files it parses.
+   * 
+   *
+   * Protobuf type {@code google.protobuf.FileDescriptorSet}
+   */
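A descriptor set like this is what protoc emits for --descriptor_set_out, so the generated parseFrom entry points can read one straight from disk. A minimal sketch; the file name fds.bin is hypothetical:

import java.io.FileInputStream;
import java.io.InputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorSet;

final class DescriptorSetDemo {
  public static void main(String[] args) throws Exception {
    // Produced with, e.g.: protoc --descriptor_set_out=fds.bin Admin.proto
    try (InputStream in = new FileInputStream("fds.bin")) {
      FileDescriptorSet set = FileDescriptorSet.parseFrom(in);
      for (FileDescriptorProto file : set.getFileList()) {
        System.out.println(file.getName() + " declares " + file.getMessageTypeCount() + " messages");
      }
    }
  }
}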
+  public  static final class FileDescriptorSet extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:google.protobuf.FileDescriptorSet)
+      FileDescriptorSetOrBuilder {
+    // Use FileDescriptorSet.newBuilder() to construct.
+    private FileDescriptorSet(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private FileDescriptorSet() {
+      file_ = java.util.Collections.emptyList();
+    }
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+    private FileDescriptorSet(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+            case 10: {
+              if (!((mutable_bitField0_ & 0x0001) == 0x0001)) {
+                file_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto>();
+                mutable_bitField0_ |= 0x0001;
+              }
+              file_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.DescriptorProtos.FileDescriptorProto.PARSER, extensionRegistry));
+              break;
+            }
+  }
+}
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);

[27/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
new file mode 100644
index 000..f3315c8
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FloatValue.java
@@ -0,0 +1,454 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Wrapper message for `float`.
+ * The JSON representation for `FloatValue` is JSON number.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.FloatValue}
+ */
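Wrapper messages such as this exist so a proto3 API can distinguish "field unset" from the default 0; note that the wire tag 0x0d used in the sketch is exactly the case 13 handled by the parsing constructor below. A short sketch:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue;

final class FloatValueDemo {
  public static void main(String[] args) throws Exception {
    FloatValue v = FloatValue.newBuilder().setValue(1.5f).build();
    byte[] wire = v.toByteArray();        // tag 0x0d + 4-byte little-endian float
    FloatValue back = FloatValue.parseFrom(wire);
    System.out.println(back.getValue());  // 1.5
  }
}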
+public  final class FloatValue extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.FloatValue)
+    FloatValueOrBuilder {
+  // Use FloatValue.newBuilder() to construct.
+  private FloatValue(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private FloatValue() {
+    value_ = 0F;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private FloatValue(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 13: {
+
+value_ = input.readFloat();
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_FloatValue_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_FloatValue_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private float value_;
+  /**
+   * 
+   * The float value.
+   * 
+   *
+   * float value = 1;
+   */
+  public float getValue() {
+return value_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+      throws java.io.IOException {
+    if (value_ != 0F) {
+      output.writeFloat(1, value_);
+    }
+  }
+
+  public int getSerializedSize() {
+int size = memoizedSize;
+if (size != -1) return size;
+
+size = 0;
+if (value_ != 0F) {
+  size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeFloatSize(1, value_);
+}
+memoizedSize = size;
+return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+    if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.FloatValue)) {
+      return super.equals(obj);
+    }

[02/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
new file mode 100644
index 000..812cf3b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -0,0 +1,30634 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Admin.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class AdminProtos {
+  private AdminProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface GetRegionInfoRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.GetRegionInfoRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    boolean hasRegion();
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+    /**
+     * required .hbase.pb.RegionSpecifier region = 1;
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+
+    /**
+     * optional bool compaction_state = 2;
+     */
+    boolean hasCompactionState();
+    /**
+     * optional bool compaction_state = 2;
+     */
+    boolean getCompactionState();
+  }
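The has*/get* pairs above follow the proto2 presence idiom: a getter returns the field default whether or not the field was set, so callers consult the hazzer first. An illustrative sketch (describe is a hypothetical helper, not part of this commit):

import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;

final class PresenceDemo {
  static void describe(GetRegionInfoRequest req) {
    // compaction_state is optional: getCompactionState() returns the default
    // (false) whether or not the field was set, so check presence first.
    if (req.hasCompactionState()) {
      System.out.println("compaction_state = " + req.getCompactionState());
    } else {
      System.out.println("compaction_state not set");
    }
  }
}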
+  /**
+   * Protobuf type {@code hbase.pb.GetRegionInfoRequest}
+   */
+  public  static final class GetRegionInfoRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:hbase.pb.GetRegionInfoRequest)
+  GetRegionInfoRequestOrBuilder {
+// Use GetRegionInfoRequest.newBuilder() to construct.
+private 
GetRegionInfoRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private GetRegionInfoRequest() {
+  compactionState_ = false;
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private GetRegionInfoRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  int mutable_bitField0_ = 0;
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder
 subBuilder = null;
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+subBuilder = region_.toBuilder();
+  }
+  region_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER,
 extensionRegistry);
+  if (subBuilder != null) {
+subBuilder.mergeFrom(region_);
+region_ = subBuilder.buildPartial();
+  }
+  bitField0_ |= 0x0001;
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  compactionState_ = input.readBool();
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch 

[28/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
new file mode 100644
index 000..df42bca
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/FieldMask.java
@@ -0,0 +1,903 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/field_mask.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * `FieldMask` represents a set of symbolic field paths, for example:
+ * paths: "f.a"
+ * paths: "f.b.d"
+ * Here `f` represents a field in some root message, `a` and `b`
+ * fields in the message found in `f`, and `d` a field found in the
+ * message in `f.b`.
+ * Field masks are used to specify a subset of fields that should be
+ * returned by a get operation or modified by an update operation.
+ * Field masks also have a custom JSON encoding (see below).
+ * # Field Masks in Projections
+ * When used in the context of a projection, a response message or
+ * sub-message is filtered by the API to only contain those fields as
+ * specified in the mask. For example, if the mask in the previous
+ * example is applied to a response message as follows:
+ * f {
+ *   a : 22
+ *   b {
+ * d : 1
+ * x : 2
+ *   }
+ *   y : 13
+ * }
+ * z: 8
+ * The result will not contain specific values for fields x,y and z
+ * (their value will be set to the default, and omitted in proto text
+ * output):
+ * f {
+ *   a : 22
+ *   b {
+ * d : 1
+ *   }
+ * }
+ * A repeated field is not allowed except at the last position of a
+ * paths string.
+ * If a FieldMask object is not present in a get operation, the
+ * operation applies to all fields (as if a FieldMask of all fields
+ * had been specified).
+ * Note that a field mask does not necessarily apply to the
+ * top-level response message. In case of a REST get operation, the
+ * field mask applies directly to the response, but in case of a REST
+ * list operation, the mask instead applies to each individual message
+ * in the returned resource list. In case of a REST custom method,
+ * other definitions may be used. Where the mask applies will be
+ * clearly documented together with its declaration in the API.  In
+ * any case, the effect on the returned resource/resources is required
+ * behavior for APIs.
+ * # Field Masks in Update Operations
+ * A field mask in update operations specifies which fields of the
+ * targeted resource are going to be updated. The API is required
+ * to only change the values of the fields as specified in the mask
+ * and leave the others untouched. If a resource is passed in to
+ * describe the updated values, the API ignores the values of all
+ * fields not covered by the mask.
+ * If a repeated field is specified for an update operation, the existing
+ * repeated values in the target resource will be overwritten by the new values.
+ * Note that a repeated field is only allowed in the last position of a `paths`
+ * string.
+ * If a sub-message is specified in the last position of the field mask for an
+ * update operation, then the existing sub-message in the target resource is
+ * overwritten. Given the target message:
+ * f {
+ *   b {
+ * d : 1
+ * x : 2
+ *   }
+ *   c : 1
+ * }
+ * And an update message:
+ * f {
+ *   b {
+ * d : 10
+ *   }
+ * }
+ * then if the field mask is:
+ *  paths: "f.b"
+ * then the result will be:
+ * f {
+ *   b {
+ * d : 10
+ *   }
+ *   c : 1
+ * }
+ * However, if the update mask was:
+ *  paths: "f.b.d"
+ * then the result would be:
+ * f {
+ *   b {
+ * d : 10
+ * x : 2
+ *   }
+ *   c : 1
+ * }
+ * In order to reset a field's value to the default, the field must
+ * be in the mask and set to the default value in the provided resource.
+ * Hence, in order to reset all fields of a resource, provide a default
+ * instance of the resource and set all fields in the mask, or do
+ * not provide a mask as described below.
+ * If a field mask is not present on update, the operation applies to
+ * all fields (as if a field mask of all fields has been specified).
+ * Note that in the presence of schema evolution, this may mean that
+ * fields the client does not know and has therefore not filled into
+ * the request will be reset to their default. If this is unwanted
+ * behavior, a specific service may require a client to always specify
+ * a field mask, producing an error if not.
+ * As with get 

[23/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
new file mode 100644
index 000..476b086
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Int32Value.java
@@ -0,0 +1,451 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/wrappers.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Wrapper message for `int32`.
+ * The JSON representation for `Int32Value` is JSON number.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Int32Value}
+ */
+public  final class Int32Value extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.Int32Value)
+    Int32ValueOrBuilder {
+  // Use Int32Value.newBuilder() to construct.
+  private Int32Value(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private Int32Value() {
+    value_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Int32Value(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 8: {
+
+value_ = input.readInt32();
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_Int32Value_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.WrappersProto.internal_static_google_protobuf_Int32Value_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value.Builder.class);
+  }
+
+  public static final int VALUE_FIELD_NUMBER = 1;
+  private int value_;
+  /**
+   * 
+   * The int32 value.
+   * 
+   *
+   * int32 value = 1;
+   */
+  public int getValue() {
+return value_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+byte isInitialized = memoizedIsInitialized;
+if (isInitialized == 1) return true;
+if (isInitialized == 0) return false;
+
+memoizedIsInitialized = 1;
+return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+      throws java.io.IOException {
+    if (value_ != 0) {
+      output.writeInt32(1, value_);
+    }
+  }
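Because writeTo() above skips the proto3 default, an Int32Value holding 0 serializes to zero bytes. A quick sketch of that behavior:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value;

final class DefaultElisionDemo {
  public static void main(String[] args) {
    // Field 1 is only written when non-zero (see writeTo above).
    System.out.println(Int32Value.newBuilder().setValue(0).build().toByteArray().length); // 0
    System.out.println(Int32Value.newBuilder().setValue(7).build().toByteArray().length); // 2: tag 0x08 + varint 7
  }
}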
+
+  public int getSerializedSize() {
+int size = memoizedSize;
+if (size != -1) return size;
+
+size = 0;
+if (value_ != 0) {
+  size += 
org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+.computeInt32Size(1, value_);
+}
+memoizedSize = size;
+return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+if (obj == this) {
+ return true;
+}
+if (!(obj instanceof 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value)) {
+  return super.equals(obj);
+}
+

[29/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
new file mode 100644
index 000..d33fd75
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Field.java
@@ -0,0 +1,2450 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/type.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * A single field of a message type.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Field}
+ */
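The numeric case labels in the parsing constructor below are precomputed wire-format tags, (field_number << 3) | wire_type. A small sketch showing how the values 8, 34 and 74 seen below arise (TagDemo is illustrative only):

final class TagDemo {
  static int tag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType; // protobuf wire-format tag
  }

  public static void main(String[] args) {
    System.out.println(tag(1, 0));  // 8  -> case 8:  kind, a varint-encoded enum
    System.out.println(tag(4, 2));  // 34 -> case 34: name, a length-delimited string
    System.out.println(tag(9, 2));  // 74 -> case 74: options, a length-delimited message
  }
}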
+public  final class Field extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.Field)
+    FieldOrBuilder {
+  // Use Field.newBuilder() to construct.
+  private Field(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private Field() {
+kind_ = 0;
+cardinality_ = 0;
+number_ = 0;
+name_ = "";
+typeUrl_ = "";
+oneofIndex_ = 0;
+packed_ = false;
+options_ = java.util.Collections.emptyList();
+jsonName_ = "";
+defaultValue_ = "";
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Field(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 8: {
+int rawValue = input.readEnum();
+
+kind_ = rawValue;
+break;
+  }
+  case 16: {
+int rawValue = input.readEnum();
+
+cardinality_ = rawValue;
+break;
+  }
+  case 24: {
+
+number_ = input.readInt32();
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 50: {
+java.lang.String s = input.readStringRequireUtf8();
+
+typeUrl_ = s;
+break;
+  }
+  case 56: {
+
+oneofIndex_ = input.readInt32();
+break;
+  }
+  case 64: {
+
+packed_ = input.readBool();
+break;
+  }
+          case 74: {
+            if (!((mutable_bitField0_ & 0x0080) == 0x0080)) {
+              options_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.com.google.protobuf.Option>();
+              mutable_bitField0_ |= 0x0080;
+            }
+            options_.add(
+                input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(), extensionRegistry));
+            break;
+          }
+  case 82: {
+java.lang.String s = input.readStringRequireUtf8();
+
+jsonName_ = s;
+break;
+  }
+  case 90: {
+java.lang.String s = input.readStringRequireUtf8();
+
+defaultValue_ = s;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0080) == 0x0080)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.TypeProto.internal_static_google_protobuf_Field_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+

[22/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
new file mode 100644
index 000..91b70e0
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/LazyFieldLite.java
@@ -0,0 +1,437 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.io.IOException;
+
+/**
+ * LazyFieldLite encapsulates the logic of lazily parsing message fields. It stores
+ * the message in a ByteString initially and then parses it on-demand.
+ *
+ * LazyFieldLite is thread-compatible: concurrent reads are safe once the proto that this
+ * LazyFieldLite is a part of is no longer being mutated by its Builder. However, explicit
+ * synchronization is needed under read/write situations.
+ *
+ * When a LazyFieldLite is used in the context of a MessageLite object, its behavior is considered
+ * to be immutable and none of the setter methods in its API are expected to be invoked. All of the
+ * getters are expected to be thread-safe. When used in the context of a MessageLite.Builder,
+ * setters can be invoked, but there is no guarantee of thread safety.
+ *
+ * TODO(yatin,dweis): Consider splitting this class's functionality and put the mutable methods
+ * into a separate builder class to allow us to give stronger compile-time guarantees.
+ *
+ * This class is an internal implementation detail of the protobuf library, so you don't need to
+ * use it directly.
+ *
+ * @author xia...@google.com (Xiang Li)
+ */
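A condensed sketch of the lazy-parsing pattern this class implements; the LazyInt32Field class below is hypothetical and far simpler than the real three-field state machine documented next:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int32Value;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;

final class LazyInt32Field {
  private final ByteString delayedBytes; // unparsed wire bytes, kept until first access
  private Int32Value value;              // parsed message, filled in lazily

  LazyInt32Field(ByteString bytes) {
    this.delayedBytes = bytes;
  }

  Int32Value getValue() throws InvalidProtocolBufferException {
    if (value == null) {
      value = Int32Value.parseFrom(delayedBytes); // parse on first read only
    }
    return value;
  }
}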
+public class LazyFieldLite {
+  private static final ExtensionRegistryLite EMPTY_REGISTRY =
+  ExtensionRegistryLite.getEmptyRegistry();
+
+  /**
+   * The value associated with the LazyFieldLite object is stored in one or more of the following
+   * three fields (delayedBytes, value, memoizedBytes). They should together be interpreted as
+   * follows.
+   * 1) delayedBytes can be non-null, while value and memoizedBytes is null. The object will be in
+   *    this state while the value for the object has not yet been parsed.
+   * 2) Both delayedBytes and value are non-null. The object transitions to this state as soon as
+   *    some caller needs to access the value (by invoking getValue()).
+   * 3) memoizedBytes is merely an optimization for calls to LazyFieldLite.toByteString() to avoid
+   *    recomputing the ByteString representation on each call. Instead, when the value is parsed
+   *    from delayedBytes, we will also assign the contents of delayedBytes to memoizedBytes (since
+   *    that is the ByteString representation of value).
+   * 4) Finally, if the LazyFieldLite was created directly with a parsed MessageLite value, then
+   *    delayedBytes will be null, and memoizedBytes will be initialized only upon the first call to
+   *    LazyFieldLite.toByteString().
+   *
+   * Given the above conditions, any caller that needs a serialized representation of 

[05/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProtos.java
new file mode 100644
index 000..a30032d
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestProtos.java
@@ -0,0 +1,2987 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: test.proto
+
+package org.apache.hadoop.hbase.shaded.ipc.protobuf.generated;
+
+public final class TestProtos {
+  private TestProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface EmptyRequestProtoOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:EmptyRequestProto)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code EmptyRequestProto}
+   */
+  public  static final class EmptyRequestProto extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // @@protoc_insertion_point(message_implements:EmptyRequestProto)
+  EmptyRequestProtoOrBuilder {
+// Use EmptyRequestProto.newBuilder() to construct.
+private 
EmptyRequestProto(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private EmptyRequestProto() {
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private EmptyRequestProto(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.internal_static_EmptyRequestProto_descriptor;
+}
+
+protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return 
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.internal_static_EmptyRequestProto_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto.class,
 
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void 
writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream 
output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+

[07/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.java
new file mode 100644
index 000..71975c2
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/compiler/PluginProtos.java
@@ -0,0 +1,5424 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/compiler/plugin.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf.compiler;
+
+public final class PluginProtos {
+  private PluginProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface VersionOrBuilder extends
+  // 
@@protoc_insertion_point(interface_extends:google.protobuf.compiler.Version)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+/**
+ * optional int32 major = 1;
+ */
+boolean hasMajor();
+/**
+ * optional int32 major = 1;
+ */
+int getMajor();
+
+/**
+ * optional int32 minor = 2;
+ */
+boolean hasMinor();
+/**
+ * optional int32 minor = 2;
+ */
+int getMinor();
+
+/**
+ * optional int32 patch = 3;
+ */
+boolean hasPatch();
+/**
+ * optional int32 patch = 3;
+ */
+int getPatch();
+
+    /**
+     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+     * be empty for mainline stable releases.
+     *
+     * optional string suffix = 4;
+     */
+    boolean hasSuffix();
+    /**
+     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+     * be empty for mainline stable releases.
+     *
+     * optional string suffix = 4;
+     */
+    java.lang.String getSuffix();
+    /**
+     * A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+     * be empty for mainline stable releases.
+     *
+     * optional string suffix = 4;
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getSuffixBytes();
+  }
+  /**
+   * 
+   * The version number of protocol compiler.
+   * 
+   *
+   * Protobuf type {@code google.protobuf.compiler.Version}
+   */
+  public  static final class Version extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+  // 
@@protoc_insertion_point(message_implements:google.protobuf.compiler.Version)
+  VersionOrBuilder {
+// Use Version.newBuilder() to construct.
+private 
Version(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+  super(builder);
+}
+private Version() {
+  major_ = 0;
+  minor_ = 0;
+  patch_ = 0;
+  suffix_ = "";
+}
+
+@java.lang.Override
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private Version(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+  this();
+  int mutable_bitField0_ = 0;
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder 
unknownFields =
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 8: {
+  bitField0_ |= 0x0001;
+  major_ = input.readInt32();
+  break;
+}
+case 16: {
+  bitField0_ |= 0x0002;
+  minor_ = input.readInt32();
+  break;
+}
+case 24: {
+  

[01/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18147 2ec2d3693 -> 59ec40882 (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
new file mode 100644
index 000..0271777
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/BackupProtos.java
@@ -0,0 +1,7013 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Backup.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class BackupProtos {
+  private BackupProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  /**
+   * Protobuf enum {@code hbase.pb.BackupType}
+   */
+  public enum BackupType
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+/**
+ * FULL = 0;
+ */
+FULL(0),
+/**
+ * INCREMENTAL = 1;
+ */
+INCREMENTAL(1),
+;
+
+/**
+ * FULL = 0;
+ */
+public static final int FULL_VALUE = 0;
+/**
+ * INCREMENTAL = 1;
+ */
+public static final int INCREMENTAL_VALUE = 1;
+
+
+public final int getNumber() {
+  return value;
+}
+
+/**
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+@java.lang.Deprecated
+public static BackupType valueOf(int value) {
+  return forNumber(value);
+}
+
+public static BackupType forNumber(int value) {
+  switch (value) {
+case 0: return FULL;
+case 1: return INCREMENTAL;
+default: return null;
+  }
+}
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<BackupType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+        BackupType> internalValueMap =
+          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<BackupType>() {
+            public BackupType findValueByNumber(int number) {
+              return BackupType.forNumber(number);
+            }
+          };
+
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+getValueDescriptor() {
+  return getDescriptor().getValues().get(ordinal());
+}
+public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptorForType() {
+  return getDescriptor();
+}
+public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+getDescriptor() {
+  return 
org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.getDescriptor().getEnumTypes().get(0);
+}
+
+private static final BackupType[] VALUES = values();
+
+public static BackupType valueOf(
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
 desc) {
+  if (desc.getType() != getDescriptor()) {
+throw new java.lang.IllegalArgumentException(
+  "EnumValueDescriptor is not for this type.");
+  }
+  return VALUES[desc.getIndex()];
+}
+
+private final int value;
+
+private BackupType(int value) {
+  this.value = value;
+}
+
+// @@protoc_insertion_point(enum_scope:hbase.pb.BackupType)
+  }
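Unlike the deprecated valueOf(int), forNumber() above returns null for numbers that are not in the .proto definition, so callers must handle that case. A brief sketch:

import org.apache.hadoop.hbase.shaded.protobuf.generated.BackupProtos.BackupType;

final class EnumLookupDemo {
  public static void main(String[] args) {
    BackupType t = BackupType.forNumber(1);
    System.out.println(t);                        // INCREMENTAL
    // Numbers absent from the .proto map to null rather than throwing.
    System.out.println(BackupType.forNumber(42)); // null
  }
}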
+
+  public interface ServerTimestampOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ServerTimestamp)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * optional .hbase.pb.ServerName server_name = 1;
+     */
+    boolean hasServerName();
+    /**
+     * optional .hbase.pb.ServerName server_name = 1;
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName();
+    /**
+     * optional .hbase.pb.ServerName server_name = 1;
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+
+    /**
+     * optional uint64 timestamp = 2;
+     */
+    boolean hasTimestamp();
+    /**
+     * optional uint64 timestamp = 2;
+     */
+    long getTimestamp();
+  }
+  /**
+   * 
+   **
+   * ServerTimestamp keeps last WAL roll time per Region 

[26/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessage.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessage.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessage.java
new file mode 100644
index 000..4a0f93a
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessage.java
@@ -0,0 +1,3047 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+/**
+ * All generated protocol message classes extend this class.  This class
+ * implements most of the Message and Builder interfaces using Java reflection.
+ * Users can ignore this class and pretend that generated messages implement
+ * the Message interface directly.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class GeneratedMessage extends AbstractMessage
+implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  /**
+   * For testing. Allows a test to disable the optimization that avoids using
+   * field builders for nested messages until they are requested. By disabling
+   * this optimization, existing tests can be reused to test the field 
builders.
+   */
+  protected static boolean alwaysUseFieldBuilders = false;
+
+  /** For use by generated code only.  */
+  protected UnknownFieldSet unknownFields;
+
+  protected GeneratedMessage() {
+unknownFields = UnknownFieldSet.getDefaultInstance();
+  }
+
+  protected GeneratedMessage(Builder builder) {
+unknownFields = builder.getUnknownFields();
+  }
+
+  @Override
+  public Parser<? extends GeneratedMessage> getParserForType() {
+throw new UnsupportedOperationException(
+"This is supposed to be overridden by subclasses.");
+  }
+
+ /**
+  * For testing. Allows a test to disable the optimization that avoids using
+  * field builders for nested messages until they are requested. By disabling
+  * this optimization, existing tests can be reused to test the field builders.
+  * See {@link RepeatedFieldBuilder} and {@link SingleFieldBuilder}.
+  */
+  static void enableAlwaysUseFieldBuildersForTesting() {
+alwaysUseFieldBuilders = true;

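As the class comment notes, generated messages can be driven entirely through the reflective Message interface without compile-time knowledge of the concrete type. A minimal sketch of that reflective path:

import java.util.Map;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;

final class ReflectiveDump {
  // Walks every populated field without compile-time knowledge of the type.
  static void dump(Message message) {
    for (Map.Entry<FieldDescriptor, Object> e : message.getAllFields().entrySet()) {
      System.out.println(e.getKey().getFullName() + " = " + e.getValue());
    }
  }
}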
[19/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
new file mode 100644
index 000..32e8b35
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Method.java
@@ -0,0 +1,1541 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/api.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * Method represents a method of an api.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Method}
+ */
+public  final class Method extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Method)
+MethodOrBuilder {
+  // Use Method.newBuilder() to construct.
+  private 
Method(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private Method() {
+name_ = "";
+requestTypeUrl_ = "";
+requestStreaming_ = false;
+responseTypeUrl_ = "";
+responseStreaming_ = false;
+options_ = java.util.Collections.emptyList();
+syntax_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Method(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 18: {
+java.lang.String s = input.readStringRequireUtf8();
+
+requestTypeUrl_ = s;
+break;
+  }
+  case 24: {
+
+requestStreaming_ = input.readBool();
+break;
+  }
+  case 34: {
+java.lang.String s = input.readStringRequireUtf8();
+
+responseTypeUrl_ = s;
+break;
+  }
+  case 40: {
+
+responseStreaming_ = input.readBool();
+break;
+  }
+  case 50: {
+if (!((mutable_bitField0_ & 0x0020) == 0x0020)) {
+  options_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0020;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 56: {
+int rawValue = input.readEnum();
+
+syntax_ = rawValue;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0020) == 0x0020)) {
+options_ = java.util.Collections.unmodifiableList(options_);
+  }
+  makeExtensionsImmutable();
+}
+  }
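
The case labels in the parse loop above are precomputed wire tags rather than field numbers: each tag is (field_number << 3) | wire_type, with wire type 0 for varints (bool, enum) and 2 for length-delimited data (strings, messages). A quick check of the constants used above:

final class WireTags {
  static int tagFor(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;   // protobuf wire-tag encoding
  }
  public static void main(String[] args) {
    // Wire types: 0 = varint (bool, enum), 2 = length-delimited (string, message).
    System.out.println(tagFor(1, 2));  // 10 -> name
    System.out.println(tagFor(2, 2));  // 18 -> request_type_url
    System.out.println(tagFor(3, 0));  // 24 -> request_streaming
    System.out.println(tagFor(4, 2));  // 34 -> response_type_url
    System.out.println(tagFor(5, 0));  // 40 -> response_streaming
    System.out.println(tagFor(6, 2));  // 50 -> options
    System.out.println(tagFor(7, 0));  // 56 -> syntax
  }
}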
+  public static final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+  getDescriptor() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ApiProto.internal_static_google_protobuf_Method_descriptor;
+  }
+
+  protected 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+  internalGetFieldAccessorTable() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.ApiProto.internal_static_google_protobuf_Method_fieldAccessorTable
+.ensureFieldAccessorsInitialized(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.class, 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Method.Builder.class);
+  }
+
+  private int bitField0_;
+  public static 

[16/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
new file mode 100644
index 000..99e306b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/RpcController.java
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * An {@code RpcController} mediates a single method call.  The primary
+ * purpose of the controller is to provide a way to manipulate settings
+ * specific to the RPC implementation and to find out about RPC-level errors.
+ *
+ * Starting with version 2.3.0, RPC implementations should not try to build
+ * on this, but should instead provide code generator plugins which generate
+ * code specific to the particular RPC implementation.  This way the generated
+ * code can be more appropriate for the implementation in use and can avoid
+ * unnecessary layers of indirection.
+ *
+ * The methods provided by the {@code RpcController} interface are intended
+ * to be a "least common denominator" set of features which we expect all
+ * implementations to support.  Specific implementations may provide more
+ * advanced features (e.g. deadline propagation).
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public interface RpcController {
+  // -
+  // These calls may be made from the client side only.  Their results
+  // are undefined on the server side (may throw RuntimeExceptions).
+
+  /**
+   * Resets the RpcController to its initial state so that it may be reused in
+   * a new call.  This can be called from the client side only.  It must not
+   * be called while an RPC is in progress.
+   */
+  void reset();
+
+  /**
+   * After a call has finished, returns true if the call failed.  The possible
+   * reasons for failure depend on the RPC implementation.  {@code failed()}
+   * must only be called on the client side, and must not be called before a
+   * call has finished.
+   */
+  boolean failed();
+
+  /**
+   * If {@code failed()} is {@code true}, returns a human-readable description
+   * of the error.
+   */
+  String errorText();
+
+  /**
+   * Advises the RPC system that the caller desires that the RPC call be
+   * canceled.  The RPC system may cancel it immediately, may wait awhile and
+   * then cancel it, or may not even cancel the call at all.  If the call is
+   * canceled, the "done" callback will still be called and the RpcController
+   * will indicate that the call failed at that time.
+   */
+  void startCancel();
+
+  // -
+  // These calls may be made from the server side only.  Their results
+  // are undefined on the client side (may throw RuntimeExceptions).
+
+  /**
+   * Causes {@code failed()} to return true on the client side.  {@code reason}
+   * will be incorporated into the 

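A minimal client-side sketch of the contract spelled out above; the concrete RpcController implementation and the stub issuing the call are deliberately left abstract, since the interface mandates neither:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;

final class ClientSideCheck {
  // 'issueRpc' stands in for a call through some generated service stub.
  static void callAndCheck(RpcController controller, Runnable issueRpc) {
    controller.reset();          // client side only; never while a call is live
    issueRpc.run();
    if (controller.failed()) {   // only meaningful after the call finishes
      System.err.println("rpc failed: " + controller.errorText());
    }
  }
}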
[14/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
new file mode 100644
index 000..491089b
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormat.java
@@ -0,0 +1,2108 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.CharBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Provide text parsing and formatting support for proto2 instances.
+ * The implementation largely follows google/protobuf/text_format.cc.
+ *
+ * @author wen...@google.com Wenbo Zhu
+ * @author ken...@google.com Kenton Varda
+ */
+public final class TextFormat {
+  private TextFormat() {}
+
+  private static final Logger logger =
+  Logger.getLogger(TextFormat.class.getName());
+
+  private static final Printer DEFAULT_PRINTER = new Printer();
+  private static final Printer SINGLE_LINE_PRINTER =
+  (new Printer()).setSingleLineMode(true);
+  private static final Printer UNICODE_PRINTER =
+  (new Printer()).setEscapeNonAscii(false);
+
+  /**
+   * Outputs a textual representation of the Protocol Message supplied into
+   * the parameter output. (This representation is the new version of the
+   * classic "ProtocolPrinter" output from the original Protocol Buffer system)
+   */
+  public static void print(
+  final MessageOrBuilder message, final Appendable output)
+  throws IOException {
+DEFAULT_PRINTER.print(message, new TextGenerator(output));
+  }
+
+  /** Outputs a textual representation of {@code fields} to {@code output}. */
+  public static void print(final UnknownFieldSet fields,
+   final Appendable output)
+   throws IOException {
+DEFAULT_PRINTER.printUnknownFields(fields, new TextGenerator(output));
+  }
+
+  /**
+   * Same as {@code print()}, except that non-ASCII characters are not
+   * escaped.
+   */
+  public static void printUnicode(
+  final MessageOrBuilder message, final Appendable output)
+  throws IOException {
+UNICODE_PRINTER.print(message, new TextGenerator(output));
+  }
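
A minimal sketch of using the printer entry points above; any MessageOrBuilder works, since print(...) only needs an Appendable to write into:

import java.io.IOException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;

final class TextFormatDemo {
  static String render(MessageOrBuilder message) throws IOException {
    StringBuilder sb = new StringBuilder();
    TextFormat.print(message, sb);   // default printer: multi-line, ASCII-escaped
    return sb.toString();
  }
}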
+
+  /**
+   * Same as {@code print()}, except that non-ASCII characters are not
+   * escaped.
+   */
+  public static void printUnicode(final UnknownFieldSet fields,
+  final Appendable output)
+  

[04/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestRpcServiceProtos.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestRpcServiceProtos.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestRpcServiceProtos.java
new file mode 100644
index 000..200a7f4
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/ipc/protobuf/generated/TestRpcServiceProtos.java
@@ -0,0 +1,575 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: test_rpc_service.proto
+
+package org.apache.hadoop.hbase.shaded.ipc.protobuf.generated;
+
+public final class TestRpcServiceProtos {
+  private TestRpcServiceProtos() {}
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
registry) {
+  }
+
+  public static void registerAllExtensions(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry 
registry) {
+registerAllExtensions(
+
(org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) 
registry);
+  }
+  /**
+   * 
+   **
+   * A protobuf service for use in tests
+   * 
+   *
+   * Protobuf service {@code TestProtobufRpcProto}
+   */
+  public static abstract class TestProtobufRpcProto
+  implements org.apache.hadoop.hbase.shaded.com.google.protobuf.Service {
+protected TestProtobufRpcProto() {}
+
+public interface Interface {
+  /**
+   * rpc ping(.EmptyRequestProto) returns 
(.EmptyResponseProto);
+   */
+  public abstract void ping(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc echo(.EchoRequestProto) returns (.EchoResponseProto);
+   */
+  public abstract void echo(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc error(.EmptyRequestProto) returns 
(.EmptyResponseProto);
+   */
+  public abstract void error(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc pause(.PauseRequestProto) returns 
(.EmptyResponseProto);
+   */
+  public abstract void pause(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.PauseRequestProto
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
+  /**
+   * rpc addr(.EmptyRequestProto) returns 
(.AddrResponseProto);
+   */
+  public abstract void addr(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+  
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto
 request,
+  
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done);
+
+}
+
+public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service 
newReflectiveService(
+final Interface impl) {
+  return new TestProtobufRpcProto() {
+@java.lang.Override
+public  void ping(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto
 request,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done) {
+  impl.ping(controller, request, done);
+}
+
+@java.lang.Override
+public  void echo(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto
 request,
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback
 done) {
+  impl.echo(controller, request, done);
+}
+
+@java.lang.Override
+public  void error(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController 
controller,
+
org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto
 request,
+

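A minimal sketch of wiring an implementation through the reflective adapter shown above. The trivial method bodies, and accessor names such as setMessage, are assumptions for illustration, not part of this patch:

import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos;
import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos;

final class TestServiceWiring {
  static Service build() {
    TestRpcServiceProtos.TestProtobufRpcProto.Interface impl =
        new TestRpcServiceProtos.TestProtobufRpcProto.Interface() {
          public void ping(RpcController c, TestProtos.EmptyRequestProto req,
              RpcCallback<TestProtos.EmptyResponseProto> done) {
            done.run(TestProtos.EmptyResponseProto.getDefaultInstance());
          }
          public void echo(RpcController c, TestProtos.EchoRequestProto req,
              RpcCallback<TestProtos.EchoResponseProto> done) {
            done.run(TestProtos.EchoResponseProto.newBuilder()
                .setMessage(req.getMessage()).build());   // assumed accessors
          }
          public void error(RpcController c, TestProtos.EmptyRequestProto req,
              RpcCallback<TestProtos.EmptyResponseProto> done) {
            c.setFailed("always fails");                  // server-side failure path
            done.run(TestProtos.EmptyResponseProto.getDefaultInstance());
          }
          public void pause(RpcController c, TestProtos.PauseRequestProto req,
              RpcCallback<TestProtos.EmptyResponseProto> done) {
            done.run(TestProtos.EmptyResponseProto.getDefaultInstance());
          }
          public void addr(RpcController c, TestProtos.EmptyRequestProto req,
              RpcCallback<TestProtos.AddrResponseProto> done) {
            done.run(TestProtos.AddrResponseProto.getDefaultInstance());
          }
        };
    return TestRpcServiceProtos.TestProtobufRpcProto.newReflectiveService(impl);
  }
}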
[25/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
new file mode 100644
index 000..584eba1
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/GeneratedMessageLite.java
@@ -0,0 +1,2419 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.LimitedInputStream;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageLite.EqualsVisitor.NotEqualsException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.BooleanList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.DoubleList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.FloatList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.IntList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.LongList;
+import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.ProtobufList;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.WireFormat.FieldType;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Lite version of {@link GeneratedMessage}.
+ *
+ * @author ken...@google.com Kenton Varda
+ */
+public abstract class GeneratedMessageLite<
+MessageType extends GeneratedMessageLite<MessageType, BuilderType>,
+BuilderType extends GeneratedMessageLite.Builder<MessageType, BuilderType>>
+extends AbstractMessageLite<MessageType, BuilderType> {
+
+  /** For use by generated code only. Lazily initialized to reduce 
allocations. */
+  protected UnknownFieldSetLite unknownFields = 
UnknownFieldSetLite.getDefaultInstance();
+
+  /** For use by generated code only.  */
+  protected int memoizedSerializedSize = -1;
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final Parser<MessageType> getParserForType() {
+return (Parser<MessageType>) dynamicMethod(MethodToInvoke.GET_PARSER);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final MessageType getDefaultInstanceForType() {
+return (MessageType) dynamicMethod(MethodToInvoke.GET_DEFAULT_INSTANCE);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked") // Guaranteed by runtime.
+  public final BuilderType newBuilderForType() {
+return (BuilderType) dynamicMethod(MethodToInvoke.NEW_BUILDER);
+  }
+
+  /**
+   * A reflective toString function. This is primarily intended as a developer 
aid, while keeping
+   * binary size down. The first line of the {@code toString()} representation 

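The self-referential type parameters on GeneratedMessageLite above are the recursively bounded ("CRTP") pattern: they let the shared base class return the concrete MessageType and BuilderType without casts at call sites. A standalone illustration with hypothetical names, not protobuf API:

// Standalone illustration only; not protobuf API.
abstract class Msg<M extends Msg<M, B>, B extends Msg.Builder<M, B>> {
  abstract B toBuilder();

  abstract static class Builder<M extends Msg<M, B>, B extends Builder<M, B>> {
    abstract M build();
  }
}

A concrete final class Foo extends Msg<Foo, Foo.Builder> then gets a correctly typed Foo.Builder from toBuilder(), which is how newBuilderForType() above can return BuilderType rather than a raw builder.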
[18/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
new file mode 100644
index 000..b1eca4f
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/MutabilityOracle.java
@@ -0,0 +1,48 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * Verifies that an object is mutable, throwing if not.
+ */
+interface MutabilityOracle {
+  static final MutabilityOracle IMMUTABLE = new MutabilityOracle() {
+@Override
+public void ensureMutable() {
+  throw new UnsupportedOperationException();
+}
+  };
+
+  /**
+   * Throws an {@link UnsupportedOperationException} if not mutable.
+   */
+  void ensureMutable();
+}
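
A minimal sketch of the guard pattern this interface enables; the container class here is an assumption for illustration (and, since MutabilityOracle is package-private, it would have to live in the same shaded protobuf package):

// Illustration only.
final class GuardedList {
  private final MutabilityOracle oracle;
  private final java.util.List<String> items = new java.util.ArrayList<>();

  GuardedList(MutabilityOracle oracle) { this.oracle = oracle; }

  void add(String s) {
    oracle.ensureMutable();   // throws UnsupportedOperationException when frozen
    items.add(s);
  }
}
// new GuardedList(MutabilityOracle.IMMUTABLE).add("x")    -> throws
// new GuardedList(() -> { /* mutable: no-op */ }).add("x") -> succeeds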

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
new file mode 100644
index 000..1610f80
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/NioByteString.java
@@ -0,0 +1,291 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 

[12/57] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around C

2017-07-07 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Type.java
--
diff --git 
a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Type.java
 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Type.java
new file mode 100644
index 000..f46f351
--- /dev/null
+++ 
b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Type.java
@@ -0,0 +1,1967 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/type.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * 
+ * A protocol buffer message type.
+ * 
+ *
+ * Protobuf type {@code google.protobuf.Type}
+ */
+public  final class Type extends
+org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 
implements
+// @@protoc_insertion_point(message_implements:google.protobuf.Type)
+TypeOrBuilder {
+  // Use Type.newBuilder() to construct.
+  private 
Type(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder
 builder) {
+super(builder);
+  }
+  private Type() {
+name_ = "";
+fields_ = java.util.Collections.emptyList();
+oneofs_ = 
org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList.EMPTY;
+options_ = java.util.Collections.emptyList();
+syntax_ = 0;
+  }
+
+  @java.lang.Override
+  public final 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+return 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Type(
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream 
input,
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite 
extensionRegistry)
+  throws 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 {
+this();
+int mutable_bitField0_ = 0;
+try {
+  boolean done = false;
+  while (!done) {
+int tag = input.readTag();
+switch (tag) {
+  case 0:
+done = true;
+break;
+  default: {
+if (!input.skipField(tag)) {
+  done = true;
+}
+break;
+  }
+  case 10: {
+java.lang.String s = input.readStringRequireUtf8();
+
+name_ = s;
+break;
+  }
+  case 18: {
+if (!((mutable_bitField0_ & 0x0002) == 0x0002)) {
+  fields_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0002;
+}
+fields_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Field.parser(),
 extensionRegistry));
+break;
+  }
+  case 26: {
+java.lang.String s = input.readStringRequireUtf8();
+if (!((mutable_bitField0_ & 0x0004) == 0x0004)) {
+  oneofs_ = new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.LazyStringArrayList();
+  mutable_bitField0_ |= 0x0004;
+}
+oneofs_.add(s);
+break;
+  }
+  case 34: {
+if (!((mutable_bitField0_ & 0x0008) == 0x0008)) {
+  options_ = new 
java.util.ArrayList();
+  mutable_bitField0_ |= 0x0008;
+}
+options_.add(
+
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.Option.parser(),
 extensionRegistry));
+break;
+  }
+  case 42: {
+
org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.Builder 
subBuilder = null;
+if (sourceContext_ != null) {
+  subBuilder = sourceContext_.toBuilder();
+}
+sourceContext_ = 
input.readMessage(org.apache.hadoop.hbase.shaded.com.google.protobuf.SourceContext.parser(),
 extensionRegistry);
+if (subBuilder != null) {
+  subBuilder.mergeFrom(sourceContext_);
+  sourceContext_ = subBuilder.buildPartial();
+}
+
+break;
+  }
+  case 48: {
+int rawValue = input.readEnum();
+
+syntax_ = rawValue;
+break;
+  }
+}
+  }
+} catch 
(org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException
 e) {
+  throw e.setUnfinishedMessage(this);
+} catch (java.io.IOException e) {
+  throw new 
org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+  e).setUnfinishedMessage(this);
+} finally {
+  if (((mutable_bitField0_ & 0x0002) == 0x0002)) {
+fields_ = java.util.Collections.unmodifiableList(fields_);

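The mutable_bitField0_ tests above are plain bit-flag bookkeeping: one bit per repeated field records whether its mutable list has been allocated during this parse, and the finally block reuses the same bit to decide whether to freeze the list. The same logic in isolation:

final class BitFieldDemo {
  public static void main(String[] args) {
    int mutableBits = 0;
    final int FIELDS_BIT = 0x00000002;   // "fields_ list has been allocated"
    for (int element = 0; element < 3; element++) {
      if ((mutableBits & FIELDS_BIT) != FIELDS_BIT) {
        System.out.println("allocating fields_ exactly once");
        mutableBits |= FIELDS_BIT;       // record the allocation
      }
    }
    // The generated finally block reuses the same test to decide whether
    // to freeze the list via Collections.unmodifiableList(fields_).
    System.out.println((mutableBits & FIELDS_BIT) == FIELDS_BIT);
  }
}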
hbase git commit: HBASE-18327 redo test-patch personality 'hadoopcheck' to better account for feature branches

2017-07-07 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/master 7f9372978 -> 30d06dfe3


HBASE-18327 redo test-patch personality 'hadoopcheck' to better account for 
feature branches

Signed-off-by: Ashish Singhi 
Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/30d06dfe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/30d06dfe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/30d06dfe

Branch: refs/heads/master
Commit: 30d06dfe3aa29ec0acc0e0ba0aa1fd21919ce64e
Parents: 7f93729
Author: Sean Busbey 
Authored: Thu Jul 6 15:56:02 2017 -0400
Committer: Sean Busbey 
Committed: Fri Jul 7 23:07:50 2017 -0400

--
 dev-support/hbase-personality.sh | 33 ++---
 1 file changed, 14 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/30d06dfe/dev-support/hbase-personality.sh
--
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 7625403..990a49a 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -51,16 +51,18 @@ function personality_globals
   #shellcheck disable=SC2034
   GITHUB_REPO="apache/hbase"
 
-  # TODO use PATCH_BRANCH to select hadoop versions to use.
   # All supported Hadoop versions that we want to test the compilation with
-  HBASE_MASTER_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 
2.7.3"
-  HBASE_MASTER_HADOOP3_VERSIONS="3.0.0-alpha3"
-
-  HBASE_BRANCH2_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 
2.7.3"
-  HBASE_BRANCH2_HADOOP3_VERSIONS="3.0.0-alpha3"
-
-  HBASE_HADOOP2_VERSIONS="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 
2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
-  HBASE_HADOOP3_VERSIONS=""
+  # See the Hadoop section on prereqs in the HBase Reference Guide
+  if [[ "${PATCH_BRANCH}" = branch-1* ]]; then
+HBASE_HADOOP2_VERSIONS="2.4.0 2.4.1 2.5.0 2.5.1 2.5.2 2.6.1 2.6.2 2.6.3 
2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
+HBASE_HADOOP3_VERSIONS=""
+  elif [[ ${PATCH_BRANCH} = branch-2* ]]; then
+HBASE_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
+HBASE_HADOOP3_VERSIONS="3.0.0-alpha3"
+  else # master or a feature branch
+HBASE_HADOOP2_VERSIONS="2.6.1 2.6.2 2.6.3 2.6.4 2.6.5 2.7.1 2.7.2 2.7.3"
+HBASE_HADOOP3_VERSIONS="3.0.0-alpha3"
+  fi
 
   # TODO use PATCH_BRANCH to select jdk versions to use.
 
@@ -196,16 +198,9 @@ function hadoopcheck_rebuild
 
   big_console_header "Compiling against various Hadoop versions"
 
-  if [[ "${PATCH_BRANCH}" = "master" ]]; then
-hbase_hadoop2_versions=${HBASE_MASTER_HADOOP2_VERSIONS}
-hbase_hadoop3_versions=${HBASE_MASTER_HADOOP3_VERSIONS}
-  elif [[ ${PATCH_BRANCH} = branch-2* ]]; then
-hbase_hadoop2_versions=${HBASE_BRANCH2_HADOOP2_VERSIONS}
-hbase_hadoop3_versions=${HBASE_BRANCH2_HADOOP3_VERSIONS}
-  else
-hbase_hadoop2_versions=${HBASE_HADOOP2_VERSIONS}
-hbase_hadoop3_versions=${HBASE_HADOOP3_VERSIONS}
-  fi
+  hbase_hadoop2_versions=${HBASE_HADOOP2_VERSIONS}
+  hbase_hadoop3_versions=${HBASE_HADOOP3_VERSIONS}
+
 
   export MAVEN_OPTS="${MAVEN_OPTS}"
   for hadoopver in ${hbase_hadoop2_versions}; do



hbase git commit: HBASE-18317 Implement async admin operations for Normalizer/CleanerChore/CatalogJanitor

2017-07-07 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 359f97711 -> 5d4e4f200


HBASE-18317 Implement async admin operations for 
Normalizer/CleanerChore/CatalogJanitor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d4e4f20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d4e4f20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d4e4f20

Branch: refs/heads/branch-2
Commit: 5d4e4f20070c4a1d00608509123da5d5df688935
Parents: 359f977
Author: Guanghao Zhang 
Authored: Fri Jul 7 21:13:38 2017 +0800
Committer: Guanghao Zhang 
Committed: Sat Jul 8 10:59:01 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  | 131 +
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  75 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 190 +++
 .../hbase/client/TestAsyncBalancerAdminApi.java |  54 --
 .../hbase/client/TestAsyncToolAdminApi.java | 127 +
 5 files changed, 441 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d4e4f20/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 8ade209..8411a5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -284,40 +284,6 @@ public interface AsyncAdmin {
   CompletableFuture listNamespaceDescriptors();
 
   /**
-   * Turn the load balancer on or off.
-   * @param on
-   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture setBalancerOn(boolean on);
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
-   * reassignments. Can NOT run for various reasons. Check logs.
-   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
-   * {@link CompletableFuture}.
-   */
-  default CompletableFuture balance() {
-return balance(false);
-  }
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
-   * reassignments. If there is region in transition, force parameter of true 
would still run
-   * balancer. Can *not* run for other reasons. Check logs.
-   * @param forcible whether we should force balance even if there is region 
in transition.
-   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture balance(boolean forcible);
-
-  /**
-   * Query the current state of the balancer.
-   * @return true if the balance switch is on, false otherwise The return 
value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture isBalancerOn();
-
-  /**
* Close a region. For expert-admins Runs close on the regionserver. The 
master will not be
* informed of the close.
* @param regionName region name to close
@@ -891,4 +857,101 @@ public interface AsyncAdmin {
* @return the last major compaction timestamp wrapped by a {@link 
CompletableFuture}
*/
   CompletableFuture 
getLastMajorCompactionTimestampForRegion(byte[] regionName);
+
+  /**
+   * Turn the load balancer on or off.
+   * @param on
+   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture setBalancerOn(boolean on);
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
+   * reassignments. Can NOT run for various reasons. Check logs.
+   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
+   * {@link CompletableFuture}.
+   */
+  default CompletableFuture balance() {
+return balance(false);
+  }
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
+   * reassignments. If there is region in transition, force parameter of true 
would still run
+   * balancer. Can *not* run for other reasons. Check logs.
+   * @param forcible whether we should force balance even if there is region 
in transition.
+   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
+   * {@link CompletableFuture}.
+   */
+  CompletableFuture balance(boolean forcible);
+
+  /**
+   * Query the current state of the balancer.
+   * @return true if the balance switch is on, false otherwise. The return 
value will be wrapped by 

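A minimal sketch of chaining the balancer calls this patch reorganizes; obtaining the AsyncAdmin from AsyncConnection.getAdmin() is an assumption here, and the Boolean-typed futures follow the Javadoc above:

import org.apache.hadoop.hbase.client.AsyncAdmin;

final class BalancerKick {
  // 'admin' is assumed to come from AsyncConnection.getAdmin().
  static void rebalance(AsyncAdmin admin) {
    admin.setBalancerOn(true)                    // future of the previous switch value
        .thenCompose(prev -> admin.balance())    // default overload: not forcible
        .thenAccept(ran -> System.out.println("balancer ran: " + ran))
        .exceptionally(t -> { t.printStackTrace(); return null; });
  }
}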
hbase git commit: HBASE-18317 Implement async admin operations for Normalizer/CleanerChore/CatalogJanitor

2017-07-07 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 48d28c7a2 -> 7f9372978


HBASE-18317 Implement async admin operations for 
Normalizer/CleanerChore/CatalogJanitor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7f937297
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7f937297
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7f937297

Branch: refs/heads/master
Commit: 7f937297821f5fb27c5ed32a4c0d4e5bc4a18956
Parents: 48d28c7
Author: Guanghao Zhang 
Authored: Fri Jul 7 21:13:38 2017 +0800
Committer: Guanghao Zhang 
Committed: Sat Jul 8 10:55:10 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  | 131 +
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  75 ++--
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 190 +++
 .../hbase/client/TestAsyncBalancerAdminApi.java |  54 --
 .../hbase/client/TestAsyncToolAdminApi.java | 127 +
 5 files changed, 441 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7f937297/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 8ade209..8411a5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -284,40 +284,6 @@ public interface AsyncAdmin {
   CompletableFuture listNamespaceDescriptors();
 
   /**
-   * Turn the load balancer on or off.
-   * @param on
-   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture setBalancerOn(boolean on);
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
-   * reassignments. Can NOT run for various reasons. Check logs.
-   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
-   * {@link CompletableFuture}.
-   */
-  default CompletableFuture balance() {
-return balance(false);
-  }
-
-  /**
-   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
-   * reassignments. If there is region in transition, force parameter of true 
would still run
-   * balancer. Can *not* run for other reasons. Check logs.
-   * @param forcible whether we should force balance even if there is region 
in transition.
-   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture balance(boolean forcible);
-
-  /**
-   * Query the current state of the balancer.
-   * @return true if the balance switch is on, false otherwise The return 
value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture isBalancerOn();
-
-  /**
* Close a region. For expert-admins Runs close on the regionserver. The 
master will not be
* informed of the close.
* @param regionName region name to close
@@ -891,4 +857,101 @@ public interface AsyncAdmin {
* @return the last major compaction timestamp wrapped by a {@link 
CompletableFuture}
*/
   CompletableFuture 
getLastMajorCompactionTimestampForRegion(byte[] regionName);
+
+  /**
+   * Turn the load balancer on or off.
+   * @param on
+   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture setBalancerOn(boolean on);
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
+   * reassignments. Can NOT run for various reasons. Check logs.
+   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
+   * {@link CompletableFuture}.
+   */
+  default CompletableFuture balance() {
+return balance(false);
+  }
+
+  /**
+   * Invoke the balancer. Will run the balancer and if regions to move, it 
will go ahead and do the
+   * reassignments. If there is region in transition, force parameter of true 
would still run
+   * balancer. Can *not* run for other reasons. Check logs.
+   * @param forcible whether we should force balance even if there is region 
in transition.
+   * @return True if balancer ran, false otherwise. The return value will be 
wrapped by a
+   * {@link CompletableFuture}.
+   */
+  CompletableFuture balance(boolean forcible);
+
+  /**
+   * Query the current state of the balancer.
+   * @return true if the balance switch is on, false otherwise. The return 
value will be wrapped by a
+ 

hbase git commit: HBASE-18335 configuration guide fixes

2017-07-07 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master b0a5fa0c2 -> 48d28c7a2


HBASE-18335 configuration guide fixes

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/48d28c7a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/48d28c7a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/48d28c7a

Branch: refs/heads/master
Commit: 48d28c7a248d0f8c19b18c20d4484864ac67b549
Parents: b0a5fa0
Author: Artem Ervits 
Authored: Fri Jul 7 10:52:06 2017 -0400
Committer: tedyu 
Committed: Fri Jul 7 14:37:11 2017 -0700

--
 src/main/asciidoc/_chapters/configuration.adoc | 63 ++---
 src/main/asciidoc/_chapters/hbase-default.adoc | 40 ++---
 2 files changed, 51 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/48d28c7a/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 2113b2f..bf14d11 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -79,11 +79,10 @@ To check for well-formedness and only print output if 
errors exist, use the comm
 .Keep Configuration In Sync Across the Cluster
 [WARNING]
 
-When running in distributed mode, after you make an edit to an HBase 
configuration, make sure you copy the content of the _conf/_ directory to all 
nodes of the cluster.
+When running in distributed mode, after you make an edit to an HBase 
configuration, make sure you copy the contents of the _conf/_ directory to all 
nodes of the cluster.
 HBase will not do this for you.
 Use `rsync`, `scp`, or another secure mechanism for copying the configuration 
files to your nodes.
-For most configuration, a restart is needed for servers to pick up changes An 
exception is dynamic configuration.
-to be described later below.
+For most configurations, a restart is needed for servers to pick up changes. 
Dynamic configuration is an exception to this, to be described later below.
 
 
 [[basic.prerequisites]]
@@ -131,11 +130,11 @@ DNS::
   HBase uses the local hostname to self-report its IP address. Both forward 
and reverse DNS resolving must work in versions of HBase previous to 0.92.0. 
The link:https://github.com/sujee/hadoop-dns-checker[hadoop-dns-checker] tool 
can be used to verify DNS is working correctly on the cluster. The project 
`README` file provides detailed instructions on usage.
 
 Loopback IP::
-  Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer 
to `localhost`, and this could not be configured.
+  Prior to hbase-0.96.0, HBase only used the IP address `127.0.0.1` to refer 
to `localhost`, and this was not configurable.
   See <> for more details.
 
 NTP::
-  The clocks on cluster nodes should be synchronized. A small amount of 
variation is acceptable, but larger amounts of skew can cause erratic and 
unexpected behavior. Time synchronization is one of the first things to check 
if you see unexplained problems in your cluster. It is recommended that you run 
a Network Time Protocol (NTP) service, or another time-synchronization 
mechanism, on your cluster, and that all nodes look to the same service for 
time synchronization. See the 
link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP 
Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up 
NTP.
+  The clocks on cluster nodes should be synchronized. A small amount of 
variation is acceptable, but larger amounts of skew can cause erratic and 
unexpected behavior. Time synchronization is one of the first things to check 
if you see unexplained problems in your cluster. It is recommended that you run 
a Network Time Protocol (NTP) service, or another time-synchronization 
mechanism on your cluster and that all nodes look to the same service for time 
synchronization. See the 
link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP 
Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up 
NTP.
 
 [[ulimit]]
 Limits on Number of Files and Processes (ulimit)::
@@ -176,8 +175,8 @@ Linux Shell::
   All of the shell scripts that come with HBase rely on the 
link:http://www.gnu.org/software/bash[GNU Bash] shell.
 
 Windows::
-  Prior to HBase 0.96, testing for running HBase on Microsoft Windows was 
limited.
-  Running a on Windows nodes is not recommended for production systems.
+  Prior to HBase 0.96, running HBase on Microsoft Windows was limited only for 
testing purposes.
+  Running production systems on Windows machines is not recommended. 
 

[33/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
index b261615..cecc275 100644
--- a/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/classification/package-tree.html
@@ -88,11 +88,11 @@
 
 Annotation Type Hierarchy
 
-org.apache.hadoop.hbase.classification.InterfaceStability.Stable (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceStability.Evolving (implements java.lang.annotation.Annotation)
-org.apache.hadoop.hbase.classification.InterfaceAudience.Private (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceAudience.Public (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate (implements java.lang.annotation.Annotation)
+org.apache.hadoop.hbase.classification.InterfaceAudience.Private (implements java.lang.annotation.Annotation)
+org.apache.hadoop.hbase.classification.InterfaceStability.Stable (implements java.lang.annotation.Annotation)
 org.apache.hadoop.hbase.classification.InterfaceStability.Unstable (implements java.lang.annotation.Annotation)
 
 



[30/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index efc1a6a..8403c18 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
+private class RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer
 extends RawAsyncHBaseAdmin.TableProcedureBiConsumer
 
 
@@ -240,7 +240,7 @@ extends 
 
 AddColumnFamilyProcedureBiConsumer
-AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
+AddColumnFamilyProcedureBiConsumer(AsyncAdmin admin,
   TableName tableName)
 
 
@@ -258,7 +258,7 @@ extends 
 
 getOperationType
-String getOperationType()
+String getOperationType()
 
 Specified by:
 getOperationType in class RawAsyncHBaseAdmin.TableProcedureBiConsumer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index fb06c5e..403469c 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
+private static interface RawAsyncHBaseAdmin.AdminRpcCall<RESP,REQ>
 
 
 
@@ -159,7 +159,7 @@ private static interface 
 
 call
-void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
+void call(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService.Interface stub,
   HBaseRpcController controller,
   REQ req,
   org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<RESP> done)
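
AdminRpcCall is the small functional-interface adapter this class uses to hand a request and a protobuf callback to the generated stub. A self-contained analogue of the pattern (all names here are illustrative, not the HBase ones), showing how such a callback is typically bridged to a CompletableFuture:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

// Stand-in for the generated protobuf stub plus callback machinery.
interface RpcCall<RESP, REQ> {
  void call(REQ req, BiConsumer<RESP, Throwable> done);
}

public class RpcCallDemo {
  // Wraps a callback-style RPC into a CompletableFuture, the same shape used
  // to adapt stub.xxx(controller, req, done) into an asynchronous result.
  static <RESP, REQ> CompletableFuture<RESP> invoke(RpcCall<RESP, REQ> rpc, REQ req) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    rpc.call(req, (resp, err) -> {
      if (err != null) future.completeExceptionally(err);
      else future.complete(resp);
    });
    return future;
  }

  public static void main(String[] args) {
    RpcCall<String, Integer> echo = (req, done) -> done.accept("echo:" + req, null);
    System.out.println(invoke(echo, 42).join()); // prints echo:42
  }
}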

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
index 42b0fa0..7b797b2 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.Converter.html
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @FunctionalInterface
-private static interface RawAsyncHBaseAdmin.Converter<D,S>
+private static interface RawAsyncHBaseAdmin.Converter<D,S>
 
 
 
@@ -156,7 +156,7 @@ private static interface 
 
 convert
-D convert(S src)
+D convert(S src)
     throws IOException
 
 Throws:
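
Converter is the companion adapter: a function that may throw IOException, which is what protobuf-response-to-result conversion needs. A self-contained analogue (illustrative names, not the HBase declarations):

import java.io.IOException;

// Like java.util.function.Function<S, D>, but allowed to throw IOException.
@FunctionalInterface
interface Converter<D, S> {
  D convert(S src) throws IOException;
}

public class ConverterDemo {
  public static void main(String[] args) throws IOException {
    Converter<Integer, String> parse = Integer::valueOf;
    System.out.println(parse.convert("42")); // prints 42
  }
}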

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 9057e21..8dc8236 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer
+private class 

[07/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.ProcedureInfo;
-import org.apache.hadoop.hbase.RegionLocations;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotEnabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
-import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-import org.apache.hadoop.hbase.client.replication.TableCFs;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.quotas.QuotaFilter;
-import org.apache.hadoop.hbase.quotas.QuotaSettings;
-import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-import 

[12/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyNamespaceProcedureBiConsumer.html

[09/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TableOperator.html

[37/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html 
b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 36768a6..8abc3e6 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -25,786 +25,881 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.util.List;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ProcedureInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.quotas.QuotaFilter;
-import org.apache.hadoop.hbase.quotas.QuotaSettings;
-import org.apache.hadoop.hbase.client.replication.TableCFs;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.util.Pair;
-
-/**
- * The asynchronous administrative API for HBase.
- * <p>
- * This feature is still under development, so marked as IA.Private. Will change to public when
- * done. Use it with caution.
- */
-@InterfaceAudience.Public
-public interface AsyncAdmin {
-
-  /**
-   * @param tableName Table to check.
-   * @return True if table exists already. The return value will be wrapped by a
-   * {@link CompletableFuture}.
-   */
-  CompletableFuture<Boolean> tableExists(TableName tableName);
-
-  /**
-   * List all the userspace tables.
-   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-   * @see #listTables(Optional, boolean)
-   */
-  default CompletableFuture<List<TableDescriptor>> listTables() {
-    return listTables(Optional.empty(), false);
-  }
-
-  /**
-   * List all the tables matching the given pattern.
-   * @param pattern The compiled regular expression to match against
-   * @param includeSysTables False to match only against userspace tables
-   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
-      boolean includeSysTables);
-
-  /**
-   * List all of the names of userspace tables.
-   * @return a list of table names wrapped by a {@link CompletableFuture}.
-   * @see #listTableNames(Optional, boolean)
-   */
-  default CompletableFuture<List<TableName>> listTableNames() {
-    return listTableNames(Optional.empty(), false);
-  }
-
-  /**
-   * List all of the names of userspace tables.
-   * @param pattern The regular expression to match against
-   * @param includeSysTables False to match only against userspace tables
-   * @return a list of table names wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
-      boolean includeSysTables);
-
-  /**
-   * Method for getting the tableDescriptor
-   * @param tableName as a {@link TableName}
-   * @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
-   */
-  CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName);
-
-  /**
-   * Creates a new table.
-   * @param desc table descriptor for table
-   */
-  default CompletableFuture<Void> createTable(TableDescriptor desc) {
-    return createTable(desc, Optional.empty());
-  }
-
-  /**
-   * Creates a new table with the specified number of regions. The start key specified will become
-   * the end key of the first region of the table, and the end key specified will become the start
-   * key of the last region of the table (the first region has a null start key and the last region
-   * has a null end key). BigInteger math will be used to divide the key range specified into enough
-   * segments to make the required number of total regions.
-   * @param desc table descriptor for table
-   * @param startKey beginning of key range
-   * @param endKey end of key range
-   * @param numRegions the total number of regions to create
-   */
-  CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
-      int numRegions);
-
-  /**
-   * Creates a new table with
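
The pre-split createTable above only needs the range boundaries; the split points themselves are computed with BigInteger math. A usage sketch against this interface (TableDescriptorBuilder, ColumnFamilyDescriptorBuilder, and their method names are assumptions from the same development line, not part of this listing):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // 16 regions: the first region is (null, "aaaa"), the last is ("zzzz", null),
  // and the remaining split points are computed by dividing the range.
  static CompletableFuture<Void> createPreSplit(AsyncAdmin admin) {
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    return admin.createTable(desc, Bytes.toBytes("aaaa"), Bytes.toBytes("zzzz"), 16);
  }
}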

[11/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.NamespaceProcedureBiConsumer.html

[29/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html 
b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index d22d7b6..f403a65 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10,"i81":10,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":10,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i
 109":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class RawAsyncHBaseAdmin
+public class RawAsyncHBaseAdmin
 extends Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -592,80 +592,127 @@ implements 
+CompletableFuture<ClusterStatus> getClusterStatus()
+
+CompletableFuture<CompactionState> getCompactionState(TableName tableName)
+  Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> getCompactionStateForRegion(byte[] regionName)
+  Get the current compaction state of region.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestamp(TableName tableName)
+  Get the timestamp of the last major compaction for the passed table.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName)
+  Get the timestamp of the last major compaction for the passed region.

[15/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.MasterRpcCall.html

[03/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
index 2a7863b..8ea1e1f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/package-summary.html
@@ -190,895 +190,899 @@
 
 
 
+TestAsyncClusterAdminApi
 TestAsyncNamespaceAdminApi
   Class to test asynchronous namespace admin operations.
 TestAsyncNonMetaRegionLocator
 TestAsyncNonMetaRegionLocatorConcurrenyLimit
 TestAsyncNonMetaRegionLocatorConcurrenyLimit.CountingRegionObserver
 TestAsyncProcedureAdminApi
   Class to test asynchronous procedure admin operations.
 TestAsyncProcess
 TestAsyncProcess.AsyncProcessForThrowableCheck
 TestAsyncProcess.AsyncProcessWithFailure
 TestAsyncProcess.CallerWithFailure
 TestAsyncProcess.CountingThreadFactory
 TestAsyncProcess.MyAsyncProcess
 TestAsyncProcess.MyAsyncRequestFutureImpl<Res>
 TestAsyncProcess.MyClientBackoffPolicy
   Make the backoff time always different on each call.
 TestAsyncProcess.MyConnectionImpl
   Returns our async process.
 TestAsyncProcess.MyConnectionImpl.TestRegistry
 TestAsyncProcess.MyConnectionImpl2
   Returns our async process.
 TestAsyncProcess.MyThreadPoolExecutor
 TestAsyncQuotaAdminApi
 TestAsyncRegionAdminApi
   Class to test asynchronous region admin operations.
 TestAsyncRegionLocatorTimeout
 TestAsyncRegionLocatorTimeout.SleepRegionObserver
 TestAsyncReplicationAdminApi
   Class to test asynchronous replication admin operations.
 TestAsyncSingleRequestRpcRetryingCaller
 TestAsyncSnapshotAdminApi
 TestAsyncTable
 TestAsyncTableAdminApi
   Class to test asynchronous table admin operations.
 TestAsyncTableBatch
 TestAsyncTableBatch.ErrorInjectObserver
 TestAsyncTableGetMultiThreaded
   Will split the table, and move region randomly when testing.
 TestAsyncTableGetMultiThreadedWithBasicCompaction
 TestAsyncTableGetMultiThreadedWithEagerCompaction
 TestAsyncTableNoncedRetry
 TestAsyncTableScan
 TestAsyncTableScanAll
 TestAsyncTableScanMetrics
 TestAsyncTableScanner
 TestAsyncTableScannerCloseWhileSuspending
 TestAsyncTableScanRenewLease
 TestAsyncTableScanRenewLease.RenewLeaseConsumer
 TestAttributes
 TestAvoidCellReferencesIntoShippedBlocks
 TestAvoidCellReferencesIntoShippedBlocks.CompactorRegionObserver
 TestAvoidCellReferencesIntoShippedBlocks.CompactorStoreScanner
 TestAvoidCellReferencesIntoShippedBlocks.ScannerThread
 TestBatchScanResultCache
 TestBlockEvictionFromClient
 TestBlockEvictionFromClient.CustomInnerRegionObserver
 TestBlockEvictionFromClient.CustomInnerRegionObserverWrapper
 TestBlockEvictionFromClient.CustomScanner
 TestBlockEvictionFromClient.GetThread
 TestBlockEvictionFromClient.MultiGetThread
 TestBlockEvictionFromClient.ScanThread
 TestBufferedMutator
 TestBufferedMutator.DoNothingRegistry
   Registry that does nothing.
 TestBufferedMutator.MyBufferedMutator
   My BufferedMutator.
 TestBufferedMutatorParams
 TestCheckAndMutate
 TestClientExponentialBackoff
 TestClientNoCluster
   Test client behavior w/o setting up a cluster.
 TestClientNoCluster.FakeServer
   Fake 'server'.
 TestClientNoCluster.ManyServersManyRegionsConnection
   Fake many regionservers and many regions on a connection implementation.
 TestClientNoCluster.MetaRowsComparator
   Comparator for meta row keys.
 TestClientNoCluster.RegionServerStoppedOnScannerOpenConnection
   Override to shutdown going to zookeeper for cluster id and meta location.
 TestClientNoCluster.RpcTimeoutConnection
   Override to check we are setting rpc timeout right.
 TestClientNoCluster.SimpleRegistry
   Simple cluster registry inserted in place of our usual zookeeper based one.
 TestClientOperationInterrupt
 TestClientOperationInterrupt.TestCoprocessor
 TestClientPushback
   Test that we can actually send and use region metrics to slowdown client writes
 TestClientScanner
   Test the ClientScanner.
 TestClientScanner.MockClientScanner
 TestClientScanner.MockRpcRetryingCallerFactory
 TestClientScannerRPCTimeout
   Test the scenario where a HRegionServer#scan() call, while scanning, timeout at client side and getting retried.

[25/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AddColumnFamilyProcedureBiConsumer.html

[24/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
index 9a6c30b..af6a1dd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.AdminRpcCall.html

hbase-site git commit: INFRA-10751 Empty commit

2017-07-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 1b6d8c106 -> 225052054


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/22505205
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/22505205
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/22505205

Branch: refs/heads/asf-site
Commit: 2250520541e11c8af84b8e22984c0d24450fc64e
Parents: 1b6d8c1
Author: jenkins 
Authored: Fri Jul 7 15:01:56 2017 +
Committer: jenkins 
Committed: Fri Jul 7 15:01:56 2017 +

--

--




[35/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index ada4075..945d1bd 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2242,
- Errors: 14686,
+ Errors: 14711,
  Warnings: 0,
  Infos: 0
   
@@ -8987,7 +8987,7 @@ under the License.
   0
 
 
-  92
+  113
 
   
   
@@ -19935,7 +19935,7 @@ under the License.
   0
 
 
-  8
+  12
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/constant-values.html
--
diff --git a/devapidocs/constant-values.html b/devapidocs/constant-values.html
index dc26dd7..ba7651c 100644
--- a/devapidocs/constant-values.html
+++ b/devapidocs/constant-values.html
@@ -3815,21 +3815,21 @@
 public static final String date
-"Fri Jul  7 06:19:53 UTC 2017"
+"Fri Jul  7 14:39:22 UTC 2017"

 public static final String revision
-"4fe73857679ecba89a7edd3c17d9f92e4c0e2164"
+"b0a5fa0c2a119168c4272e5efba16a3ef9e9c329"

 public static final String srcChecksum
-"f8db4c8dbd8071e110b06b155192969e"
+"28747e804e799f458cbf39955a61b430"
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index d5b2248..441a4c7 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -33182,6 +33182,8 @@
 Get the info port of the backup master if it is available.
+getBackupMasters() - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
 getBackupMasters() - Method in class org.apache.hadoop.hbase.ClusterStatus
 getBackupMastersSize() - Method in class org.apache.hadoop.hbase.ClusterStatus
@@ -34276,8 +34278,14 @@
 getClusterStatus() - Method in interface org.apache.hadoop.hbase.client.Admin
+getClusterStatus() - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
+getClusterStatus() - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin
 getClusterStatus() - Method in class org.apache.hadoop.hbase.client.HBaseAdmin
+getClusterStatus() - Method in class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin
 getClusterStatus(RpcController, MasterProtos.GetClusterStatusRequest) - Method in class org.apache.hadoop.hbase.client.ShortCircuitMasterConnection
 getClusterStatus() - Method in class org.apache.hadoop.hbase.master.HMaster
@@ -34592,12 +34600,20 @@
 Get the current compaction state of a table.
+getCompactionState(TableName) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
+  Get the current compaction state of a table.
+getCompactionState(TableName) - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin
 getCompactionState(TableName) - Method in class org.apache.hadoop.hbase.client.HBaseAdmin
 getCompactionState(TableName, CompactType) - Method in class org.apache.hadoop.hbase.client.HBaseAdmin
   Get the current compaction state of a table.
+getCompactionState(TableName) - Method in class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin
 getCompactionState() - Method in class org.apache.hadoop.hbase.regionserver.HRegion
 getCompactionState() - Method in interface org.apache.hadoop.hbase.regionserver.Region
   Get the current compaction state of region.
@@ -34606,8 +34622,16 @@
 Get the current compaction state of region.
+getCompactionStateForRegion(byte[]) - Method in interface org.apache.hadoop.hbase.client.AsyncAdmin
+  Get the current compaction state of region.
+getCompactionStateForRegion(byte[]) - Method in class org.apache.hadoop.hbase.client.AsyncHBaseAdmin
 getCompactionStateForRegion(byte[]) - Method in class org.apache.hadoop.hbase.client.HBaseAdmin
+getCompactionStateForRegion(byte[]) - Method in class org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin
 getCompactionThroughputController() - Method in class org.apache.hadoop.hbase.regionserver.CompactSplit
 getCompactionType() - Method in class org.apache.hadoop.hbase.mob.compactions.MobCompactionRequest
@@ -38916,8 +38940,16 @@
 The timestamp of the oldest 

[22/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import 

[38/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/apidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html b/apidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
index 683e30c..6504731 100644
--- a/apidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
+++ b/apidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
@@ -138,6 +138,27 @@ the order they are declared.
+Methods in org.apache.hadoop.hbase.client that return types with arguments of type CompactionState:
+
+CompletableFuture<CompactionState> AsyncAdmin.getCompactionState(TableName tableName)
+  Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> AsyncAdmin.getCompactionStateForRegion(byte[] regionName)
+  Get the current compaction state of region.
 
 
 



[36/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index df5ab4d..d0dbe7d 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -289,7 +289,7 @@
 2242
 0
 0
-14686
+14711
 
 Files
 
@@ -762,7 +762,7 @@
 org/apache/hadoop/hbase/client/AsyncAdmin.java
 0
 0
-8
+12
 
 org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 0
@@ -1057,7 +1057,7 @@
 org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
 0
 0
-92
+113
 
 org/apache/hadoop/hbase/client/RawAsyncTable.java
 0
@@ -7312,7 +7312,7 @@
 imports
 AvoidStarImport
-15
+16
 Error

@@ -7321,7 +7321,7 @@
 ordered: true
 sortStaticImportsAlphabetically: true
 option: top
-934
+936
 Error

@@ -7333,7 +7333,7 @@
 UnusedImports
 processJavadoc: true
-93
+94
 Error

 indentation
@@ -7344,19 +7344,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-5155
+5171
 Error

 javadoc
 JavadocTagContinuationIndentation
 offset: 2
-799
+793
 Error

 NonEmptyAtclauseDescription
-3229
+3238
 Error

 misc
@@ -7374,7 +7374,7 @@
 max: 100
 ignorePattern: ^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-941
+943
 Error
 
 
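For orientation, here is a small hypothetical Java fragment (not taken from any patch in this thread; the package, class, and method names are invented) showing the kind of source that the checkstyle modules tallied above actually flag:

package org.example.checkstyle;

import java.util.*;                            // AvoidStarImport: the '.*' form of import is reported
import org.apache.commons.logging.Log;         // UnusedImports (processJavadoc: true): reported when never referenced
import java.util.concurrent.CompletableFuture; // ImportOrder: a java.* import after org.* breaks the configured ordering

public class CheckstyleIllustration {
  /**
   * @param input
   */
  // NonEmptyAtclauseDescription: the empty @param description above is reported.
  public CompletableFuture<List<String>> process(List<String> input) {
    // LineLength (max: 100) reports any line over 100 characters, unless it
    // matches the ignorePattern shown above (package, import, and URL lines).
    return CompletableFuture.completedFuture(input);
  }
}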
@@ -13212,7 +13212,7 @@
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 43 has parse error. Missed HTML close tag 'TableName'. Sometimes it means that close tag missed for one of previous tags.
 181
 
@@ -14838,51 +14838,75 @@
 Error
 imports
+UnusedImports
+Unused import - java.io.IOException.
+20
+Error
+imports
 ImportOrder
 Wrong order for 'java.util.Collection' import.
-21
+22
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.NamespaceDescriptor' import.
-30
+33
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.client.replication.TableCFs' import.
-35
+38
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-162
+165
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-172
+175
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-178
+181
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 106).
-181
+184
+Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+288
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-285
+836
+Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+845
+Error
+javadoc
+NonEmptyAtclauseDescription
+At-clause should have a non-empty description.
+846
 
 org/apache/hadoop/hbase/client/AsyncAdminBuilder.java
 
@@ -15071,19 +15095,19 @@
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.NamespaceDescriptor' import.
-33
+35
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-218
+220
 
 Error
 sizes
 LineLength
 Line is longer than 100 characters (found 101).
-390
+397
 
 org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java
 
@@ -16596,7 +16620,7 @@
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
 Javadoc comment at column 64 has parse error. Missed HTML close tag 'code'. Sometimes it means that close tag missed for one of previous tags.
 1956
 
@@ -18029,557 +18053,683 @@
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.NamespaceDescriptor' import.
-57
+59
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.HConstants' import.
-58
+60
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.AsyncMetaTableAccessor' import.
-61
+63
 Error
 imports
 ImportOrder
-Wrong order for 'org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest' import.
-114
+Wrong order for 'org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType' import.
+102
 Error
 imports
 ImportOrder
+Wrong order for 'org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest' import.
+121
+Error
+imports
+ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest' import.
-130
+139
+Error
+imports
+AvoidStarImport
+Using the '.*' form of import should be avoided - org.apache.hadoop.hbase.shaded.protobuf.generated.*.
+195
 Error
+imports
+ImportOrder
+Wrong order for 

[31/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
index d1eae05..7816844 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":10,"i79":10,"i80":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class AsyncHBaseAdmin
+public class AsyncHBaseAdmin
 extends java.lang.Object
 implements AsyncAdmin
 The implementation of AsyncAdmin.
@@ -386,42 +386,89 @@ implements AsyncAdmin
+CompletableFuture<ClusterStatus> getClusterStatus()
+
+CompletableFuture<CompactionState> getCompactionState(TableName tableName)
+  Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> getCompactionStateForRegion(byte[] regionName)
+  Get the current compaction state of region.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestamp(TableName tableName)
+  Get the timestamp of the last major compaction for the passed table.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName)
+  Get the timestamp of the last major compaction for the passed region.
+
 CompletableFuture<NamespaceDescriptor> getNamespaceDescriptor(String name)
   Get a namespace descriptor by name
[27/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 36768a6..8abc3e6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -25,786 +25,881 @@
 017 */
 018package org.apache.hadoop.hbase.client;
 019
-020import java.util.List;
-021import java.util.Collection;
-022import java.util.Map;
-023import java.util.Optional;
-024import java.util.concurrent.CompletableFuture;
-025import java.util.regex.Pattern;
-026
-027import org.apache.hadoop.hbase.HRegionInfo;
-028import org.apache.hadoop.hbase.ProcedureInfo;
-029import org.apache.hadoop.hbase.ServerName;
-030import org.apache.hadoop.hbase.NamespaceDescriptor;
-031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.classification.InterfaceAudience;
-033import org.apache.hadoop.hbase.quotas.QuotaFilter;
-034import org.apache.hadoop.hbase.quotas.QuotaSettings;
-035import org.apache.hadoop.hbase.client.replication.TableCFs;
-036import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-037import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-038import org.apache.hadoop.hbase.util.Pair;
-039
-040/**
-041 * The asynchronous administrative API for HBase.
-042 * <p>
-043 * This feature is still under development, so marked as IA.Private. Will change to public when
-044 * done. Use it with caution.
-045 */
-046@InterfaceAudience.Public
-047public interface AsyncAdmin {
-048
-049  /**
-050   * @param tableName Table to check.
-051   * @return True if table exists already. The return value will be wrapped by a
-052   * {@link CompletableFuture}.
-053   */
-054  CompletableFuture<Boolean> tableExists(TableName tableName);
-055
-056  /**
-057   * List all the userspace tables.
-058   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-059   * @see #listTables(Optional, boolean)
-060   */
-061  default CompletableFuture<List<TableDescriptor>> listTables() {
-062    return listTables(Optional.empty(), false);
-063  }
-064
-065  /**
-066   * List all the tables matching the given pattern.
-067   * @param pattern The compiled regular expression to match against
-068   * @param includeSysTables False to match only against userspace tables
-069   * @return - returns a list of TableDescriptors wrapped by a {@link CompletableFuture}.
-070   */
-071  CompletableFuture<List<TableDescriptor>> listTables(Optional<Pattern> pattern,
-072      boolean includeSysTables);
-073
-074  /**
-075   * List all of the names of userspace tables.
-076   * @return a list of table names wrapped by a {@link CompletableFuture}.
-077   * @see #listTableNames(Optional, boolean)
-078   */
-079  default CompletableFuture<List<TableName>> listTableNames() {
-080    return listTableNames(Optional.empty(), false);
-081  }
-082
-083  /**
-084   * List all of the names of userspace tables.
-085   * @param pattern The regular expression to match against
-086   * @param includeSysTables False to match only against userspace tables
-087   * @return a list of table names wrapped by a {@link CompletableFuture}.
-088   */
-089  CompletableFuture<List<TableName>> listTableNames(Optional<Pattern> pattern,
-090      boolean includeSysTables);
-091
-092  /**
-093   * Method for getting the tableDescriptor
-094   * @param tableName as a {@link TableName}
-095   * @return the read-only tableDescriptor wrapped by a {@link CompletableFuture}.
-096   */
-097  CompletableFuture<TableDescriptor> getTableDescriptor(TableName tableName);
-098
-099  /**
-100   * Creates a new table.
-101   * @param desc table descriptor for table
-102   */
-103  default CompletableFuture<Void> createTable(TableDescriptor desc) {
-104    return createTable(desc, Optional.empty());
-105  }
-106
-107  /**
-108   * Creates a new table with the specified number of regions. The start key specified will become
-109   * the end key of the first region of the table, and the end key specified will become the start
-110   * key of the last region of the table (the first region has a null start key and the last region
-111   * has a null end key). BigInteger math will be used to divide the key range specified into enough
-112   * segments to make the required number of total regions.
-113   * @param desc table descriptor for table
-114   * @param startKey beginning of key range
-115   * @param endKey end of key range
-116   * @param numRegions the total number of regions to create
-117   */
-118  CompletableFuture<Void> createTable(TableDescriptor desc, byte[] startKey, byte[] endKey,
-119      int numRegions);
-120
-121  /**
-122   * Creates a 

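The removed listing above already shows the shape of the AsyncAdmin contract: every operation returns a CompletableFuture, and createTable(desc, startKey, endKey, numRegions) pre-splits the key range with BigInteger math. As a rough caller-side sketch (illustrative only, not part of any patch in this thread; it assumes an AsyncAdmin handle and a TableDescriptor obtained elsewhere, since neither acquisition appears in this diff):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncAdminSketch {
  // Create the table with 8 pre-split regions over [a, z) unless it already exists.
  static CompletableFuture<Void> ensureTable(AsyncAdmin admin, TableDescriptor desc) {
    TableName name = desc.getTableName();
    return admin.tableExists(name).thenCompose(exists -> exists
        ? CompletableFuture.completedFuture((Void) null)
        : admin.createTable(desc, Bytes.toBytes("a"), Bytes.toBytes("z"), 8));
  }
}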
[16/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.EnableTableProcedureBiConsumer.html

[28/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
index 23c80bc..09ba0ab 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/CompactionState.html
@@ -153,6 +153,43 @@ the order they are declared.
+Methods in org.apache.hadoop.hbase.client that return types with arguments of type CompactionState:
+
+CompletableFuture<CompactionState> AsyncHBaseAdmin.getCompactionState(TableName tableName)
+
+CompletableFuture<CompactionState> AsyncAdmin.getCompactionState(TableName tableName)
+  Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> RawAsyncHBaseAdmin.getCompactionState(TableName tableName)
+
+CompletableFuture<CompactionState> AsyncHBaseAdmin.getCompactionStateForRegion(byte[] regionName)
+
+CompletableFuture<CompactionState> AsyncAdmin.getCompactionStateForRegion(byte[] regionName)
+  Get the current compaction state of region.
+
+CompletableFuture<CompactionState> RawAsyncHBaseAdmin.getCompactionStateForRegion(byte[] regionName)
 
 
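Seen from caller code, the rows added above amount to a non-blocking compaction probe. A minimal sketch (illustrative; it assumes an AsyncAdmin handle is already in scope and that CompactionState exposes a NONE constant, as the enum pages on this site suggest):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.CompactionState;

class CompactionProbe {
  // Completes with true while any compaction is running on the table.
  static CompletableFuture<Boolean> isCompacting(AsyncAdmin admin, TableName table) {
    return admin.getCompactionState(table).thenApply(state -> state != CompactionState.NONE);
  }
}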

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index ea44ea7..6636400 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -531,25 +531,25 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
-org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
 org.apache.hadoop.hbase.client.IsolationLevel
-org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
-org.apache.hadoop.hbase.client.TableState.State
-org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
+org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.RequestController.ReturnCode
+org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
+org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.RegionLocateType
 org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.RegionLocateType
-org.apache.hadoop.hbase.client.Durability
+org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.TableState.State
+org.apache.hadoop.hbase.client.CompactType
 org.apache.hadoop.hbase.client.Scan.ReadType
+org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.HBaseAdmin.ReplicationState
-org.apache.hadoop.hbase.client.CompactType
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState

[20/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html

[06/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-104import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-105import 

[32/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index d271132..8278dde 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":18,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":18,"i52":6,"i53":18,"i54":6,"i55":18,"i56":6,"i57":18,"i58":6,"i59":6,"i60":18,"i61":6,"i62":18,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":18,"i78":18,"i79":6,"i80":6,"i81":18,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6};
+var methods = 
{"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":18,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":18,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":18,"i43":6,"i44":6,"i45":6,"i46":18,"i47":6,"i48":18,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":18,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":18,"i64":6,"i65":18,"i66":6,"i67":18,"i68":6,"i69":18,"i70":6,"i71":6,"i72":18,"i73":6,"i74":18,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":18,"i90":18,"i91":6,"i92":6,"i93":18,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -106,7 +106,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
 This feature is still under development, so marked as IA.Private. Will change to public when
@@ -364,42 +364,107 @@ public interface AsyncAdmin
+default CompletableFuture<Collection<ServerName>> getBackupMasters()
+
+CompletableFuture<ClusterStatus> getClusterStatus()
+
+CompletableFuture<CompactionState> getCompactionState(TableName tableName)
+  Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> getCompactionStateForRegion(byte[] regionName)
+  Get the current compaction state of region.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestamp(TableName tableName)
+  Get the timestamp of the last major compaction for the passed table.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName)
+  Get the timestamp of the last major compaction for the passed region.
+
+default CompletableFuture<

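Note that getBackupMasters() is listed as a default method while getClusterStatus() is abstract, so the former presumably derives its answer from the latter. A hedged caller-side sketch of both (illustrative; the AsyncAdmin handle and the ClusterStatus accessors are assumed from the surrounding 2.0 client API rather than shown in this diff):

import java.util.Collection;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

class MasterProbe {
  // Print the active master and the backup masters without blocking the caller.
  static void dumpMasters(AsyncAdmin admin) {
    admin.getClusterStatus()
        .thenAccept(status -> System.out.println("active master: " + status.getMaster()));
    admin.getBackupMasters()
        .thenAccept((Collection<ServerName> backups) ->
            backups.forEach(sn -> System.out.println("backup master: " + sn)));
  }
}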
[17/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DisableTableProcedureBiConsumer.html

[39/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
index 5e905d5..9d51767 100644
--- a/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
+++ b/apidocs/org/apache/hadoop/hbase/client/AsyncAdmin.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":18,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":6,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":6,"i43":6,"i44":18,"i45":6,"i46":6,"i47":6,"i48":6,"i49":6,"i50":6,"i51":18,"i52":6,"i53":18,"i54":6,"i55":18,"i56":6,"i57":18,"i58":6,"i59":6,"i60":18,"i61":6,"i62":18,"i63":6,"i64":6,"i65":6,"i66":6,"i67":6,"i68":6,"i69":6,"i70":6,"i71":6,"i72":6,"i73":6,"i74":6,"i75":6,"i76":6,"i77":18,"i78":18,"i79":6,"i80":6,"i81":18,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6};
+var methods = {"i0":6,"i1":6,"i2":6,"i3":6,"i4":6,"i5":18,"i6":6,"i7":6,"i8":6,"i9":18,"i10":6,"i11":18,"i12":6,"i13":6,"i14":6,"i15":18,"i16":6,"i17":6,"i18":6,"i19":6,"i20":6,"i21":18,"i22":6,"i23":6,"i24":6,"i25":6,"i26":6,"i27":6,"i28":6,"i29":6,"i30":6,"i31":6,"i32":6,"i33":6,"i34":6,"i35":6,"i36":18,"i37":6,"i38":6,"i39":6,"i40":6,"i41":6,"i42":18,"i43":6,"i44":6,"i45":6,"i46":18,"i47":6,"i48":18,"i49":6,"i50":6,"i51":6,"i52":6,"i53":6,"i54":6,"i55":6,"i56":18,"i57":6,"i58":6,"i59":6,"i60":6,"i61":6,"i62":6,"i63":18,"i64":6,"i65":18,"i66":6,"i67":18,"i68":6,"i69":18,"i70":6,"i71":6,"i72":18,"i73":6,"i74":18,"i75":6,"i76":6,"i77":6,"i78":6,"i79":6,"i80":6,"i81":6,"i82":6,"i83":6,"i84":6,"i85":6,"i86":6,"i87":6,"i88":6,"i89":18,"i90":18,"i91":6,"i92":6,"i93":18,"i94":6,"i95":6,"i96":6,"i97":6,"i98":6};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],4:["t3","Abstract Methods"],16:["t5","Default Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -102,7 +102,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public interface AsyncAdmin
+public interface AsyncAdmin
 The asynchronous administrative API for HBase.
  
 This feature is still under development, so marked as IA.Private. Will change to public when
@@ -360,42 +360,107 @@ public interface 
+default CompletableFuture<Collection<ServerName>> getBackupMasters()
+
+CompletableFuture<ClusterStatus> getClusterStatus()
+
+CompletableFuture<CompactionState> getCompactionState(TableName tableName)
+Get the current compaction state of a table.
+
+CompletableFuture<CompactionState> getCompactionStateForRegion(byte[] regionName)
+Get the current compaction state of region.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestamp(TableName tableName)
+Get the timestamp of the last major compaction for the passed table.
+
+CompletableFuture<Optional<Long>> getLastMajorCompactionTimestampForRegion(byte[] regionName)
+Get the timestamp of the last major compaction for the passed region.
+
+default CompletableFuture

[10/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-104import 

[40/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/1b6d8c10
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/1b6d8c10
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/1b6d8c10

Branch: refs/heads/asf-site
Commit: 1b6d8c106ba96f12cb5434b6389ef52a21d7406b
Parents: 53b1ea4
Author: jenkins 
Authored: Fri Jul 7 15:01:38 2017 +
Committer: jenkins 
Committed: Fri Jul 7 15:01:38 2017 +

--
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/index-all.html  |40 +
 .../hadoop/hbase/class-use/ClusterStatus.html   |13 +
 .../hadoop/hbase/class-use/HRegionInfo.html | 6 +
 .../hadoop/hbase/class-use/RegionLoad.html  |13 +
 .../hadoop/hbase/class-use/ServerName.html  |25 +
 .../hadoop/hbase/class-use/TableName.html   |   107 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   528 +-
 .../hbase/client/class-use/CompactionState.html |21 +
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |  1609 +-
 checkstyle-aggregate.html   | 29694 +
 checkstyle.rss  | 6 +-
 devapidocs/constant-values.html | 6 +-
 devapidocs/index-all.html   |72 +
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hadoop/hbase/class-use/ClusterStatus.html   |21 +
 .../hadoop/hbase/class-use/HRegionInfo.html |18 +-
 .../hadoop/hbase/class-use/RegionLoad.html  |23 +
 .../hadoop/hbase/class-use/ServerName.html  |41 +-
 .../hadoop/hbase/class-use/TableName.html   |   325 +-
 .../hbase/classification/package-tree.html  | 4 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |   528 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.html|   441 +-
 ...dmin.AddColumnFamilyProcedureBiConsumer.html | 6 +-
 .../client/RawAsyncHBaseAdmin.AdminRpcCall.html | 4 +-
 .../client/RawAsyncHBaseAdmin.Converter.html| 4 +-
 ...dmin.CreateNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.CreateTableProcedureBiConsumer.html | 6 +-
 ...n.DeleteColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.DeleteNamespaceProcedureBiConsumer.html | 6 +-
 ...aseAdmin.DeleteTableProcedureBiConsumer.html | 8 +-
 ...seAdmin.DisableTableProcedureBiConsumer.html | 6 +-
 ...aseAdmin.EnableTableProcedureBiConsumer.html | 6 +-
 .../RawAsyncHBaseAdmin.MasterRpcCall.html   | 4 +-
 ...min.MergeTableRegionProcedureBiConsumer.html | 6 +-
 ...n.ModifyColumnFamilyProcedureBiConsumer.html | 6 +-
 ...dmin.ModifyNamespaceProcedureBiConsumer.html | 6 +-
 ...HBaseAdmin.NamespaceProcedureBiConsumer.html |14 +-
 .../RawAsyncHBaseAdmin.ProcedureBiConsumer.html |12 +-
 .../RawAsyncHBaseAdmin.TableOperator.html   | 4 +-
 ...syncHBaseAdmin.TableProcedureBiConsumer.html |14 +-
 ...eAdmin.TruncateTableProcedureBiConsumer.html | 6 +-
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.html |   545 +-
 .../hbase/client/class-use/CompactionState.html |37 +
 .../hadoop/hbase/client/package-tree.html   |24 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 2 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 2 +-
 .../hbase/master/procedure/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |10 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 .../hadoop/hbase/regionserver/package-tree.html |16 +-
 .../regionserver/querymatcher/package-tree.html | 4 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../apache/hadoop/hbase/util/package-tree.html  |10 +-
 .../apache/hadoop/hbase/wal/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/Version.html| 6 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.html  |  1609 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.html|   876 +-
 ...dmin.AddColumnFamilyProcedureBiConsumer.html |  4533 +--
 .../client/RawAsyncHBaseAdmin.AdminRpcCall.html |  4533 +--
 .../client/RawAsyncHBaseAdmin.Converter.html|  4533 +--
 ...dmin.CreateNamespaceProcedureBiConsumer.html |  4533 +--
 ...aseAdmin.CreateTableProcedureBiConsumer.html |  4533 +--
 ...n.DeleteColumnFamilyProcedureBiConsumer.html |  4533 +--
 

[01/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 53b1ea46f -> 1b6d8c106


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.html
index d58360f..81d859d 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncTableAdminApi.html
@@ -46,743 +46,822 @@
 038
 039import org.apache.hadoop.fs.Path;
 040import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-041import org.apache.hadoop.hbase.HConstants;
-042import org.apache.hadoop.hbase.HRegionInfo;
-043import org.apache.hadoop.hbase.HRegionLocation;
-044import org.apache.hadoop.hbase.HTableDescriptor;
-045import org.apache.hadoop.hbase.ServerName;
-046import org.apache.hadoop.hbase.TableName;
-047import org.apache.hadoop.hbase.TableNotEnabledException;
-048import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
-049import org.apache.hadoop.hbase.master.MasterFileSystem;
-050import org.apache.hadoop.hbase.testclassification.ClientTests;
-051import org.apache.hadoop.hbase.testclassification.LargeTests;
-052import org.apache.hadoop.hbase.util.Bytes;
-053import org.apache.hadoop.hbase.util.FSTableDescriptors;
-054import org.apache.hadoop.hbase.util.FSUtils;
-055import org.junit.Assert;
-056import org.junit.Test;
-057import org.junit.experimental.categories.Category;
-058import org.junit.runner.RunWith;
-059import org.junit.runners.Parameterized;
-060
-061/**
-062 * Class to test asynchronous table admin operations.
-063 */
-064@RunWith(Parameterized.class)
-065@Category({LargeTests.class, ClientTests.class})
-066public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
-067
-068  @Test
-069  public void testTableExist() throws Exception {
-070    boolean exist;
-071    exist = admin.tableExists(tableName).get();
-072    assertEquals(false, exist);
-073    TEST_UTIL.createTable(tableName, FAMILY);
-074    exist = admin.tableExists(tableName).get();
-075    assertEquals(true, exist);
-076    exist = admin.tableExists(TableName.META_TABLE_NAME).get();
-077    assertEquals(true, exist);
-078  }
-079
-080  @Test
-081  public void testListTables() throws Exception {
-082    int numTables = admin.listTables().get().size();
-083    final TableName tableName1 = TableName.valueOf(tableName.getNameAsString() + "1");
-084    final TableName tableName2 = TableName.valueOf(tableName.getNameAsString() + "2");
-085    final TableName tableName3 = TableName.valueOf(tableName.getNameAsString() + "3");
-086    TableName[] tables = new TableName[] { tableName1, tableName2, tableName3 };
-087    for (int i = 0; i < tables.length; i++) {
-088      createTableWithDefaultConf(tables[i]);
-089    }
-090
-091    List<TableDescriptor> tableDescs = admin.listTables().get();
-092    int size = tableDescs.size();
-093    assertTrue(size >= tables.length);
-094    for (int i = 0; i < tables.length && i < size; i++) {
-095      boolean found = false;
-096      for (int j = 0; j < size; j++) {
-097        if (tableDescs.get(j).getTableName().equals(tables[i])) {
-098          found = true;
-099          break;
-100        }
-101      }
-102      assertTrue("Not found: " + tables[i], found);
-103    }
-104
-105    List<TableName> tableNames = admin.listTableNames().get();
-106    size = tableNames.size();
-107    assertTrue(size == (numTables + tables.length));
-108    for (int i = 0; i < tables.length && i < size; i++) {
-109      boolean found = false;
-110      for (int j = 0; j < size; j++) {
-111        if (tableNames.get(j).equals(tables[i])) {
-112          found = true;
-113          break;
-114        }
-115      }
-116      assertTrue("Not found: " + tables[i], found);
-117    }
-118
-119    for (int i = 0; i < tables.length; i++) {
-120      admin.disableTable(tables[i]).join();
-121      admin.deleteTable(tables[i]).join();
-122    }
-123
-124    tableDescs = admin.listTables(Optional.empty(), true).get();
-125    assertTrue("Not found system tables", tableDescs.size() > 0);
-126    tableNames = admin.listTableNames(Optional.empty(), true).get();
-127    assertTrue("Not found system tables", tableNames.size() > 0);
-128  }
-129
-130  @Test
-131  public void testGetTableDescriptor() throws Exception {
-132    byte[][] families = { FAMILY, FAMILY_0, FAMILY_1 };
-133    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
-134    for (byte[] family : families) {
-135      builder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).build());
-136    }
-137    TableDescriptor desc = builder.build();
-138

[13/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
index 9a6c30b..af6a1dd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.ModifyColumnFamilyProcedureBiConsumer.html
@@ -54,1176 +54,1176 @@
 046import org.apache.commons.io.IOUtils;
 047import org.apache.commons.logging.Log;
 048import org.apache.commons.logging.LogFactory;
-049import org.apache.hadoop.hbase.HRegionInfo;
-050import org.apache.hadoop.hbase.HRegionLocation;
-051import org.apache.hadoop.hbase.MetaTableAccessor;
-052import org.apache.hadoop.hbase.MetaTableAccessor.QueryType;
-053import org.apache.hadoop.hbase.NotServingRegionException;
-054import org.apache.hadoop.hbase.ProcedureInfo;
-055import org.apache.hadoop.hbase.RegionLocations;
-056import org.apache.hadoop.hbase.ServerName;
-057import org.apache.hadoop.hbase.NamespaceDescriptor;
-058import org.apache.hadoop.hbase.HConstants;
-059import org.apache.hadoop.hbase.TableExistsException;
-060import org.apache.hadoop.hbase.TableName;
-061import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
-062import org.apache.hadoop.hbase.TableNotDisabledException;
-063import org.apache.hadoop.hbase.TableNotEnabledException;
-064import org.apache.hadoop.hbase.TableNotFoundException;
-065import org.apache.hadoop.hbase.UnknownRegionException;
-066import org.apache.hadoop.hbase.classification.InterfaceAudience;
-067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder;
-068import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
-069import org.apache.hadoop.hbase.client.Scan.ReadType;
-070import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
-071import org.apache.hadoop.hbase.client.replication.TableCFs;
-072import org.apache.hadoop.hbase.exceptions.DeserializationException;
-073import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-074import org.apache.hadoop.hbase.quotas.QuotaFilter;
-075import org.apache.hadoop.hbase.quotas.QuotaSettings;
-076import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
-077import org.apache.hadoop.hbase.replication.ReplicationException;
-078import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-079import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-080import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-081import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-082import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
-085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
-086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
-087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
-089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
-090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
-093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse;
-094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
-095import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
-096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-100import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-101import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-102import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-103import 

[05/40] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1b6d8c10/hbase-annotations/license.html
--
diff --git a/hbase-annotations/license.html b/hbase-annotations/license.html
index 5db65f7..ff858d1 100644
--- a/hbase-annotations/license.html
+++ b/hbase-annotations/license.html
@@ -117,210 +117,7 @@
 Project Licenses
 
Apache License, Version 2.0
-
-
- Apache License
-   Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-  "License" shall mean the terms and conditions for use, reproduction,
-  and distribution as defined by Sections 1 through 9 of this document.
-
-  "Licensor" shall mean the copyright owner or entity authorized by
-  the copyright owner that is granting the License.
-
-  "Legal Entity" shall mean the union of the acting entity and all
-  other entities that control, are controlled by, or are under common
-  control with that entity. For the purposes of this definition,
-  "control" means (i) the power, direct or indirect, to cause the
-  direction or management of such entity, whether by contract or
-  otherwise, or (ii) ownership of fifty percent (50%) or more of the
-  outstanding shares, or (iii) beneficial ownership of such entity.
-
-  "You" (or "Your") shall mean an individual or Legal Entity
-  exercising permissions granted by this License.
-
-  "Source" form shall mean the preferred form for making modifications,
-  including but not limited to software source code, documentation
-  source, and configuration files.
-
-  "Object" form shall mean any form resulting from mechanical
-  transformation or translation of a Source form, including but
-  not limited to compiled object code, generated documentation,
-  and conversions to other media types.
-
-  "Work" shall mean the work of authorship, whether in Source or
-  Object form, made available under the License, as indicated by a
-  copyright notice that is included in or attached to the work
-  (an example is provided in the Appendix below).
-
-  "Derivative Works" shall mean any work, whether in Source or Object
-  form, that is based on (or derived from) the Work and for which the
-  editorial revisions, annotations, elaborations, or other modifications
-  represent, as a whole, an original work of authorship. For the purposes
-  of this License, Derivative Works shall not include works that remain
-  separable from, or merely link (or bind by name) to the interfaces of,
-  the Work and Derivative Works thereof.
-
-  "Contribution" shall mean any work of authorship, including
-  the original version of the Work and any modifications or additions
-  to that Work or Derivative Works thereof, that is intentionally
-  submitted to Licensor for inclusion in the Work by the copyright owner
-  or by an individual or Legal Entity authorized to submit on behalf of
-  the copyright owner. For the purposes of this definition, "submitted"
-  means any form of electronic, verbal, or written communication sent
-  to the Licensor or its representatives, including but not limited to
-  communication on electronic mailing lists, source code control systems,
-  and issue tracking systems that are managed by, or on behalf of, the
-  Licensor for the purpose of discussing and improving the Work, but
-  excluding communication that is conspicuously marked or otherwise
-  designated in writing by the copyright owner as "Not a Contribution."
-
-  "Contributor" shall mean Licensor and any individual or Legal Entity
-  on behalf of whom a Contribution has been received by Licensor and
-  subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  copyright license to reproduce, prepare Derivative Works of,
-  publicly display, publicly perform, sublicense, and distribute the
-  Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-  this License, each Contributor hereby grants to You a perpetual,
-  worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-  (except as stated in this section) patent license to make, have made,
-  use, offer to sell, sell, import, and otherwise transfer the Work,
-  where such license applies only to those patent claims licensable
-  by such Contributor that are necessarily infringed by their
-  Contribution(s) alone or by combination of their Contribution(s)
-  with the Work to 

hbase git commit: HBASE-18319 Implement getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp methods

2017-07-07 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 89d2adfe9 -> 359f97711


HBASE-18319 Implement getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/359f9771
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/359f9771
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/359f9771

Branch: refs/heads/branch-2
Commit: 359f97711feb1770773ca25d817f436e8d7fbebb
Parents: 89d2adf
Author: Guanghao Zhang 
Authored: Wed Jul 5 18:33:57 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Jul 7 16:47:19 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  95 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  44 
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 219 ++-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  16 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  | 132 +++
 .../hbase/client/TestAsyncRegionAdminApi.java   |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  81 ++-
 8 files changed, 591 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/359f9771/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ff35d46..8ade209 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Collection;
 import java.util.Map;
@@ -24,8 +25,10 @@ import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -332,6 +335,11 @@ public interface AsyncAdmin {
   CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName);
 
   /**
+   * Get the regions of a given table.
+   */
+  CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName);
+
+  /**
    * Flush a table.
    * @param tableName table to flush
    */
@@ -796,4 +804,91 @@ public interface AsyncAdmin {
    * @return procedure list wrapped by {@link CompletableFuture}
    */
   CompletableFuture<List<ProcedureInfo>> listProcedures();
+
+  /**
+   * @return cluster status wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<ClusterStatus> getClusterStatus();
+
+  /**
+   * @return current master server name wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<ServerName> getMaster() {
+    return getClusterStatus().thenApply(ClusterStatus::getMaster);
+  }
+
+  /**
+   * @return current backup master list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getBackupMasters() {
+    return getClusterStatus().thenApply(ClusterStatus::getBackupMasters);
+  }
+
+  /**
+   * @return current live region servers list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getRegionServers() {
+    return getClusterStatus().thenApply(ClusterStatus::getServers);
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server.
+   * @param serverName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
+    return getRegionLoads(serverName, Optional.empty());
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server for a table.
+   * @param serverName
+   * @param tableName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
+      Optional<TableName> tableName);
+
+  /**
+   * Check whether master is in maintenance mode
+   * @return true if master is in maintenance mode, false otherwise. The return value will be
+   * wrapped by a {@link CompletableFuture}
+   */
+  CompletableFuture<Boolean> isMasterInMaintenanceMode();
+
+  /**
+   * Get the current compaction state of a table. It could be in a major compaction, a minor
+   * compaction, both, or none.
+   * @param tableName table
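
For orientation, a hedged usage sketch of the cluster-status additions above. The connection handling and printing are assumptions for illustration, not part of the patch:

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    final class ClusterTopologySketch {
      // 'admin' is assumed to come from AsyncConnection#getAdmin(); blocking
      // get() calls are used only to keep the example short.
      static void print(AsyncAdmin admin) throws Exception {
        ServerName master = admin.getMaster().get();
        int backups = admin.getBackupMasters().get().size();
        int servers = admin.getRegionServers().get().size();
        System.out.println("master=" + master + " backups=" + backups + " servers=" + servers);
      }
    }

Note that each default method composes on getClusterStatus(), so the three calls above issue three separate status RPCs rather than sharing one snapshot.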

hbase git commit: HBASE-18319 Implement getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp methods

2017-07-07 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 4fe738576 -> b0a5fa0c2


HBASE-18319 Implement getClusterStatus/getRegionLoad/getCompactionState/getLastMajorCompactionTimestamp methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0a5fa0c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0a5fa0c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0a5fa0c

Branch: refs/heads/master
Commit: b0a5fa0c2a119168c4272e5efba16a3ef9e9c329
Parents: 4fe7385
Author: Guanghao Zhang 
Authored: Wed Jul 5 18:33:57 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Jul 7 16:21:45 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  95 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|  44 
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java | 219 ++-
 .../hbase/shaded/protobuf/ProtobufUtil.java |  11 +-
 .../hbase/shaded/protobuf/RequestConverter.java |  16 +-
 .../hbase/client/TestAsyncClusterAdminApi.java  | 132 +++
 .../hbase/client/TestAsyncRegionAdminApi.java   |   8 +-
 .../hbase/client/TestAsyncTableAdminApi.java|  81 ++-
 8 files changed, 591 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b0a5fa0c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index ff35d46..8ade209 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.util.List;
 import java.util.Collection;
 import java.util.Map;
@@ -24,8 +25,10 @@ import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -332,6 +335,11 @@ public interface AsyncAdmin {
   CompletableFuture<List<HRegionInfo>> getOnlineRegions(ServerName serverName);
 
   /**
+   * Get the regions of a given table.
+   */
+  CompletableFuture<List<HRegionInfo>> getTableRegions(TableName tableName);
+
+  /**
    * Flush a table.
    * @param tableName table to flush
    */
@@ -796,4 +804,91 @@ public interface AsyncAdmin {
    * @return procedure list wrapped by {@link CompletableFuture}
    */
   CompletableFuture<List<ProcedureInfo>> listProcedures();
+
+  /**
+   * @return cluster status wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<ClusterStatus> getClusterStatus();
+
+  /**
+   * @return current master server name wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<ServerName> getMaster() {
+    return getClusterStatus().thenApply(ClusterStatus::getMaster);
+  }
+
+  /**
+   * @return current backup master list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getBackupMasters() {
+    return getClusterStatus().thenApply(ClusterStatus::getBackupMasters);
+  }
+
+  /**
+   * @return current live region servers list wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<Collection<ServerName>> getRegionServers() {
+    return getClusterStatus().thenApply(ClusterStatus::getServers);
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server.
+   * @param serverName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  default CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName) {
+    return getRegionLoads(serverName, Optional.empty());
+  }
+
+  /**
+   * Get a list of {@link RegionLoad} of all regions hosted on a region server for a table.
+   * @param serverName
+   * @param tableName
+   * @return a list of {@link RegionLoad} wrapped by {@link CompletableFuture}
+   */
+  CompletableFuture<List<RegionLoad>> getRegionLoads(ServerName serverName,
+      Optional<TableName> tableName);
+
+  /**
+   * Check whether master is in maintenance mode
+   * @return true if master is in maintenance mode, false otherwise. The return value will be
+   * wrapped by a {@link CompletableFuture}
+   */
+  CompletableFuture<Boolean> isMasterInMaintenanceMode();
+
+  /**
+   * Get the current compaction state of a table. It could be in a major compaction, a minor
+   * compaction, both, or none.
+   * @param tableName table to
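
A short hedged sketch of the per-server load query added by this commit; the server, table name, and printed fields are illustrative assumptions:

    import java.util.Optional;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;

    final class RegionLoadSketch {
      // Restricts the load query to one table via the Optional overload added here.
      static void print(AsyncAdmin admin, ServerName server) {
        admin.getRegionLoads(server, Optional.of(TableName.valueOf("test_table")))
            .thenAccept(loads -> loads.forEach(load ->
                System.out.println(load.getNameAsString() + " storefiles=" + load.getStorefiles())));
      }
    }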

hbase git commit: HBASE-18083 Make large/small file clean thread number configurable in HFileCleaner

2017-07-07 Thread liyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 b2b5cd6de -> 89d2adfe9


HBASE-18083 Make large/small file clean thread number configurable in HFileCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/89d2adfe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/89d2adfe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/89d2adfe

Branch: refs/heads/branch-2
Commit: 89d2adfe92b2e775a0a3761257846e2d4ed7d141
Parents: b2b5cd6
Author: Yu Li 
Authored: Fri Jul 7 14:07:23 2017 +0800
Committer: Yu Li 
Committed: Fri Jul 7 15:39:34 2017 +0800

--
 .../hbase/master/cleaner/HFileCleaner.java  | 154 +--
 .../hbase/master/cleaner/TestHFileCleaner.java  |  13 +-
 2 files changed, 120 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/89d2adfe/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
index 8b3515a..6d1fbe3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/HFileCleaner.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -67,6 +68,16 @@ public class HFileCleaner extends CleanerChore impleme
       "hbase.regionserver.hfilecleaner.small.queue.size";
   public final static int DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE = 10240;
 
+  // Configuration key for large file delete thread number
+  public final static String LARGE_HFILE_DELETE_THREAD_NUMBER =
+      "hbase.regionserver.hfilecleaner.large.thread.count";
+  public final static int DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER = 1;
+
+  // Configuration key for small file delete thread number
+  public final static String SMALL_HFILE_DELETE_THREAD_NUMBER =
+      "hbase.regionserver.hfilecleaner.small.thread.count";
+  public final static int DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER = 1;
+
   private static final Log LOG = LogFactory.getLog(HFileCleaner.class);
 
   StealJobQueue<HFileDeleteTask> largeFileQueue;
@@ -74,11 +85,13 @@ public class HFileCleaner extends CleanerChore impleme
   private int throttlePoint;
   private int largeQueueInitSize;
   private int smallQueueInitSize;
+  private int largeFileDeleteThreadNumber;
+  private int smallFileDeleteThreadNumber;
   private List<Thread> threads = new ArrayList<Thread>();
   private boolean running;
 
-  private long deletedLargeFiles = 0L;
-  private long deletedSmallFiles = 0L;
+  private AtomicLong deletedLargeFiles = new AtomicLong();
+  private AtomicLong deletedSmallFiles = new AtomicLong();
 
   /**
    * @param period the period of time to sleep between each run
@@ -100,6 +113,10 @@ public class HFileCleaner extends CleanerChore impleme
         conf.getInt(SMALL_HFILE_QUEUE_INIT_SIZE, DEFAULT_SMALL_HFILE_QUEUE_INIT_SIZE);
     largeFileQueue = new StealJobQueue<>(largeQueueInitSize, smallQueueInitSize);
     smallFileQueue = largeFileQueue.getStealFromQueue();
+    largeFileDeleteThreadNumber =
+        conf.getInt(LARGE_HFILE_DELETE_THREAD_NUMBER, DEFAULT_LARGE_HFILE_DELETE_THREAD_NUMBER);
+    smallFileDeleteThreadNumber =
+        conf.getInt(SMALL_HFILE_DELETE_THREAD_NUMBER, DEFAULT_SMALL_HFILE_DELETE_THREAD_NUMBER);
     startHFileDeleteThreads();
   }
 
@@ -183,30 +200,34 @@ public class HFileCleaner extends CleanerChore impleme
     final String n = Thread.currentThread().getName();
     running = true;
     // start thread for large file deletion
-    Thread large = new Thread() {
-      @Override
-      public void run() {
-        consumerLoop(largeFileQueue);
-      }
-    };
-    large.setDaemon(true);
-    large.setName(n + "-HFileCleaner.large-" + System.currentTimeMillis());
-    large.start();
-    LOG.debug("Starting hfile cleaner for large files: " + large.getName());
-    threads.add(large);
+    for (int i = 0; i < largeFileDeleteThreadNumber; i++) {
+      Thread large = new Thread() {
+        @Override
+        public void run() {
+          consumerLoop(largeFileQueue);
+        }
+      };
+      large.setDaemon(true);
+      large.setName(n + "-HFileCleaner.large." + i + "-" + System.currentTimeMillis());
+      large.start();
+      LOG.debug("Starting hfile cleaner for large files: " + large.getName());
+      threads.add(large);
+    }
 
     // start 
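
A hedged configuration sketch for the two new keys; the chosen values are arbitrary examples, not recommendations, and only the key names and their defaults of 1 come from the patch above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class HFileCleanerThreadTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Both keys default to 1; see DEFAULT_*_HFILE_DELETE_THREAD_NUMBER above.
        conf.setInt("hbase.regionserver.hfilecleaner.large.thread.count", 2);
        conf.setInt("hbase.regionserver.hfilecleaner.small.thread.count", 4);
        System.out.println(conf.getInt("hbase.regionserver.hfilecleaner.large.thread.count", 1));
      }
    }

In a deployment the same properties would normally be set in hbase-site.xml on the master, where the HFileCleaner chore runs.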

hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks. [Forced Update!]

2017-07-07 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18147 e660fe514 -> 2ec2d3693 (forced update)


HBASE-18147 POC jenkinsfile for nightly checks.

* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile
* TODO branch with multiple jdk versions
* TODO verify flaky list is in use


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ec2d369
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ec2d369
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ec2d369

Branch: refs/heads/HBASE-18147
Commit: 2ec2d3693b3e466ee92a2cf8afa213bedc674fdf
Parents: df93c13
Author: Sean Busbey 
Authored: Tue Jul 4 15:12:38 2017 -0400
Committer: Sean Busbey 
Committed: Fri Jul 7 03:06:44 2017 -0400

--
 dev-support/Jenkinsfile   | 141 +
 dev-support/docker/Dockerfile |  29 
 2 files changed, 170 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ec2d369/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..92b2e37
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,141 @@
+pipeline {
+  agent {
+    node {
+      label 'Hadoop'
+    }
+  }
+  // work around for YETUS-508, requires maven outside of the dockerfile
+  tools {
+    maven 'Maven (latest)'
+  }
+  triggers {
+    cron('@daily')
+  }
+  options {
+    timeout (time: 6, unit: 'HOURS')
+    timestamps()
+  }
+  environment {
+    TOOLS = "${env.WORKSPACE}/tools"
+    BASEDIR = "${env.WORKSPACE}/component"
+    YETUS_RELEASE = '0.4.0'
+  }
+  parameters {
+    booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+  }
+  stages {
+    stage ('yetus check') {
+      environment {
+        OUTPUTDIR = "${env.WORKSPACE}/artifacts"
+        PROJECT = 'hbase'
+        PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+        AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc'
+        WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+        TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+        BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+      }
+      steps {
+        // TODO we can move the yetus install into a different stage and then use stash to deploy it.
+        sh  '''#!/usr/bin/env bash
+printenv
+echo "Ensure we have a copy of Apache Yetus."
+if [[ "true" !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+    echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+    rm -rf "${WORKSPACE}/.gpg"
+    mkdir -p "${WORKSPACE}/.gpg"
+    chmod -R 700 "${WORKSPACE}/.gpg"
+
+    echo "install yetus project KEYS"
+    curl -L --fail -o "${WORKSPACE}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS
+    gpg --homedir "${WORKSPACE}/.gpg" --import "${WORKSPACE}/KEYS_YETUS"
+
+    echo "download yetus release ${YETUS_RELEASE}"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    echo "verifying yetus release"
+    gpg --homedir "${WORKSPACE}/.gpg" --verify "yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    mv "yetus-${YETUS_RELEASE}-bin.tar.gz" yetus.tar.gz
+  else
+    echo "Reusing cached download of Apache Yetus version ${YETUS_RELEASE}."
+  fi
+else
+  YETUS_DIR="${WORKSPACE}/yetus-git"
+  rm -rf "${YETUS_DIR}"
+  echo "downloading from github"
+  curl -L --fail https://api.github.com/repos/apache/yetus/tarball/HEAD -o yetus.tar.gz
+fi
+if [ ! -d "${YETUS_DIR}" ]; then
+  echo "unpacking yetus into '${YETUS_DIR}'"
+  mkdir -p "${YETUS_DIR}"
+  gunzip -c yetus.tar.gz | tar xpf - -C "${YETUS_DIR}" --strip-components 1
+fi
+'''
+        // TODO we can move the personality install into a different stage and then use stash to deploy it.
+        dir ("${env.TOOLS}") {
+          sh """#!/usr/bin/env bash
+echo "Downloading Project personality."
+curl -L  -o personality.sh "${env.PROJET_PERSONALITY}"
+"""
+        }
+        sh '''#!/usr/bin/env bash
+YETUS_ARGS=()

hbase git commit: HBASE-18147 POC jenkinsfile for nightly checks. [Forced Update!]

2017-07-07 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-18147 f27687a5f -> e660fe514 (forced update)


HBASE-18147 POC jenkinsfile for nightly checks.

* adds ruby tools to dockerfile
* adds rubocop to dockerfile
* adds ruby-lint to dockerfile
* adds perlcritic to dockerfile
* TODO branch with multiple jdk versions
* TODO verify flaky list is in use


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e660fe51
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e660fe51
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e660fe51

Branch: refs/heads/HBASE-18147
Commit: e660fe5148ecfa06ae712ba4359e4b438c1e2976
Parents: df93c13
Author: Sean Busbey 
Authored: Tue Jul 4 15:12:38 2017 -0400
Committer: Sean Busbey 
Committed: Fri Jul 7 02:45:53 2017 -0400

--
 dev-support/Jenkinsfile   | 141 +
 dev-support/docker/Dockerfile |  19 +
 2 files changed, 160 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e660fe51/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
new file mode 100644
index 000..92b2e37
--- /dev/null
+++ b/dev-support/Jenkinsfile
@@ -0,0 +1,141 @@
+pipeline {
+  agent {
+    node {
+      label 'Hadoop'
+    }
+  }
+  // work around for YETUS-508, requires maven outside of the dockerfile
+  tools {
+    maven 'Maven (latest)'
+  }
+  triggers {
+    cron('@daily')
+  }
+  options {
+    timeout (time: 6, unit: 'HOURS')
+    timestamps()
+  }
+  environment {
+    TOOLS = "${env.WORKSPACE}/tools"
+    BASEDIR = "${env.WORKSPACE}/component"
+    YETUS_RELEASE = '0.4.0'
+  }
+  parameters {
+    booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
+
+Should only be used manually when e.g. there is some non-work-aroundable issue in yetus we are checking a fix for.''')
+  }
+  stages {
+    stage ('yetus check') {
+      environment {
+        OUTPUTDIR = "${env.WORKSPACE}/artifacts"
+        PROJECT = 'hbase'
+        PROJET_PERSONALITY = 'https://raw.githubusercontent.com/apache/hbase/master/dev-support/hbase-personality.sh'
+        AUTHOR_IGNORE_LIST = 'src/main/asciidoc/_chapters/developer.adoc'
+        WHITESPACE_IGNORE_LIST = '.*/generated/.*'
+        TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop'
+        BRANCH_SPECIFIC_DOCKERFILE = "${env.BASEDIR}/dev-support/docker/Dockerfile"
+      }
+      steps {
+        // TODO we can move the yetus install into a different stage and then use stash to deploy it.
+        sh  '''#!/usr/bin/env bash
+printenv
+echo "Ensure we have a copy of Apache Yetus."
+if [[ "true" !=  "${USE_YETUS_PRERELEASE}" ]]; then
+  YETUS_DIR="${WORKSPACE}/yetus-${YETUS_RELEASE}"
+  echo "Checking for Yetus ${YETUS_RELEASE} in '${YETUS_DIR}'"
+  if [ ! -d "${YETUS_DIR}" ]; then
+    echo "New download of Apache Yetus version ${YETUS_RELEASE}."
+    rm -rf "${WORKSPACE}/.gpg"
+    mkdir -p "${WORKSPACE}/.gpg"
+    chmod -R 700 "${WORKSPACE}/.gpg"
+
+    echo "install yetus project KEYS"
+    curl -L --fail -o "${WORKSPACE}/KEYS_YETUS" https://dist.apache.org/repos/dist/release/yetus/KEYS
+    gpg --homedir "${WORKSPACE}/.gpg" --import "${WORKSPACE}/KEYS_YETUS"
+
+    echo "download yetus release ${YETUS_RELEASE}"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz"
+    curl -L --fail -O "https://dist.apache.org/repos/dist/release/yetus/${YETUS_RELEASE}/yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    echo "verifying yetus release"
+    gpg --homedir "${WORKSPACE}/.gpg" --verify "yetus-${YETUS_RELEASE}-bin.tar.gz.asc"
+    mv "yetus-${YETUS_RELEASE}-bin.tar.gz" yetus.tar.gz
+  else
+    echo "Reusing cached download of Apache Yetus version ${YETUS_RELEASE}."
+  fi
+else
+  YETUS_DIR="${WORKSPACE}/yetus-git"
+  rm -rf "${YETUS_DIR}"
+  echo "downloading from github"
+  curl -L --fail https://api.github.com/repos/apache/yetus/tarball/HEAD -o yetus.tar.gz
+fi
+if [ ! -d "${YETUS_DIR}" ]; then
+  echo "unpacking yetus into '${YETUS_DIR}'"
+  mkdir -p "${YETUS_DIR}"
+  gunzip -c yetus.tar.gz | tar xpf - -C "${YETUS_DIR}" --strip-components 1
+fi
+'''
+        // TODO we can move the personality install into a different stage and then use stash to deploy it.
+        dir ("${env.TOOLS}") {
+          sh """#!/usr/bin/env bash
+echo "Downloading Project personality."
+curl -L  -o personality.sh "${env.PROJET_PERSONALITY}"
+"""
+        }
+        sh '''#!/usr/bin/env bash
+YETUS_ARGS=()

hbase-site git commit: INFRA-10751 Empty commit

2017-07-07 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site b3b50f225 -> 53b1ea46f


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/53b1ea46
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/53b1ea46
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/53b1ea46

Branch: refs/heads/asf-site
Commit: 53b1ea46f688267fe0d1e24fe5b8536ceb9fe783
Parents: b3b50f2
Author: jenkins 
Authored: Fri Jul 7 06:42:51 2017 +
Committer: jenkins 
Committed: Fri Jul 7 06:42:51 2017 +

--

--




[51/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/b3b50f22
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/b3b50f22
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/b3b50f22

Branch: refs/heads/asf-site
Commit: b3b50f22501ae38b2c998123d2a6706b2bd3ec86
Parents: 1772a08
Author: jenkins 
Authored: Fri Jul 7 06:42:13 2017 +
Committer: jenkins 
Committed: Fri Jul 7 06:42:13 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 6 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 apidocs/index-all.html  | 2 +
 .../apache/hadoop/hbase/util/VersionInfo.html   |55 +-
 .../apache/hadoop/hbase/util/VersionInfo.html   |   199 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 24474 +
 checkstyle.rss  |16 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |36 +-
 devapidocs/index-all.html   |66 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../hadoop/hbase/class-use/HRegionInfo.html |26 +-
 .../apache/hadoop/hbase/class-use/Server.html   |35 -
 .../hadoop/hbase/class-use/ServerName.html  |   166 +-
 .../class-use/InterfaceAudience.Private.html| 2 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../hadoop/hbase/client/package-tree.html   |28 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hbase/io/FileLink.FileLinkInputStream.html  |54 +-
 .../org/apache/hadoop/hbase/io/FileLink.html|40 +-
 .../hadoop/hbase/io/class-use/HeapSize.html | 2 +-
 .../encoding/class-use/DataBlockEncoding.html   | 2 +-
 .../io/hfile/HFileBlock.BlockIterator.html  | 8 +-
 .../io/hfile/HFileBlock.BlockWritable.html  | 4 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html |24 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |72 +-
 .../hbase/io/hfile/HFileBlock.Header.html   |18 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |18 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |   181 +-
 .../hfile/bucket/BucketCache.BucketEntry.html   |42 +-
 .../bucket/BucketCache.BucketEntryGroup.html|22 +-
 .../hfile/bucket/BucketCache.RAMQueueEntry.html |20 +-
 .../bucket/BucketCache.StatisticsThread.html| 8 +-
 .../hfile/bucket/BucketCache.WriterThread.html  |14 +-
 .../hbase/io/hfile/bucket/BucketCache.html  |89 +-
 .../hfile/bucket/FileIOEngine.FileAccessor.html | 4 +-
 .../bucket/FileIOEngine.FileReadAccessor.html   | 6 +-
 .../bucket/FileIOEngine.FileWriteAccessor.html  | 6 +-
 .../hbase/io/hfile/bucket/FileIOEngine.html |26 +-
 .../io/hfile/bucket/class-use/IOEngine.html | 5 +-
 .../hbase/io/hfile/class-use/Cacheable.html | 2 +-
 .../io/hfile/class-use/HFileBlock.FSReader.html | 2 +-
 .../hadoop/hbase/io/hfile/package-summary.html  | 8 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/io/hfile/package-use.html  | 6 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/master/HMaster.html |   587 +-
 .../master/HMasterCommandLine.LocalHMaster.html | 2 +-
 .../hadoop/hbase/master/MasterServices.html |   138 +-
 .../hadoop/hbase/master/ServerManager.html  |12 +-
 ...signmentManager.RegionInTransitionChore.html | 6 +-
 ...ssignmentManager.RegionInTransitionStat.html |40 +-
 .../master/assignment/AssignmentManager.html|   616 +-
 .../hbase/master/class-use/MasterServices.html  |35 +
 .../hbase/master/class-use/ServerManager.html   | 4 +-
 .../cleaner/HFileCleaner.HFileDeleteTask.html   |26 +-
 .../hbase/master/cleaner/HFileCleaner.html  |   230 +-
 .../hadoop/hbase/master/package-tree.html   | 2 +-
 .../apache/hadoop/hbase/master/package-use.html | 9 +-
 .../org/apache/hadoop/hbase/package-tree.html   | 8 +-
 .../org/apache/hadoop/hbase/package-use.html|10 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 4 +-
 .../hadoop/hbase/quotas/package-tree.html   | 8 +-
 

[41/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
index 866b438..98fc8f9 100644
--- a/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
+++ b/devapidocs/org/apache/hadoop/hbase/master/HMasterCommandLine.LocalHMaster.html
@@ -248,7 +248,7 @@ extends 
 
 Methods inherited from classorg.apache.hadoop.hbase.master.HMaster
-abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, checkInitialized, checkServiceStarted, checkTableModifiable, configureInfoServer, constructMaster, createMetaBootstrap, createNamespace, createQuotaSnapshotNotifier, createRpcServices, createServerManager, createSystemTable, createTable, deleteColumn, deleteNamespace, deleteTable, disableReplicationPeer, disableTable, dispatchMergingRegions, drainRegionServer, enableReplicationPeer, enableTable, getAssignmentManager, getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterSchema, getClusterStatus, getDumpServlet, getFavoredNodesManager, getFsTableDescriptors, getHFileCleaner, getInitializedEvent, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, getLockManager, getLogCleaner, getMasterActiveTime, getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, getMasterMetrics, getMasterProcedureExecutor, getMasterProcedureManagerHost, getMasterQuotaManager, getMasterRpcServices, getMasterStartTime, getMasterWalManager, getMergePlanCount, getMobCompactionState, getNamespace, getNamespaces, getNumWALFiles, getProcessName, getQuotaObserverChore, getRegionNormalizer, getRegionNormalizerTracker, getRegionServerFatalLogBuffer, getRegionServerInfoPort, getRegionServerVersion, getRemoteInetAddress, getReplicationPeerConfig, getServerCrashProcessingEnabledEvent, getServerManager, getServerName, getSnapshotManager, getSpaceQuotaSnapshotNotifier, getSplitOrMergeTracker, getSplitPlanCount, getTableDescriptors, getTableRegionForRow, getTableStateManager, getWalProcedureStore, getZooKeeper, initClusterSchemaService, initializeZKBasedSystemTrackers, initQuotaManager, isActiveMaster, isBalancerOn, isCatalogJanitorEnabled, isCleanerChoreEnabled, isInitializationStartsMetaRegionAssignment, isInitialized, isInMaintenanceMode, isNormalizerOn, isServerCrashProcessingEnabled, isSplitOrMergeEnabled, listDrainingRegionServers, listLocks, listProcedures, listReplicationPeers, listTableDescriptors, listTableDescriptorsByNamespace, listTableNames, listTableNamesByNamespace, login, main, mergeRegions, modifyColumn, modifyNamespace, modifyTable, move, normalizeRegions, registerService, removeDrainFromRegionServer, removeReplicationPeer, reportMobCompactionEnd, reportMobCompactionStart, requestMobCompaction, restoreSnapshot, setCatalogJanitorEnabled, setInitialized, setServerCrashProcessingEnabled, shutdown, splitRegion, stopMaster, stopServiceThreads, truncateTable, updateConfigurationForSpaceQuotaObserver, updateReplicationPeerConfig, waitForMasterActive
+abort, abortProcedure, addColumn, addReplicationPeer, balance, balance, balanceSwitch, canCreateBaseZNode, canUpdateTableDescriptor, checkIfShouldMoveSystemRegionAsync, checkInitialized, checkServiceStarted, checkTableModifiable, configureInfoServer, constructMaster, createMetaBootstrap, createNamespace, createQuotaSnapshotNotifier, createRpcServices, createServerManager, createSystemTable, createTable, deleteColumn, deleteNamespace, deleteTable, disableReplicationPeer, disableTable, dispatchMergingRegions, drainRegionServer, enableReplicationPeer, enableTable, getAssignmentManager, getAverageLoad, getCatalogJanitor, getClientIdAuditPrefix, getClusterSchema, getClusterStatus, getDumpServlet, getFavoredNodesManager, getFsTableDescriptors, getHFileCleaner, getInitializedEvent, getLastMajorCompactionTimestamp, getLastMajorCompactionTimestampForRegion, getLoadBalancer, getLoadBalancerClassName, getLoadedCoprocessors, getLockManager, getLogCleaner, getMasterActiveTime, getMasterCoprocessorHost, getMasterCoprocessors, getMasterFileSystem, getMasterMetrics, getMasterProcedureExecutor, getMasterProcedureManagerHost, getMasterQuotaManager, getMasterRpcServices, getMasterStartTime, getMasterWalManager, getMergePlanCount, getMobCompactionState, getNamespace, getNamespaces, getNumWALFiles, getProcessName, getQuotaObserverChore, getRegionNormalizer, getRegionNormalizerTracker, …
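With the HTML wrapping repaired, the only substantive change in this hunk is the addition of checkIfShouldMoveSystemRegionAsync to HMaster's method list; HMasterCommandLine.LocalHMaster declares nothing new itself, so the generated "Methods inherited" section picks the method up automatically. A minimal sketch of that mechanism, using stand-in classes rather than the real HBase types:

// Hedged sketch: stand-in classes, not the real HBase types, illustrating why
// a method added to the superclass changes the subclass's generated javadoc.
class HMasterStandIn {
  // Newly added upstream (per the '+' list above).
  public void checkIfShouldMoveSystemRegionAsync() { }
}

class LocalHMasterStandIn extends HMasterStandIn {
  // Declares nothing itself; javadoc still lists the inherited method under
  // "Methods inherited from class HMasterStandIn".
}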

[43/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.

2017-07-07 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
index 426e598..79b4a97 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-tree.html
@@ -273,12 +273,12 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.io.hfile.BlockType
 org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory
 org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType
-org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
-org.apache.hadoop.hbase.io.hfile.BlockType
-org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
 org.apache.hadoop.hbase.io.hfile.BlockPriority
+org.apache.hadoop.hbase.io.hfile.HFileBlock.Writer.State
+org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 
 
 

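Each package-tree hunk in this batch rewrites the same generated construct: javadoc groups enum classes under java.lang.Enum<E> because every Java enum implicitly extends it, and Enum in turn implements Comparable and Serializable. A one-enum reminder of why that node appears, as a sketch:

// Any enum declaration implicitly extends java.lang.Enum<E>, which implements
// Comparable<E> and java.io.Serializable; hence the Enum<E> node above.
enum BlockCategoryDemo { DATA, META, INDEX }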
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/io/hfile/package-use.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-use.html b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-use.html
index ba8b5a8..d26371a 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/package-use.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/package-use.html
@@ -363,12 +363,12 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 HFileBlock
-Reads HFile version 2 blocks to HFiles and via Cacheable Interface to caches.
+Cacheable Blocks of an HFile version 2 file.
 
 
 
 HFileBlock.BlockIterator
-An interface allowing to iterate HFileBlocks.
+Iterator for HFileBlocks.
 
 
 
@@ -378,7 +378,7 @@ Input/OutputFormats, a table indexing MapReduce job, and utility methods.
 
 
 HFileBlock.FSReader
-A full-fledged reader with iteration ability.
+An HFile block reader with iteration ability.
 
 
 

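The reworded summaries in this hunk cast HFileBlock.FSReader as an HFile block reader with iteration ability and HFileBlock.BlockIterator as the iterator it hands out. As a rough illustration of that reader/iterator split only, here is a hedged sketch; the interfaces are stand-ins, and the names blockRange and nextBlock are assumptions for illustration, not the project's confirmed API.

import java.io.IOException;

// Hedged sketch: stand-in shapes for the documented reader/iterator pair.
interface HFileBlockLike {
  long getOffset();
}

interface BlockIteratorLike {
  // Returns the next block in the range, or null once the range is exhausted.
  HFileBlockLike nextBlock() throws IOException;
}

interface FSReaderLike {
  // Opens an iterator over the blocks between two file offsets.
  BlockIteratorLike blockRange(long startOffset, long endOffset) throws IOException;
}

final class BlockScan {
  // Walks every block in [start, end) and counts it; a real caller would
  // typically inspect each block instead of merely counting.
  static int countBlocks(FSReaderLike reader, long start, long end) throws IOException {
    BlockIteratorLike it = reader.blockRange(start, end);
    int n = 0;
    for (HFileBlockLike b = it.nextBlock(); b != null; b = it.nextBlock()) {
      n++;
    }
    return n;
  }
}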
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index cdb6af5..08990df 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -341,8 +341,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.BufferCallBeforeInitHandler.BufferCallAction
+org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.CallEvent.Type
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
index 8e1ffb4..453a2bc 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/package-tree.html
@@ -284,10 +284,10 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.mapreduce.SyncTable.SyncMapper.Counter
-org.apache.hadoop.hbase.mapreduce.CellCounter.CellCounterMapper.Counters
 org.apache.hadoop.hbase.mapreduce.TableSplit.Version
