This is an automated email from the ASF dual-hosted git repository.
bharathv pushed a commit to branch branch-1
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-1 by this push:
new 5ece9cef HBASE-26070: Rewrite 1.7.0 faulty table descriptors at bootstrap (#3463)
5ece9cef is described below
commit 5ece9cefeae096ad5565957e6fc35298ab8416de
Author: Bharath Vissapragada <[email protected]>
AuthorDate: Wed Jul 7 21:19:31 2021 -0700
HBASE-26070: Rewrite 1.7.0 faulty table descriptors at bootstrap (#3463)
This patch rewrites incorrectly serialized table descriptors and states
from HBASE-26021 into PBs prior to the incompatible change. The patch
does this as a part of the active HMaster bootstrap.
This approach is _not_ foolproof as it does not handle all the edge cases
and may break certain rolling upgrade semantics. However, it does provide a
way out for tables created with 1.7.0 PB definitions and an upgrade path for
1.7.1.
The easiest path is to shut down the 1.7.0 cluster and reboot with 1.7.1
binaries, after which everything should work cleanly. However, a rolling
upgrade could potentially break, as a 1.7.0 client may not be compatible
with a 1.7.1 server.
As noted in the jira, no one can have upgraded an existing cluster to 1.7.0
yet, because they would run into HBASE-26021; however, new clusters may have
been created directly on 1.7.0, and those need an upgrade path.
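For illustration, the recovery flow at bootstrap looks roughly like the
sketch below (rewriteTableDescriptor is a hypothetical helper name; the real
logic lives in the FSTableDescriptors changes in this commit):

    // Sketch only: try the fixed PB definition first, then fall back to the
    // 1.7.0-compatible definition and rewrite in the fixed format.
    HTableDescriptor readAndRepair(byte[] bytes) throws DeserializationException {
      try {
        // Happy path: bytes were not written by 1.7.0.
        return HTableDescriptor.parseFrom(bytes);
      } catch (DeserializationException e) {
        // Fallback: bytes carry the faulty 1.7.0 serialization (HBASE-26021).
        HTableDescriptor htd =
            DeprecatedTableDescriptor.parseFrom(bytes).getHTableDescriptor();
        rewriteTableDescriptor(htd); // hypothetical: persist in the fixed format
        return htd;
      }
    }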
Tested locally:
- Created a mix of enabled/disabled tables with 1.7.0
- Upgraded the cluster without this patch and observed deserialization errors
- Upgraded the cluster with this patch: table metadata was re-serialized in
the correct format and table states were preserved
Signed-off-by: Reid Chan <[email protected]>
---
.../hadoop/hbase/DeprecatedTableDescriptor.java | 77 +
.../org/apache/hadoop/hbase/HTableDescriptor.java | 8 +-
hbase-protocol/pom.xml | 1 +
.../generated/HBase170CompatibilityProtos.java | 1642 ++++++++++++++++++++
...Base-1.7-TableSerialization-Compatibility.proto | 56 +
.../org/apache/hadoop/hbase/master/HMaster.java | 9 +
.../hadoop/hbase/util/FSTableDescriptors.java | 77 +-
7 files changed, 1855 insertions(+), 15 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DeprecatedTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DeprecatedTableDescriptor.java
new file mode 100644
index 0000000..b239ffa
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DeprecatedTableDescriptor.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State;
+
+/**
+ * Only used for HBase 1.7.0 compatibility's sake.
+ */
[email protected]
+public final class DeprecatedTableDescriptor {
+
+ private HTableDescriptor hTableDescriptor;
+ private Table tableState;
+
+  private DeprecatedTableDescriptor(HTableDescriptor hTableDescriptor, Table tableState) {
+ this.hTableDescriptor = hTableDescriptor;
+ this.tableState = tableState;
+ }
+
+ public HTableDescriptor getHTableDescriptor() {
+ return hTableDescriptor;
+ }
+
+ public Table getTableState() {
+ return tableState;
+ }
+
+ /**
+ * Utility method to parse bytes serialized as incompatible TableDescriptors.
+   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
+ */
+ public static DeprecatedTableDescriptor parseFrom(final byte [] bytes)
+ throws DeserializationException {
+ if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Expected PB encoded TableDescriptor");
+ }
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ TableDescriptor.Builder builder = TableDescriptor.newBuilder();
+ TableDescriptor ts;
+ try {
+ ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return convert(ts);
+ }
+
+  private static DeprecatedTableDescriptor convert(TableDescriptor proto) {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
+ State state = State.valueOf(proto.getState().getNumber());
+ Table tableState = Table.newBuilder().setState(state).build();
+ return new DeprecatedTableDescriptor(hTableDescriptor, tableState);
+ }
+}
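A minimal usage sketch of the class above (the byte[] would come from a
.tableinfo file or table znode written by 1.7.0; error handling elided):

    // Parse bytes written with the incompatible 1.7.0 TableDescriptor PB.
    DeprecatedTableDescriptor dtd = DeprecatedTableDescriptor.parseFrom(bytes);
    HTableDescriptor htd = dtd.getHTableDescriptor(); // schema portion
    Table state = dtd.getTableState();                // ENABLED/DISABLED state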
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index ca39b98..3bdd7d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1707,12 +1707,16 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
ts = builder.build();
- } catch (IOException e) {
+ return convert(ts);
+ } catch (IOException | IllegalArgumentException e) {
+      // Deserialization may not fail but can return garbage that fails eventual validations and
+      // hence IAE.
throw new DeserializationException(e);
}
- return convert(ts);
}
+
+
/**
   * @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
*/
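Why the widened catch in the hunk above matters: a protobuf merge can
"succeed" on bytes of the wrong message type, so the failure only surfaces
when convert(ts) validates the resulting garbage and throws
IllegalArgumentException. Folding that into DeserializationException lets
callers treat both cases as one parse failure. A sketch of the effect
(garbageBytes is a placeholder; checked exceptions elided):

    TableSchema.Builder builder = TableSchema.newBuilder();
    builder.mergeFrom(garbageBytes);  // may not throw on wrong-type bytes
    TableSchema ts = builder.build();
    HTableDescriptor.convert(ts);     // ...but validation can throw IAE here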
diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml
index 9c9dbda..7e40dec 100644
--- a/hbase-protocol/pom.xml
+++ b/hbase-protocol/pom.xml
@@ -182,6 +182,7 @@
<include>FS.proto</include>
<include>Filter.proto</include>
<include>HBase.proto</include>
+        <include>HBase-1.7-TableSerialization-Compatibility.proto</include>
<include>HFile.proto</include>
<include>LoadBalancer.proto</include>
<include>MapReduce.proto</include>
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBase170CompatibilityProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBase170CompatibilityProtos.java
new file mode 100644
index 0000000..e09f8a6
--- /dev/null
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBase170CompatibilityProtos.java
@@ -0,0 +1,1642 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: HBase-1.7-TableSerialization-Compatibility.proto
+
+package org.apache.hadoop.hbase.protobuf.generated;
+
+public final class HBase170CompatibilityProtos {
+ private HBase170CompatibilityProtos() {}
+ public static void registerAllExtensions(
+ com.google.protobuf.ExtensionRegistry registry) {
+ }
+ public interface TableStateOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableState.State state = 1;
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+ boolean hasState();
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+    org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState();
+
+ // required .hbase.pb.TableName table = 2;
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+ boolean hasTable();
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable();
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+    org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder();
+
+ // optional uint64 timestamp = 3;
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ boolean hasTimestamp();
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ long getTimestamp();
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableState}
+ *
+ * <pre>
+ ** Denotes state of the table
+ * </pre>
+ */
+ public static final class TableState extends
+ com.google.protobuf.GeneratedMessage
+ implements TableStateOrBuilder {
+ // Use TableState.newBuilder() to construct.
+    private TableState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableState defaultInstance;
+ public static TableState getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableState getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableState(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                state_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = table_.toBuilder();
+              }
+              table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(table_);
+ table_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000002;
+ break;
+ }
+ case 24: {
+ bitField0_ |= 0x00000004;
+ timestamp_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableState_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.Builder.class);
+    }
+
+ public static com.google.protobuf.Parser<TableState> PARSER =
+ new com.google.protobuf.AbstractParser<TableState>() {
+ public TableState parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableState(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableState> getParserForType() {
+ return PARSER;
+ }
+
+ /**
+ * Protobuf enum {@code hbase.pb.TableState.State}
+ *
+ * <pre>
+ * Table's current state
+ * </pre>
+ */
+ public enum State
+ implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ * <code>ENABLED = 0;</code>
+ */
+ ENABLED(0, 0),
+ /**
+ * <code>DISABLED = 1;</code>
+ */
+ DISABLED(1, 1),
+ /**
+ * <code>DISABLING = 2;</code>
+ */
+ DISABLING(2, 2),
+ /**
+ * <code>ENABLING = 3;</code>
+ */
+ ENABLING(3, 3),
+ ;
+
+ /**
+ * <code>ENABLED = 0;</code>
+ */
+ public static final int ENABLED_VALUE = 0;
+ /**
+ * <code>DISABLED = 1;</code>
+ */
+ public static final int DISABLED_VALUE = 1;
+ /**
+ * <code>DISABLING = 2;</code>
+ */
+ public static final int DISABLING_VALUE = 2;
+ /**
+ * <code>ENABLING = 3;</code>
+ */
+ public static final int ENABLING_VALUE = 3;
+
+
+ public final int getNumber() { return value; }
+
+ public static State valueOf(int value) {
+ switch (value) {
+ case 0: return ENABLED;
+ case 1: return DISABLED;
+ case 2: return DISABLING;
+ case 3: return ENABLING;
+ default: return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap<State>
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+ private static com.google.protobuf.Internal.EnumLiteMap<State>
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap<State>() {
+ public State findValueByNumber(int number) {
+ return State.valueOf(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor
+ getValueDescriptor() {
+ return getDescriptor().getValues().get(index);
+ }
+ public final com.google.protobuf.Descriptors.EnumDescriptor
+ getDescriptorForType() {
+ return getDescriptor();
+ }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.getDescriptor().getEnumTypes().get(0);
+      }
+
+ private static final State[] VALUES = values();
+
+ public static State valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException(
+ "EnumValueDescriptor is not for this type.");
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int index;
+ private final int value;
+
+ private State(int index, int value) {
+ this.index = index;
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:hbase.pb.TableState.State)
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableState.State state = 1;
+ public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State state_;
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+    public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState() {
+      return state_;
+    }
+
+ // required .hbase.pb.TableName table = 2;
+ public static final int TABLE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_;
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+      return table_;
+    }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+    public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+      return table_;
+    }
+
+ // optional uint64 timestamp = 3;
+ public static final int TIMESTAMP_FIELD_NUMBER = 3;
+ private long timestamp_;
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+
+    private void initFields() {
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+      table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      timestamp_ = 0L;
+    }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasState()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTable()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeEnum(1, state_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeMessage(2, table_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeUInt64(3, timestamp_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(1, state_.getNumber());
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, table_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(3, timestamp_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState) obj;
+
+ boolean result = true;
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result &&
+ (getState() == other.getState());
+ }
+ result = result && (hasTable() == other.hasTable());
+ if (hasTable()) {
+ result = result && getTable()
+ .equals(other.getTable());
+ }
+ result = result && (hasTimestamp() == other.hasTimestamp());
+ if (hasTimestamp()) {
+ result = result && (getTimestamp()
+ == other.getTimestamp());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getState());
+ }
+ if (hasTable()) {
+ hash = (37 * hash) + TABLE_FIELD_NUMBER;
+ hash = (53 * hash) + getTable().hashCode();
+ }
+ if (hasTimestamp()) {
+ hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getTimestamp());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableState}
+ *
+ * <pre>
+ ** Denotes state of the table
+ * </pre>
+ */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableStateOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableState_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+      public Builder clear() {
+        super.clear();
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+        } else {
+          tableBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        timestamp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableState_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.state_ = state_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ if (tableBuilder_ == null) {
+ result.table_ = table_;
+ } else {
+ result.table_ = tableBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ result.timestamp_ = timestamp_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.getDefaultInstance()) return this;
+ if (other.hasState()) {
+ setState(other.getState());
+ }
+ if (other.hasTable()) {
+ mergeTable(other.getTable());
+ }
+ if (other.hasTimestamp()) {
+ setTimestamp(other.getTimestamp());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasState()) {
+
+ return false;
+ }
+ if (!hasTable()) {
+
+ return false;
+ }
+ if (!getTable().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .hbase.pb.TableState.State state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState() {
+        return state_;
+      }
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableState.State state = 1;</code>
+ *
+ * <pre>
+ * This is the table's state.
+ * </pre>
+ */
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+        onChanged();
+        return this;
+      }
+
+ // required .hbase.pb.TableName table = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder> tableBuilder_;
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+ public boolean hasTable() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName getTable() {
+ if (tableBuilder_ == null) {
+ return table_;
+ } else {
+ return tableBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+ if (tableBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ table_ = value;
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+ public Builder setTable(
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder builderForValue) {
+ if (tableBuilder_ == null) {
+ table_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName value) {
+        if (tableBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              table_ != org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance()) {
+            table_ =
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial();
+ } else {
+ table_ = value;
+ }
+ onChanged();
+ } else {
+ tableBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000002;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+ public Builder clearTable() {
+ if (tableBuilder_ == null) {
+          table_ = org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder getTableBuilder() {
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return getTableFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      public org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder getTableOrBuilder() {
+ if (tableBuilder_ != null) {
+ return tableBuilder_.getMessageOrBuilder();
+ } else {
+ return table_;
+ }
+ }
+ /**
+ * <code>required .hbase.pb.TableName table = 2;</code>
+ */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>
+          getTableFieldBuilder() {
+        if (tableBuilder_ == null) {
+          tableBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.TableProtos.TableNameOrBuilder>(
+                  table_,
+                  getParentForChildren(),
+                  isClean());
+          table_ = null;
+        }
+        return tableBuilder_;
+      }
+
+ // optional uint64 timestamp = 3;
+ private long timestamp_ ;
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public boolean hasTimestamp() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public long getTimestamp() {
+ return timestamp_;
+ }
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public Builder setTimestamp(long value) {
+ bitField0_ |= 0x00000004;
+ timestamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 timestamp = 3;</code>
+ */
+ public Builder clearTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000004);
+ timestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableState)
+ }
+
+ static {
+ defaultInstance = new TableState(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableState)
+ }
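For reference, the generated TableState can be exercised like any other
protobuf 2.x message; a small sketch ("default"/"t1" are placeholder values,
and com.google.protobuf.ByteString is assumed imported):

    HBase170CompatibilityProtos.TableState state =
        HBase170CompatibilityProtos.TableState.newBuilder()
            .setState(HBase170CompatibilityProtos.TableState.State.DISABLED)
            .setTable(TableProtos.TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8("t1")))
            .build();
    byte[] bytes = state.toByteArray(); // round-trips via parseFrom(bytes)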
+
+ public interface TableDescriptorOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .hbase.pb.TableSchema schema = 1;
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ boolean hasSchema();
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema();
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder();
+
+ // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    boolean hasState();
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState();
+  }
+ /**
+ * Protobuf type {@code hbase.pb.TableDescriptor}
+ *
+ * <pre>
+ ** On HDFS representation of table state.
+ * </pre>
+ */
+ public static final class TableDescriptor extends
+ com.google.protobuf.GeneratedMessage
+ implements TableDescriptorOrBuilder {
+ // Use TableDescriptor.newBuilder() to construct.
+    private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final TableDescriptor defaultInstance;
+ public static TableDescriptor getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public TableDescriptor getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private TableDescriptor(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = schema_.toBuilder();
+              }
+              schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(schema_);
+                schema_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                state_ = value;
+              }
+              break;
+            }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.Builder.class);
+    }
+
+ public static com.google.protobuf.Parser<TableDescriptor> PARSER =
+ new com.google.protobuf.AbstractParser<TableDescriptor>() {
+ public TableDescriptor parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new TableDescriptor(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<TableDescriptor> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .hbase.pb.TableSchema schema = 1;
+ public static final int SCHEMA_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_;
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public boolean hasSchema() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+      return schema_;
+    }
+    /**
+     * <code>required .hbase.pb.TableSchema schema = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+      return schema_;
+    }
+
+ // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+ public static final int STATE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State state_;
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState() {
+      return state_;
+    }
+
+    private void initFields() {
+      schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+    }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasSchema()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getSchema().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, schema_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeEnum(2, state_.getNumber());
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, schema_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeEnumSize(2, state_.getNumber());
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor other = (org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor) obj;
+
+ boolean result = true;
+ result = result && (hasSchema() == other.hasSchema());
+ if (hasSchema()) {
+ result = result && getSchema()
+ .equals(other.getSchema());
+ }
+ result = result && (hasState() == other.hasState());
+ if (hasState()) {
+ result = result &&
+ (getState() == other.getState());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasSchema()) {
+ hash = (37 * hash) + SCHEMA_FIELD_NUMBER;
+ hash = (53 * hash) + getSchema().hashCode();
+ }
+ if (hasState()) {
+ hash = (37 * hash) + STATE_FIELD_NUMBER;
+ hash = (53 * hash) + hashEnum(getState());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code hbase.pb.TableDescriptor}
+ *
+ * <pre>
+ ** On HDFS representation of table state.
+ * </pre>
+ */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptorOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableDescriptor_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getSchemaFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+      public Builder clear() {
+        super.clear();
+        if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+        } else {
+          schemaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.internal_static_hbase_pb_TableDescriptor_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor build() {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (schemaBuilder_ == null) {
+ result.schema_ = schema_;
+ } else {
+ result.schema_ = schemaBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.state_ = state_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor.getDefaultInstance()) return this;
+ if (other.hasSchema()) {
+ mergeSchema(other.getSchema());
+ }
+ if (other.hasState()) {
+ setState(other.getState());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasSchema()) {
+
+ return false;
+ }
+ if (!getSchema().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableDescriptor) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+ private int bitField0_;
+
+ // required .hbase.pb.TableSchema schema = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_;
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public boolean hasSchema() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() {
+ if (schemaBuilder_ == null) {
+ return schema_;
+ } else {
+ return schemaBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+      public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+ if (schemaBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ schema_ = value;
+ onChanged();
+ } else {
+ schemaBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public Builder setSchema(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) {
+ if (schemaBuilder_ == null) {
+ schema_ = builderForValue.build();
+ onChanged();
+ } else {
+ schemaBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+      public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) {
+        if (schemaBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) {
+            schema_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial();
+ } else {
+ schema_ = value;
+ }
+ onChanged();
+ } else {
+ schemaBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public Builder clearSchema() {
+ if (schemaBuilder_ == null) {
+          schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance();
+ onChanged();
+ } else {
+ schemaBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getSchemaFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() {
+ if (schemaBuilder_ != null) {
+ return schemaBuilder_.getMessageOrBuilder();
+ } else {
+ return schema_;
+ }
+ }
+ /**
+ * <code>required .hbase.pb.TableSchema schema = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>
+ getSchemaFieldBuilder() {
+ if (schemaBuilder_ == null) {
+ schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>(
+ schema_,
+ getParentForChildren(),
+ isClean());
+ schema_ = null;
+ }
+ return schemaBuilder_;
+ }
+
+ // optional .hbase.pb.TableState.State state = 2 [default = ENABLED];
+ private org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+ /**
+ * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+ */
+ public boolean hasState() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State getState() {
+ return state_;
+ }
+ /**
+ * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+ */
+ public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ state_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional .hbase.pb.TableState.State state = 2 [default = ENABLED];</code>
+ */
+ public Builder clearState() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ state_ = org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos.TableState.State.ENABLED;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:hbase.pb.TableDescriptor)
+ }
+
+ static {
+ defaultInstance = new TableDescriptor(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:hbase.pb.TableDescriptor)
+ }
+
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableState_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableState_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_hbase_pb_TableDescriptor_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_hbase_pb_TableDescriptor_fieldAccessorTable;
+
+ public static com.google.protobuf.Descriptors.FileDescriptor
+ getDescriptor() {
+ return descriptor;
+ }
+ private static com.google.protobuf.Descriptors.FileDescriptor
+ descriptor;
+ static {
+ java.lang.String[] descriptorData = {
+ "\n0HBase-1.7-TableSerialization-Compatibi" +
+ "lity.proto\022\010hbase.pb\032\013HBase.proto\032\013Table" +
+ ".proto\"\257\001\n\nTableState\022)\n\005state\030\001
\002(\0162\032.h" +
+ "base.pb.TableState.State\022\"\n\005table\030\002 \002(\0132" +
+ "\023.hbase.pb.TableName\022\021\n\ttimestamp\030\003 \001(\004\"" +
+ "?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tD" +
+ "ISABLING\020\002\022\014\n\010ENABLING\020\003\"l\n\017TableDescrip" +
+ "tor\022%\n\006schema\030\001 \002(\0132\025.hbase.pb.TableSche" +
+ "ma\0222\n\005state\030\002 \001(\0162\032.hbase.pb.TableState." +
+ "State:\007ENABLEDBN\n*org.apache.hadoop.hbas",
+ "e.protobuf.generatedB\033HBase170Compatibil" +
+ "ityProtosH\001\240\001\001"
+ };
+ com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+ new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+ public com.google.protobuf.ExtensionRegistry assignDescriptors(
+ com.google.protobuf.Descriptors.FileDescriptor root) {
+ descriptor = root;
+ internal_static_hbase_pb_TableState_descriptor =
+ getDescriptor().getMessageTypes().get(0);
+ internal_static_hbase_pb_TableState_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableState_descriptor,
+ new java.lang.String[] { "State", "Table", "Timestamp", });
+ internal_static_hbase_pb_TableDescriptor_descriptor =
+ getDescriptor().getMessageTypes().get(1);
+ internal_static_hbase_pb_TableDescriptor_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_hbase_pb_TableDescriptor_descriptor,
+ new java.lang.String[] { "Schema", "State", });
+ return null;
+ }
+ };
+ com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ org.apache.hadoop.hbase.protobuf.generated.TableProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/HBase-1.7-TableSerialization-Compatibility.proto b/hbase-protocol/src/main/protobuf/HBase-1.7-TableSerialization-Compatibility.proto
new file mode 100644
index 0000000..7886574
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/HBase-1.7-TableSerialization-Compatibility.proto
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "HBase170CompatibilityProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+import "Table.proto";
+
+/**
+ See HBASE-26021 for context. This exists because of an incompatible change committed to
+ the HBase 1.7.0 release. The change was reverted in 1.7.1, but in order to provide an
+ upgrade path for users who installed 1.7.0, we need to be able to read the serialized
+ TableState definitions from TableDescriptor objects and convert them into HBase 1.7.1
+ compatible definitions for a smooth upgrade. Other than this use case, these proto
+ definitions are not expected to be used anywhere in the code.
+ */
+
+/** Denotes state of the table */
+message TableState {
+ // Table's current state
+ enum State {
+ ENABLED = 0;
+ DISABLED = 1;
+ DISABLING = 2;
+ ENABLING = 3;
+ }
+ // This is the table's state.
+ required State state = 1;
+ required TableName table = 2;
+ optional uint64 timestamp = 3;
+}
+
+/** On HDFS representation of table state. */
+message TableDescriptor {
+ required TableSchema schema = 1;
+ optional TableState.State state = 2 [ default = ENABLED ];
+}
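
A minimal sketch (illustration only, not part of the patch) of decoding a
1.7.0-format descriptor with these compatibility protos; the PB-magic handling
via ProtobufUtil.lengthOfPBMagic() and the static HTableDescriptor.convert(TableSchema)
converter are assumed from the existing branch-1 APIs.

    import java.util.Arrays;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBase170CompatibilityProtos;

    public final class Decode170DescriptorSketch {
      // 1.7.0 wrote a TableDescriptor(schema, state) message where other versions
      // expect a bare TableSchema; that mismatch is what breaks deserialization.
      static HTableDescriptor decode(byte[] content) throws Exception {
        int pblen = ProtobufUtil.lengthOfPBMagic();
        HBase170CompatibilityProtos.TableDescriptor dtd =
            HBase170CompatibilityProtos.TableDescriptor.parseFrom(
                Arrays.copyOfRange(content, pblen, content.length));
        // The embedded schema is the regular hbase.pb.TableSchema, so the
        // standard converter applies; the state (default ENABLED) rides along.
        return HTableDescriptor.convert(dtd.getSchema());
      }
    }
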
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e7c2f1a..2d052e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -760,12 +761,20 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
* below after we determine if cluster startup or failover.
*/
+ // Repair any faulty table descriptors left behind by 1.7.0. See HBASE-26021 for context.
+ // This must happen before Master FS init, as the system tables themselves could have the
+ // faulty serialization.
+ if (tableDescriptors instanceof FSTableDescriptors) {
+ ((FSTableDescriptors) tableDescriptors).repairHBase170TableDescriptors(zooKeeper);
+ }
+
status.setStatus("Initializing Master file system");
this.masterActiveTime = System.currentTimeMillis();
// TODO: Do this using Dependency Injection, using PicoContainer, Guice or Spring.
this.fileSystemManager = new MasterFileSystem(this, this);
+
// enable table descriptors cache
this.tableDescriptors.setCacheOn();
// set the META's descriptor to the correct replication
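
A hedged sketch of exercising this bootstrap hook in isolation, mirroring the
ordering above (conf, fs, rootDir, and zkWatcher are placeholders; the
FSTableDescriptors constructor, setCacheOn(), and get() come from branch-1):

    // Repair must run before any descriptor reads or caching, as in HMaster above.
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir);
    fstd.repairHBase170TableDescriptors(zkWatcher); // rewrites 1.7.0-format files
    fstd.setCacheOn();                              // cache only after the rewrite pass
    HTableDescriptor htd = fstd.get(TableName.valueOf("t1")); // now parses cleanly
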
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index b0592cf..e475635 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -30,6 +30,7 @@ import java.util.regex.Pattern;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DeprecatedTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -45,6 +46,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
@@ -500,16 +505,20 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws TableInfoMissingException if there is no descriptor
*/
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
- boolean rewritePb)
- throws IOException {
+ boolean rewritePb) throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
throw new TableInfoMissingException("No table descriptor file under " + tableDir);
}
- return readTableDescriptor(fs, status, rewritePb);
+ return readTableDescriptor(fs, status, rewritePb).first;
}
- private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+ /**
+ * Reads the HTableDescriptor from FS. This handles any deprecated TableDescriptor objects
+ * from HBase 1.7.0's faulty serialization and rewrites them on fs. Returns the corresponding
+ * table's State so that the caller can populate it back into ZK if needed.
+ */
+ private static Pair<HTableDescriptor, Table> readTableDescriptor(FileSystem fs, FileStatus status,
boolean rewritePb) throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
@@ -520,16 +529,23 @@ public class FSTableDescriptors implements TableDescriptors {
fsDataInputStream.close();
}
HTableDescriptor htd = null;
+ // From deprecated TableDescriptor, if any. Null otherwise.
+ Table tableState = null;
try {
htd = HTableDescriptor.parseFrom(content);
} catch (DeserializationException e) {
// we have old HTableDescriptor here
try {
- HTableDescriptor ohtd = HTableDescriptor.parseFrom(content);
- LOG.warn("Found old table descriptor, converting to new format for
table " +
- ohtd.getTableName());
- htd = new HTableDescriptor(ohtd);
- if (rewritePb) rewriteTableDescriptor(fs, status, htd);
+ DeprecatedTableDescriptor dtd = DeprecatedTableDescriptor.parseFrom(content);
+ htd = dtd.getHTableDescriptor();
+ tableState = dtd.getTableState();
+ LOG.warn("Found incompatible table descriptor from 1.7.0 version: "
+ + dtd.getHTableDescriptor().getTableName() + " state: " + tableState.getState().name());
+ if (rewritePb) {
+ LOG.warn("converting to new format for table " + htd.getTableName());
+ rewriteTableDescriptor(fs, status, htd);
+ rewritePb = false; // already rewritten
+ }
} catch (DeserializationException e1) {
throw new IOException("content=" + Bytes.toShort(content), e1);
}
@@ -538,12 +554,11 @@ public class FSTableDescriptors implements TableDescriptors {
// Convert the file over to be pb before leaving here.
rewriteTableDescriptor(fs, status, htd);
}
- return htd;
+ return new Pair<>(htd, tableState);
}
private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
- final HTableDescriptor td)
- throws IOException {
+ final HTableDescriptor td) throws IOException {
Path tableInfoDir = status.getPath().getParent();
Path tableDir = tableInfoDir.getParent();
writeTableDescriptor(fs, td, tableDir, status);
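
A hedged sketch of the state translation that DeprecatedTableDescriptor.getTableState()
presumably performs (its body is not shown in this diff): the 1.7.0 TableState.State
names line up one-for-one with ZooKeeperProtos.Table.State, so a name-based mapping
suffices to produce the ZK-format state that readTableDescriptor returns above.

    // Hypothetical helper; both enums are Java enums, so valueOf(name) is safe here.
    static ZooKeeperProtos.Table toZkTableState(
        HBase170CompatibilityProtos.TableState.State state) {
      return ZooKeeperProtos.Table.newBuilder()
          .setState(ZooKeeperProtos.Table.State.valueOf(state.name()))
          .build();
    }
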
@@ -724,7 +739,7 @@ public class FSTableDescriptors implements TableDescriptors {
LOG.debug("Current tableInfoPath = " + status.getPath());
if (!forceCreation) {
if (fs.exists(status.getPath()) && status.getLen() > 0) {
- if (readTableDescriptor(fs, status, false).equals(htd)) {
+ if (readTableDescriptor(fs, status, false).first.equals(htd)) {
LOG.debug("TableInfo already exists.. Skipping creation");
return false;
}
@@ -735,5 +750,41 @@ public class FSTableDescriptors implements TableDescriptors {
return p != null;
}
+ /**
+ * Reads all the table descriptors from fs and populates any missing TableStates. Should be
+ * called once at HMaster bootstrap, before any other FSTableDescriptors methods, as they can
+ * potentially overwrite the descriptors' states. Not thread safe.
+ */
+ public void repairHBase170TableDescriptors(final ZooKeeperWatcher zkw)
+ throws IOException, KeeperException {
+ LOG.info("Attempting to repair HBase 1.7.0 tables, if any.");
+ for (Path tableDir : FSUtils.getTableDirs(fs, rootdir)) {
+ FileStatus status = getTableInfoPath(fs, tableDir, false);
+ if (status == null) {
+ LOG.warn("No table descriptor file under " + tableDir);
+ continue;
+ }
+ // Read and rewrite the table descriptors from FS, if any.
+ Pair<HTableDescriptor, Table> result = readTableDescriptor(fs, status, true);
+ if (result.second == null) {
+ // No deprecated TableDescriptor
+ continue;
+ }
+ TableName tableName = result.first.getTableName();
+ Table tableState = result.second;
+ LOG.warn("Rewriting ZK Table state for table " + tableName);
+ // Tricky to plumb the TableStateManager in here, so instead assume the default ZK-based
+ // implementation and overwrite the table state znodes directly.
+ String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+ if (ZKUtil.checkExists(zkw, znode) != -1) {
+ LOG.warn("Table state znode already exists for table: " + tableName +
". Ignoring.");
+ continue;
+ }
+ ZKUtil.createAndFailSilent(zkw, znode);
+ byte [] data = ProtobufUtil.prependPBMagic(tableState.toByteArray());
+ ZKUtil.setData(zkw, znode, data);
+ LOG.info("Repaired ZK table state for table: " + tableName);
+ }
+ }
}
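
For completeness, a hedged sketch of verifying a repaired znode: the payload written
above is PB magic followed by a ZooKeeperProtos.Table message, the same format the
ZK-based table state manager reads back. zkw and znode are the values from the repair
loop; the assertion assumes the table was ENABLED.

    byte[] data = ZKUtil.getData(zkw, znode);
    int pblen = ProtobufUtil.lengthOfPBMagic();
    ZooKeeperProtos.Table t = ZooKeeperProtos.Table.parseFrom(
        java.util.Arrays.copyOfRange(data, pblen, data.length));
    assert t.getState() == ZooKeeperProtos.Table.State.ENABLED; // state preserved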