Use coprocessor to set storage policy

Project: http://git-wip-us.apache.org/repos/asf/trafodion/repo
Commit: http://git-wip-us.apache.org/repos/asf/trafodion/commit/c5bab335
Tree: http://git-wip-us.apache.org/repos/asf/trafodion/tree/c5bab335
Diff: http://git-wip-us.apache.org/repos/asf/trafodion/diff/c5bab335

Branch: refs/heads/master
Commit: c5bab335da0e2d3bb306e0c4c2ab577f26971d4a
Parents: 06af8fc
Author: Liu Ming <ovis_p...@sina.com>
Authored: Sun May 6 21:02:15 2018 -0400
Committer: Liu Ming <ovis_p...@sina.com>
Committed: Sun May 6 21:02:15 2018 -0400

----------------------------------------------------------------------
 .../transactional/TransactionManager.java       |   88 +-
 .../transactional/TrxRegionEndpoint.java.tmpl   |   72 +
 .../generated/SsccRegionProtos.java             |   23 -
 .../generated/TrxRegionProtos.java              | 1505 ++++++++++++++++--
 .../hbase-trx/src/main/protobuf/TrxRegion.proto |   13 +
 core/sql/regress/seabase/DIFF002.KNOWN          |   48 -
 .../java/org/trafodion/sql/HBaseClient.java     |   79 +-
 7 files changed, 1582 insertions(+), 246 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/trafodion/blob/c5bab335/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
----------------------------------------------------------------------
diff --git 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
index 6c51568..aeb86ce 100644
--- 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
+++ 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/client/transactional/TransactionManager.java
@@ -65,6 +65,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -85,8 +86,13 @@ import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProt
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.PushEpochResponse;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.RecoveryRequestRequest;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.RecoveryRequestResponse;
+import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse;
+import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrxRegionService;
 
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -101,6 +107,8 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import org.apache.hadoop.ipc.RemoteException;
 
+import org.apache.hadoop.fs.FileSystem;
+
 import com.google.protobuf.ByteString;
 
 import org.apache.hadoop.hbase.client.transactional.TmDDL;
@@ -173,6 +181,9 @@ public class TransactionManager {
   public static final int HBASE_DURABILITY = 20;
   public static final int HBASE_MEMSTORE_FLUSH_SIZE = 21;
   public static final int HBASE_SPLIT_POLICY = 22;
+  public static final int HBASE_CACHE_DATA_IN_L1 = 23;
+  public static final int HBASE_PREFETCH_BLOCKS_ON_OPEN = 24;
+  public static final int HBASE_HDFS_STORAGE_POLICY= 25;
 
   public static final int TM_COMMIT_FALSE = 0;
   public static final int TM_COMMIT_READ_ONLY = 1;
@@ -182,6 +193,7 @@ public class TransactionManager {
   public static final int TM_SLEEP = 1000;      // One second
   public static final int TM_SLEEP_INCR = 5000; // Five seconds
   public static final int TM_RETRY_ATTEMPTS = 5;
+  Configuration config;
 
   private IdTm idServer;
   private static final int ID_TM_SERVER_TIMEOUT = 1000;
@@ -1663,6 +1675,7 @@ public class TransactionManager {
     private TransactionManager(final Configuration conf, Connection 
connection) throws ZooKeeperConnectionException, IOException {
         this(LocalTransactionLogger.getInstance(), conf, connection);
         this.connection = connection;
+        this.config = conf;
         int intThreads = 16;
         String retryAttempts = System.getenv("TMCLIENT_RETRY_ATTEMPTS");
         String numThreads = System.getenv("TM_JAVA_THREAD_POOL_SIZE");
@@ -2795,10 +2808,13 @@ public class TransactionManager {
     private class ChangeFlags {
         boolean tableDescriptorChanged;
         boolean columnDescriptorChanged;
+        boolean storagePolicyChanged;
+        String storagePolicy_;
 
         ChangeFlags() {
            tableDescriptorChanged = false;
            columnDescriptorChanged = false;
+           storagePolicyChanged = false;
         }
 
         void setTableDescriptorChanged() {
@@ -2816,6 +2832,16 @@ public class TransactionManager {
        boolean columnDescriptorChanged() {
           return columnDescriptorChanged;
        }
+
+       void setStoragePolicyChanged(String str) {
+           storagePolicy_ = str;
+           storagePolicyChanged = true;
+       }
+
+       boolean storagePolicyChanged()    {
+           return storagePolicyChanged;
+       }
+ 
     }
 
    private ChangeFlags setDescriptors(Object[] tableOptions,
@@ -2996,6 +3022,11 @@ public class TransactionManager {
                    (Long.parseLong(tableOption));
                returnStatus.setTableDescriptorChanged();
                break ;
+           case HBASE_HDFS_STORAGE_POLICY:
+               //TODO: HBase 2.0 supports setting the storage policy directly
+               //on the table descriptor, so once we move to HBase 2.0 this
+               //can be set here instead of going through HDFS.
+             returnStatus.setStoragePolicyChanged(tableOption);
+             break ;
            case HBASE_SPLIT_POLICY:
                   // This method not yet available in earlier versions
                   // desc.setRegionSplitPolicyClassName(tableOption)); 
@@ -3054,6 +3085,9 @@ public class TransactionManager {
               admin.modifyColumn(tableName,colDesc);
               waitForCompletion(tblName,admin);
            }
+           else if (status.storagePolicyChanged()) {
+             setStoragePolicy(tblName, status.storagePolicy_);
+           }
         } finally {
            admin.close();
         }
@@ -3138,7 +3172,7 @@ public class TransactionManager {
             admin.createTable(hdesc);
             admin.close();
     }
-
+    
     //Called only by DoPrepare.
     public void disableTable(final TransactionState transactionState, String 
tblName)
             throws IOException{
@@ -3226,5 +3260,57 @@ public class TransactionManager {
 
         return resultArray[0].getResultList();
     }
+
+    public void setStoragePolicy(String tblName, String policy)
+      throws IOException {
+
+      try{
+        Table tbl = connection.getTable(TableName.valueOf(tblName));
+        String rowkey = "0";
+        CoprocessorRpcChannel channel = 
tbl.coprocessorService(rowkey.getBytes());
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrxRegionService.BlockingInterface
 service =
+            
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrxRegionService.newBlockingStub(channel);
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.Builder
 request =
+         
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.newBuilder();
+        String hbaseRoot = config.get("hbase.rootdir");
+        FileSystem fs = FileSystem.get(config);
+
+        // Construct the HDFS directory path for the table,
+        // accounting for an optional namespace prefix ("namespace:table").
+        String[] parts = tblName.split(":");
+        String namespacestr="";
+        String fullPath = hbaseRoot + "/data/" ;
+        String fullPath2 = hbaseRoot + "/data/default/";
+        if(fs.exists(new Path(fullPath2)))
+          fullPath = fullPath2;
+
+        if(parts.length >1) //have namespace
+          fullPath = fullPath + parts[0] + "/" + parts[1];
+        else
+          fullPath = fullPath + tblName;
+
+        request.setPath(fullPath);
+        request.setPolicy(policy);
+
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 ret =
+          service.setStoragePolicy(null,request.build());
+
+        //handle result and error
+        if( ret == null)
+        {
+          LOG.error("setStoragePolicy Response ret null ");
+          throw new IOException("coprocessor not response");
+        }
+        else if (ret.getStatus() == false)
+        {
+          LOG.error("setStoragePolicy Response ret false: " + 
ret.getException());
+          throw new IOException(ret.getException());
+        }
+      }
+      catch (Exception e) {
+        throw new IOException(e);
+      }
+
+    }
 }
 

http://git-wip-us.apache.org/repos/asf/trafodion/blob/c5bab335/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/TrxRegionEndpoint.java.tmpl
----------------------------------------------------------------------
diff --git 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/TrxRegionEndpoint.java.tmpl
 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/TrxRegionEndpoint.java.tmpl
index 430378d..050b9bc 100644
--- 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/TrxRegionEndpoint.java.tmpl
+++ 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/TrxRegionEndpoint.java.tmpl
@@ -248,6 +248,8 @@ import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProt
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafEstimateRowCountResponse;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse;
+import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse;
+import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest;
 import 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrxRegionService;
 #ifdef CDH5.7 APACHE1.2
 import 
org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
@@ -270,6 +272,10 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
@@ -2404,6 +2410,72 @@ CoprocessorService, Coprocessor {
     done.run(TlogDel_response);
  }
 
+  public void setStoragePolicy(RpcController controller,
+                                             TrafSetStoragePolicyRequest 
request,
+                                             
RpcCallback<TrafSetStoragePolicyResponse> done) {
+    String path = request.getPath();
+    String policy = request.getPolicy();
+    if (LOG.isTraceEnabled()) LOG.trace("setStoragePolicy ENTRY. path " +  
path + " policy " + policy );
+
+    IOException t=null;
+    try {
+      invokeSetStoragePolicy(fs, path, policy);
+    }
+    catch (IOException e) {
+      t = e; 
+    }
+  
+    
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.Builder
 setStoragePolicyResponseBuilder =
+      
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.newBuilder();
+
+    if(t != null)
+    {
+      LOG.error("setStoragePolicy error : " + t.toString() );
+      setStoragePolicyResponseBuilder.setStatus(false);
+      setStoragePolicyResponseBuilder.setException(t.toString());
+    }
+    else
+    {
+      setStoragePolicyResponseBuilder.setStatus(true);
+      setStoragePolicyResponseBuilder.setException("");
+    }
+   
+    TrafSetStoragePolicyResponse resp = 
setStoragePolicyResponseBuilder.build();
+
+    done.run(resp);
+    
+  }
+
+  private static void invokeSetStoragePolicy(final FileSystem fs, final String 
pathstr,
+      final String storagePolicy)
+       throws IOException {
+        Path path = new Path(pathstr);
+        Method m = null;
+        try {
+            m = fs.getClass().getDeclaredMethod("setStoragePolicy",
+            new Class<?>[] { Path.class, String.class });
+            m.setAccessible(true);
+        } catch (NoSuchMethodException e) {
+            m = null;
+            throw new IOException("FileSystem doesn't support 
setStoragePolicy");
+        } catch (SecurityException e) {
+          m = null;
+          throw new IOException("No access to setStoragePolicy on FileSystem 
from the SecurityManager");
+        }
+        if (m != null) {
+          try {
+            m.invoke(fs, path, storagePolicy);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + 
path);
+            }
+          } catch (Exception e) {
+               LOG.error("invoke set storage policy error : " + e);
+               throw new IOException(e);
+          }
+        }
+    }
+
+
   public void getTransactionStatesPriorToAsn(RpcController controller,
                                              
TlogTransactionStatesFromIntervalRequest request,
                                              
RpcCallback<TlogTransactionStatesFromIntervalResponse> done) {

http://git-wip-us.apache.org/repos/asf/trafodion/blob/c5bab335/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/SsccRegionProtos.java
----------------------------------------------------------------------
diff --git 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/SsccRegionProtos.java
 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/SsccRegionProtos.java
index dbbbba7..d2782a0 100644
--- 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/SsccRegionProtos.java
+++ 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/SsccRegionProtos.java
@@ -1,26 +1,3 @@
-/**
- * * @@@ START COPYRIGHT @@@
- * *
- * * Licensed to the Apache Software Foundation (ASF) under one
- * * or more contributor license agreements.  See the NOTICE file
- * * distributed with this work for additional information
- * * regarding copyright ownership.  The ASF licenses this file
- * * to you under the Apache License, Version 2.0 (the
- * * "License"); you may not use this file except in compliance
- * * with the License.  You may obtain a copy of the License at
- * *
- * *   http://www.apache.org/licenses/LICENSE-2.0
- * *
- * * Unless required by applicable law or agreed to in writing,
- * * software distributed under the License is distributed on an
- * * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * * KIND, either express or implied.  See the License for the
- * * specific language governing permissions and limitations
- * * under the License.
- * *
- * * @@@ END COPYRIGHT @@@
- * **/
-
 // Generated by the protocol buffer compiler.  DO NOT EDIT!
 // source: SsccRegion.proto
 

http://git-wip-us.apache.org/repos/asf/trafodion/blob/c5bab335/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/TrxRegionProtos.java
----------------------------------------------------------------------
diff --git 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/TrxRegionProtos.java
 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/TrxRegionProtos.java
index ea96915..c4ef007 100755
--- 
a/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/TrxRegionProtos.java
+++ 
b/core/sqf/src/seatrans/hbase-trx/src/main/java/org/apache/hadoop/hbase/coprocessor/transactional/generated/TrxRegionProtos.java
@@ -1,26 +1,3 @@
-/**
-* @@@ START COPYRIGHT @@@
-*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied.  See the License for the
-* specific language governing permissions and limitations
-* under the License.
-*
-* @@@ END COPYRIGHT @@@
-**/
-
 // Generated by the protocol buffer compiler.  DO NOT EDIT!
 // source: TrxRegion.proto
 
@@ -43108,6 +43085,1217 @@ public final class TrxRegionProtos {
     // @@protoc_insertion_point(class_scope:TrafEstimateRowCountResponse)
   }
 
+  public interface TrafSetStoragePolicyRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string path = 1;
+    /**
+     * <code>required string path = 1;</code>
+     */
+    boolean hasPath();
+    /**
+     * <code>required string path = 1;</code>
+     */
+    java.lang.String getPath();
+    /**
+     * <code>required string path = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getPathBytes();
+
+    // required string policy = 2;
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    boolean hasPolicy();
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    java.lang.String getPolicy();
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getPolicyBytes();
+  }
+  /**
+   * Protobuf type {@code TrafSetStoragePolicyRequest}
+   */
+  public static final class TrafSetStoragePolicyRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements TrafSetStoragePolicyRequestOrBuilder {
+    // Use TrafSetStoragePolicyRequest.newBuilder() to construct.
+    private 
TrafSetStoragePolicyRequest(com.google.protobuf.GeneratedMessage.Builder<?> 
builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TrafSetStoragePolicyRequest(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TrafSetStoragePolicyRequest defaultInstance;
+    public static TrafSetStoragePolicyRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TrafSetStoragePolicyRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TrafSetStoragePolicyRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              path_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              policy_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.class,
 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TrafSetStoragePolicyRequest> 
PARSER =
+        new com.google.protobuf.AbstractParser<TrafSetStoragePolicyRequest>() {
+      public TrafSetStoragePolicyRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TrafSetStoragePolicyRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TrafSetStoragePolicyRequest> 
getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string path = 1;
+    public static final int PATH_FIELD_NUMBER = 1;
+    private java.lang.Object path_;
+    /**
+     * <code>required string path = 1;</code>
+     */
+    public boolean hasPath() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string path = 1;</code>
+     */
+    public java.lang.String getPath() {
+      java.lang.Object ref = path_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          path_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string path = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getPathBytes() {
+      java.lang.Object ref = path_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        path_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string policy = 2;
+    public static final int POLICY_FIELD_NUMBER = 2;
+    private java.lang.Object policy_;
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    public boolean hasPolicy() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    public java.lang.String getPolicy() {
+      java.lang.Object ref = policy_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          policy_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string policy = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getPolicyBytes() {
+      java.lang.Object ref = policy_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        policy_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      path_ = "";
+      policy_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasPath()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasPolicy()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getPolicyBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getPathBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getPolicyBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder 
newBuilder(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code TrafSetStoragePolicyRequest}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequestOrBuilder
 {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.class,
 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.Builder.class);
+      }
+
+      // Construct using 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        path_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        policy_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyRequest_descriptor;
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 getDefaultInstanceForType() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.getDefaultInstance();
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 build() {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 buildPartial() {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 result = new 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.path_ = path_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.policy_ = policy_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest)
 {
+          return 
mergeFrom((org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder 
mergeFrom(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 other) {
+        if (other == 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.getDefaultInstance())
 return this;
+        if (other.hasPath()) {
+          bitField0_ |= 0x00000001;
+          path_ = other.path_;
+          onChanged();
+        }
+        if (other.hasPolicy()) {
+          bitField0_ |= 0x00000002;
+          policy_ = other.policy_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPath()) {
+          
+          return false;
+        }
+        if (!hasPolicy()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest)
 e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string path = 1;
+      private java.lang.Object path_ = "";
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public boolean hasPath() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public java.lang.String getPath() {
+        java.lang.Object ref = path_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          path_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getPathBytes() {
+        java.lang.Object ref = path_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          path_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public Builder setPath(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        path_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public Builder clearPath() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        path_ = getDefaultInstance().getPath();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string path = 1;</code>
+       */
+      public Builder setPathBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        path_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string policy = 2;
+      private java.lang.Object policy_ = "";
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public boolean hasPolicy() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public java.lang.String getPolicy() {
+        java.lang.Object ref = policy_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          policy_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getPolicyBytes() {
+        java.lang.Object ref = policy_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          policy_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public Builder setPolicy(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        policy_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public Builder clearPolicy() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        policy_ = getDefaultInstance().getPolicy();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string policy = 2;</code>
+       */
+      public Builder setPolicyBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        policy_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:TrafSetStoragePolicyRequest)
+    }
+
+    static {
+      defaultInstance = new TrafSetStoragePolicyRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:TrafSetStoragePolicyRequest)
+  }
+
+  public interface TrafSetStoragePolicyResponseOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool status = 1;
+    /**
+     * <code>required bool status = 1;</code>
+     */
+    boolean hasStatus();
+    /**
+     * <code>required bool status = 1;</code>
+     */
+    boolean getStatus();
+
+    // required string exception = 2;
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    boolean hasException();
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    java.lang.String getException();
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getExceptionBytes();
+  }
+  /**
+   * Protobuf type {@code TrafSetStoragePolicyResponse}
+   */
+  public static final class TrafSetStoragePolicyResponse extends
+      com.google.protobuf.GeneratedMessage
+      implements TrafSetStoragePolicyResponseOrBuilder {
+    // Use TrafSetStoragePolicyResponse.newBuilder() to construct.
+    private 
TrafSetStoragePolicyResponse(com.google.protobuf.GeneratedMessage.Builder<?> 
builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private TrafSetStoragePolicyResponse(boolean noInit) { this.unknownFields 
= com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final TrafSetStoragePolicyResponse defaultInstance;
+    public static TrafSetStoragePolicyResponse getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public TrafSetStoragePolicyResponse getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private TrafSetStoragePolicyResponse(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              status_ = input.readBool();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              exception_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyResponse_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.class,
 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<TrafSetStoragePolicyResponse> 
PARSER =
+        new com.google.protobuf.AbstractParser<TrafSetStoragePolicyResponse>() 
{
+      public TrafSetStoragePolicyResponse parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new TrafSetStoragePolicyResponse(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<TrafSetStoragePolicyResponse> 
getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool status = 1;
+    public static final int STATUS_FIELD_NUMBER = 1;
+    private boolean status_;
+    /**
+     * <code>required bool status = 1;</code>
+     */
+    public boolean hasStatus() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool status = 1;</code>
+     */
+    public boolean getStatus() {
+      return status_;
+    }
+
+    // required string exception = 2;
+    public static final int EXCEPTION_FIELD_NUMBER = 2;
+    private java.lang.Object exception_;
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    public boolean hasException() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    public java.lang.String getException() {
+      java.lang.Object ref = exception_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          exception_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string exception = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getExceptionBytes() {
+      java.lang.Object ref = exception_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        exception_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      status_ = false;
+      exception_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasStatus()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasException()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, status_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getExceptionBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, status_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getExceptionBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder 
newBuilder(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code TrafSetStoragePolicyResponse}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponseOrBuilder
 {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyResponse_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.class,
 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.Builder.class);
+      }
+
+      // Construct using 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        status_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        exception_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.internal_static_TrafSetStoragePolicyResponse_descriptor;
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 getDefaultInstanceForType() {
+        return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance();
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 build() {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 buildPartial() {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 result = new 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.status_ = status_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.exception_ = exception_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse)
 {
+          return 
mergeFrom((org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder 
mergeFrom(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 other) {
+        if (other == 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance())
 return this;
+        if (other.hasStatus()) {
+          setStatus(other.getStatus());
+        }
+        if (other.hasException()) {
+          bitField0_ |= 0x00000002;
+          exception_ = other.exception_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasStatus()) {
+          
+          return false;
+        }
+        if (!hasException()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse)
 e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool status = 1;
+      private boolean status_ ;
+      /**
+       * <code>required bool status = 1;</code>
+       */
+      public boolean hasStatus() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool status = 1;</code>
+       */
+      public boolean getStatus() {
+        return status_;
+      }
+      /**
+       * <code>required bool status = 1;</code>
+       */
+      public Builder setStatus(boolean value) {
+        bitField0_ |= 0x00000001;
+        status_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool status = 1;</code>
+       */
+      public Builder clearStatus() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        status_ = false;
+        onChanged();
+        return this;
+      }
+
+      // required string exception = 2;
+      private java.lang.Object exception_ = "";
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public boolean hasException() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public java.lang.String getException() {
+        java.lang.Object ref = exception_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          exception_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getExceptionBytes() {
+        java.lang.Object ref = exception_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          exception_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public Builder setException(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        exception_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public Builder clearException() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        exception_ = getDefaultInstance().getException();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string exception = 2;</code>
+       */
+      public Builder setExceptionBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        exception_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:TrafSetStoragePolicyResponse)
+    }
+
+    static {
+      defaultInstance = new TrafSetStoragePolicyResponse(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:TrafSetStoragePolicyResponse)
+  }
+
   public interface TransactionalAggregateRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -48128,6 +49316,14 @@ public final class TrxRegionProtos {
           
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest
 request,
           
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse>
 done);
 
+      /**
+       * <code>rpc setStoragePolicy(.TrafSetStoragePolicyRequest) returns 
(.TrafSetStoragePolicyResponse);</code>
+       */
+      public abstract void setStoragePolicy(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request,
+          
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse>
 done);
+
     }
 
     public static com.google.protobuf.Service newReflectiveService(
@@ -48413,6 +49609,14 @@ public final class TrxRegionProtos {
           impl.getMedian(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void setStoragePolicy(
+            com.google.protobuf.RpcController controller,
+            
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request,
+            
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse>
 done) {
+          impl.setStoragePolicy(controller, request, done);
+        }
+
       };
     }
 
@@ -48505,6 +49709,8 @@ public final class TrxRegionProtos {
               return impl.getStd(controller, 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest)request);
             case 34:
               return impl.getMedian(controller, 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest)request);
+            case 35:
+              return impl.setStoragePolicy(controller, 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -48589,6 +49795,8 @@ public final class TrxRegionProtos {
               return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest.getDefaultInstance();
             case 34:
               return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest.getDefaultInstance();
+            case 35:
+              return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -48673,6 +49881,8 @@ public final class TrxRegionProtos {
               return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance();
             case 34:
               return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance();
+            case 35:
+              return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -48961,6 +50171,14 @@ public final class TrxRegionProtos {
         
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest
 request,
         
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse>
 done);
 
+    /**
+     * <code>rpc setStoragePolicy(.TrafSetStoragePolicyRequest) returns 
(.TrafSetStoragePolicyResponse);</code>
+     */
+    public abstract void setStoragePolicy(
+        com.google.protobuf.RpcController controller,
+        
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request,
+        
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse>
 done);
+
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -49158,6 +50376,11 @@ public final class TrxRegionProtos {
             
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse>specializeCallback(
               done));
           return;
+        case 35:
+          this.setStoragePolicy(controller, 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest)request,
+            
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -49242,6 +50465,8 @@ public final class TrxRegionProtos {
           return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest.getDefaultInstance();
         case 34:
           return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest.getDefaultInstance();
+        case 35:
+          return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -49326,6 +50551,8 @@ public final class TrxRegionProtos {
           return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance();
         case 34:
           return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance();
+        case 35:
+          return 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -49871,6 +51098,21 @@ public final class TrxRegionProtos {
             
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.class,
             
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance()));
       }
+
+      public  void setStoragePolicy(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request,
+          
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse>
 done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(35),
+          controller,
+          request,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance(),
+          com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.class,
+            
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -50053,6 +51295,11 @@ public final class TrxRegionProtos {
           com.google.protobuf.RpcController controller,
           
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateRequest
 request)
           throws com.google.protobuf.ServiceException;
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 setStoragePolicy(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request)
+          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -50481,6 +51728,18 @@ public final class TrxRegionProtos {
           
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TransactionalAggregateResponse.getDefaultInstance());
       }
 
+
+      public 
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse
 setStoragePolicy(
+          com.google.protobuf.RpcController controller,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyRequest
 request)
+          throws com.google.protobuf.ServiceException {
+        return 
(org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse)
 channel.callBlockingMethod(
+          getDescriptor().getMethods().get(35),
+          controller,
+          request,
+          
org.apache.hadoop.hbase.coprocessor.transactional.generated.TrxRegionProtos.TrafSetStoragePolicyResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:TrxRegionService)
@@ -50767,6 +52026,16 @@ public final class TrxRegionProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_TrafEstimateRowCountResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_TrafSetStoragePolicyRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_TrafSetStoragePolicyRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_TrafSetStoragePolicyResponse_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_TrafSetStoragePolicyResponse_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_TransactionalAggregateRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -50949,88 +52218,94 @@ public final class TrxRegionProtos {
       "\003\022\026\n\016totalSizeBytes\030\002 
\002(\003\022\025\n\rputKVsSampl" +
       "ed\030\003 \002(\005\022\030\n\020nonPutKVsSampled\030\004 
\002(\005\022\027\n\017mi" +
       "ssingKVsCount\030\005 \002(\005\022\021\n\texception\030\006 
\001(\t\022\024" +
-      "\n\014hasException\030\007 \001(\010\"\264\001\n\035TransactionalAg" +
-      "gregateRequest\022\022\n\nregionName\030\001 \002(\014\022\025\n\rtr" +
-      "ansactionId\030\002 \002(\003\022\017\n\007startId\030\003 
\002(\003\022\036\n\026in" +
-      "terpreter_class_name\030\004 \002(\t\022\023\n\004scan\030\005 
\002(\013" +
-      "2\005.Scan\022\"\n\032interpreter_specific_bytes\030\006 " +
-      "\001(\014\"I\n\036TransactionalAggregateResponse\022\022\n",
-      "\nfirst_part\030\003 \003(\014\022\023\n\013second_part\030\004 
\001(\014\"x" +
-      "\n\022TransactionPersist\022\016\n\006txById\030\001 
\003(\003\022\024\n\014" +
-      "seqNoListSeq\030\002 \003(\003\022\024\n\014seqNoListTxn\030\003 
\003(\003" +
-      "\022\021\n\tnextSeqId\030\004 
\002(\003\022\023\n\013onlineEpoch\030\005 \002(\003" +
-      "\"\372\001\n\023TransactionStateMsg\022\014\n\004txId\030\001 
\002(\003\022\033" +
-      "\n\003put\030\002 
\003(\0132\016.MutationProto\022\036\n\006delete\030\003 " +
-      "\003(\0132\016.MutationProto\022\020\n\010putOrDel\030\004 
\003(\010\022\023\n" +
-      "\013txnsToCheck\030\005 \003(\003\022\023\n\013startSeqNum\030\006 
\002(\003\022" +
-      "\016\n\006seqNum\030\007 \002(\003\022\020\n\010logSeqId\030\010 
\002(\003\022\022\n\nrei" +
-      "nstated\030\t \002(\010\022\016\n\006status\030\n 
\002(\005\022\026\n\016commitP",
-      "rogress\030\013 \002(\0052\353\023\n\020TrxRegionService\022G\n\020ab" +
-      "ortTransaction\022\030.AbortTransactionRequest" +
-      "\032\031.AbortTransactionResponse\022_\n\030abortTran" +
-      "sactionMultiple\022 .AbortTransactionMultip" +
-      "leRequest\032!.AbortTransactionMultipleResp" +
-      "onse\022G\n\020beginTransaction\022\030.BeginTransact" +
-      "ionRequest\032\031.BeginTransactionResponse\022A\n" +
-      "\016checkAndDelete\022\026.CheckAndDeleteRequest\032" +
-      "\027.CheckAndDeleteResponse\022Y\n\026checkAndDele" +
-      "teRegionTx\022\036.CheckAndDeleteRegionTxReque",
-      "st\032\037.CheckAndDeleteRegionTxResponse\0228\n\013c" +
-      "heckAndPut\022\023.CheckAndPutRequest\032\024.CheckA" +
-      "ndPutResponse\022P\n\023checkAndPutRegionTx\022\033.C" +
-      "heckAndPutRegionTxRequest\032\034.CheckAndPutR" +
-      "egionTxResponse\022;\n\014closeScanner\022\024.CloseS" +
-      "cannerRequest\032\025.CloseScannerResponse\022)\n\006" +
-      "commit\022\016.CommitRequest\032\017.CommitResponse\022" +
-      "G\n\020commitIfPossible\022\030.CommitIfPossibleRe" +
-      "quest\032\031.CommitIfPossibleResponse\022V\n\025comm" +
-      "itRequestMultiple\022\035.CommitRequestMultipl",
-      "eRequest\032\036.CommitRequestMultipleResponse" +
-      "\022A\n\016commitMultiple\022\026.CommitMultipleReque" +
-      "st\032\027.CommitMultipleResponse\022>\n\rcommitReq" +
-      "uest\022\025.CommitRequestRequest\032\026.CommitRequ" +
-      "estResponse\022A\n\016deleteRegionTx\022\026.DeleteRe" +
-      "gionTxRequest\032\027.DeleteRegionTxResponse\022C" +
-      "\n\006delete\022\033.DeleteTransactionalRequest\032\034." +
-      "DeleteTransactionalResponse\022[\n\016deleteMul" +
-      "tiple\022#.DeleteMultipleTransactionalReque" +
-      "st\032$.DeleteMultipleTransactionalResponse",
-      "\022:\n\003get\022\030.GetTransactionalRequest\032\031.GetT" +
-      "ransactionalResponse\0228\n\013performScan\022\023.Pe" +
-      "rformScanRequest\032\024.PerformScanResponse\0228" +
-      "\n\013openScanner\022\023.OpenScannerRequest\032\024.Ope" +
-      "nScannerResponse\0228\n\013putRegionTx\022\023.PutReg" +
-      "ionTxRequest\032\024.PutRegionTxResponse\022:\n\003pu" +
-      "t\022\030.PutTransactionalRequest\032\031.PutTransac" +
-      "tionalResponse\022R\n\013putMultiple\022 .PutMulti" +
-      "pleTransactionalRequest\032!.PutMultipleTra" +
-      "nsactionalResponse\0228\n\017pushOnlineEpoch\022\021.",
-      "PushEpochRequest\032\022.PushEpochResponse\022D\n\017" +
-      "recoveryRequest\022\027.RecoveryRequestRequest" +
-      "\032\030.RecoveryRequestResponse\022<\n\021deleteTlog" +
-      "Entries\022\022.TlogDeleteRequest\032\023.TlogDelete" +
-      "Response\0220\n\007putTlog\022\021.TlogWriteRequest\032\022" +
-      ".TlogWriteResponse\022w\n\036getTransactionStat" +
-      "esPriorToAsn\022).TlogTransactionStatesFrom" +
-      "IntervalRequest\032*.TlogTransactionStatesF" +
-      "romIntervalResponse\022S\n\024trafEstimateRowCo" +
-      "unt\022\034.TrafEstimateRowCountRequest\032\035.Traf",
-      "EstimateRowCountResponse\022I\n\006GetMax\022\036.Tra" +
-      "nsactionalAggregateRequest\032\037.Transaction" +
-      "alAggregateResponse\022I\n\006GetMin\022\036.Transact" +
-      "ionalAggregateRequest\032\037.TransactionalAgg" +
-      "regateResponse\022I\n\006GetSum\022\036.Transactional" +
-      "AggregateRequest\032\037.TransactionalAggregat" +
-      "eResponse\022L\n\tGetRowNum\022\036.TransactionalAg" +
-      "gregateRequest\032\037.TransactionalAggregateR" +
-      "esponse\022I\n\006GetAvg\022\036.TransactionalAggrega" +
-      "teRequest\032\037.TransactionalAggregateRespon",
-      "se\022I\n\006GetStd\022\036.TransactionalAggregateReq" +
-      "uest\032\037.TransactionalAggregateResponse\022L\n" +
-      "\tGetMedian\022\036.TransactionalAggregateReque" +
-      "st\032\037.TransactionalAggregateResponseBS\n;o" +
-      "rg.apache.hadoop.hbase.coprocessor.trans" +
-      "actional.generatedB\017TrxRegionProtosH\001\210\001\001"
+      "\n\014hasException\030\007 \001(\010\";\n\033TrafSetStoragePo" +
+      "licyRequest\022\014\n\004path\030\001 
\002(\t\022\016\n\006policy\030\002 \002(" +
+      "\t\"A\n\034TrafSetStoragePolicyResponse\022\016\n\006sta" +
+      "tus\030\001 \002(\010\022\021\n\texception\030\002 
\002(\t\"\264\001\n\035Transac" +
+      "tionalAggregateRequest\022\022\n\nregionName\030\001 \002" +
+      "(\014\022\025\n\rtransactionId\030\002 
\002(\003\022\017\n\007startId\030\003 \002",
+      "(\003\022\036\n\026interpreter_class_name\030\004 
\002(\t\022\023\n\004sc" +
+      "an\030\005 \002(\0132\005.Scan\022\"\n\032interpreter_specific_" +
+      "bytes\030\006 \001(\014\"I\n\036TransactionalAggregateRes" +
+      "ponse\022\022\n\nfirst_part\030\003 \003(\014\022\023\n\013second_part" 
+
+      "\030\004 
\001(\014\"x\n\022TransactionPersist\022\016\n\006txById\030\001" +
+      " \003(\003\022\024\n\014seqNoListSeq\030\002 
\003(\003\022\024\n\014seqNoListT" +
+      "xn\030\003 \003(\003\022\021\n\tnextSeqId\030\004 
\002(\003\022\023\n\013onlineEpo" +
+      "ch\030\005 
\002(\003\"\372\001\n\023TransactionStateMsg\022\014\n\004txId" +
+      "\030\001 \002(\003\022\033\n\003put\030\002 
\003(\0132\016.MutationProto\022\036\n\006d" +
+      "elete\030\003 \003(\0132\016.MutationProto\022\020\n\010putOrDel\030",
+      "\004 \003(\010\022\023\n\013txnsToCheck\030\005 
\003(\003\022\023\n\013startSeqNu" +
+      "m\030\006 \002(\003\022\016\n\006seqNum\030\007 
\002(\003\022\020\n\010logSeqId\030\010 \002(" +
+      "\003\022\022\n\nreinstated\030\t \002(\010\022\016\n\006status\030\n 
\002(\005\022\026\n" +
+      "\016commitProgress\030\013 \002(\0052\274\024\n\020TrxRegionServi" +
+      "ce\022G\n\020abortTransaction\022\030.AbortTransactio" +
+      "nRequest\032\031.AbortTransactionResponse\022_\n\030a" +
+      "bortTransactionMultiple\022 .AbortTransacti" +
+      "onMultipleRequest\032!.AbortTransactionMult" +
+      "ipleResponse\022G\n\020beginTransaction\022\030.Begin" +
+      "TransactionRequest\032\031.BeginTransactionRes",
+      "ponse\022A\n\016checkAndDelete\022\026.CheckAndDelete" +
+      "Request\032\027.CheckAndDeleteResponse\022Y\n\026chec" +
+      "kAndDeleteRegionTx\022\036.CheckAndDeleteRegio" +
+      "nTxRequest\032\037.CheckAndDeleteRegionTxRespo" +
+      "nse\0228\n\013checkAndPut\022\023.CheckAndPutRequest\032" +
+      "\024.CheckAndPutResponse\022P\n\023checkAndPutRegi" +
+      "onTx\022\033.CheckAndPutRegionTxRequest\032\034.Chec" +
+      "kAndPutRegionTxResponse\022;\n\014closeScanner\022" +
+      "\024.CloseScannerRequest\032\025.CloseScannerResp" +
+      "onse\022)\n\006commit\022\016.CommitRequest\032\017.CommitR",
+      "esponse\022G\n\020commitIfPossible\022\030.CommitIfPo" +
+      "ssibleRequest\032\031.CommitIfPossibleResponse" +
+      "\022V\n\025commitRequestMultiple\022\035.CommitReques" +
+      "tMultipleRequest\032\036.CommitRequestMultiple" +
+      "Response\022A\n\016commitMultiple\022\026.CommitMulti" +
+      "pleRequest\032\027.CommitMultipleResponse\022>\n\rc" +
+      "ommitRequest\022\025.CommitRequestRequest\032\026.Co" +
+      "mmitRequestResponse\022A\n\016deleteRegionTx\022\026." +
+      "DeleteRegionTxRequest\032\027.DeleteRegionTxRe" +
+      "sponse\022C\n\006delete\022\033.DeleteTransactionalRe",
+      "quest\032\034.DeleteTransactionalResponse\022[\n\016d" +
+      "eleteMultiple\022#.DeleteMultipleTransactio" +
+      "nalRequest\032$.DeleteMultipleTransactional" +
+      "Response\022:\n\003get\022\030.GetTransactionalReques" +
+      "t\032\031.GetTransactionalResponse\0228\n\013performS" +
+      "can\022\023.PerformScanRequest\032\024.PerformScanRe" +
+      "sponse\0228\n\013openScanner\022\023.OpenScannerReque" +
+      "st\032\024.OpenScannerResponse\0228\n\013putRegionTx\022" +
+      "\023.PutRegionTxRequest\032\024.PutRegionTxRespon" +
+      "se\022:\n\003put\022\030.PutTransactionalRequest\032\031.Pu",
+      "tTransactionalResponse\022R\n\013putMultiple\022 ." +
+      "PutMultipleTransactionalRequest\032!.PutMul" +
+      "tipleTransactionalResponse\0228\n\017pushOnline" +
+      "Epoch\022\021.PushEpochRequest\032\022.PushEpochResp" +
+      "onse\022D\n\017recoveryRequest\022\027.RecoveryReques" +
+      "tRequest\032\030.RecoveryRequestResponse\022<\n\021de" +
+      "leteTlogEntries\022\022.TlogDeleteRequest\032\023.Tl" +
+      "ogDeleteResponse\0220\n\007putTlog\022\021.TlogWriteR" +
+      "equest\032\022.TlogWriteResponse\022w\n\036getTransac" +
+      "tionStatesPriorToAsn\022).TlogTransactionSt",
+      "atesFromIntervalRequest\032*.TlogTransactio" +
+      "nStatesFromIntervalResponse\022S\n\024trafEstim" +
+      "ateRowCount\022\034.TrafEstimateRowCountReques" +
+      "t\032\035.TrafEstimateRowCountResponse\022I\n\006GetM" +
+      "ax\022\036.TransactionalAggregateRequest\032\037.Tra" +
+      "nsactionalAggregateResponse\022I\n\006GetMin\022\036." +
+      "TransactionalAggregateRequest\032\037.Transact" +
+      "ionalAggregateResponse\022I\n\006GetSum\022\036.Trans" +
+      "actionalAggregateRequest\032\037.Transactional" +
+      "AggregateResponse\022L\n\tGetRowNum\022\036.Transac",
+      "tionalAggregateRequest\032\037.TransactionalAg" +
+      "gregateResponse\022I\n\006GetAvg\022\036.Transactiona" +
+      "lAggregateRequest\032\037.TransactionalAggrega" +
+      "teResponse\022I\n\006GetStd\022\036.TransactionalAggr" +
+      "egateRequest\032\037.TransactionalAggregateRes" +
+      "ponse\022L\n\tGetMedian\022\036.TransactionalAggreg" +
+      "ateRequest\032\037.TransactionalAggregateRespo" +
+      "nse\022O\n\020setStoragePolicy\022\034.TrafSetStorage" +
+      "PolicyRequest\032\035.TrafSetStoragePolicyResp" +
+      "onseBS\n;org.apache.hadoop.hbase.coproces",
+      "sor.transactional.generatedB\017TrxRegionPr" +
+      "otosH\001\210\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner 
assigner =
       new 
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -51373,26 +52648,38 @@ public final class TrxRegionProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TrafEstimateRowCountResponse_descriptor,
               new java.lang.String[] { "TotalEntries", "TotalSizeBytes", 
"PutKVsSampled", "NonPutKVsSampled", "MissingKVsCount", "Exception", 
"HasException", });
-          internal_static_TransactionalAggregateRequest_descriptor =
+          internal_static_TrafSetStoragePolicyRequest_descriptor =
             getDescriptor().getMessageTypes().get(56);
+          internal_static_TrafSetStoragePolicyRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_TrafSetStoragePolicyRequest_descriptor,
+              new java.lang.String[] { "Path", "Policy", });
+          internal_static_TrafSetStoragePolicyResponse_descriptor =
+            getDescriptor().getMessageTypes().get(57);
+          internal_static_TrafSetStoragePolicyResponse_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_TrafSetStoragePolicyResponse_descriptor,
+              new java.lang.String[] { "Status", "Exception", });
+          internal_static_TransactionalAggregateRequest_descriptor =
+            getDescriptor().getMessageTypes().get(58);
           internal_static_TransactionalAggregateRequest_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TransactionalAggregateRequest_descriptor,
               new java.lang.String[] { "RegionName", "TransactionId", 
"StartId", "InterpreterClassName", "Scan", "InterpreterSpecificBytes", });
           internal_static_TransactionalAggregateResponse_descriptor =
-            getDescriptor().getMessageTypes().get(57);
+            getDescriptor().getMessageTypes().get(59);
           internal_static_TransactionalAggregateResponse_fieldAccessorTable = 
new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TransactionalAggregateResponse_descriptor,
               new java.lang.String[] { "FirstPart", "SecondPart", });
           internal_static_TransactionPersist_descriptor =
-            getDescriptor().getMessageTypes().get(58);
+            getDescriptor().getMessageTypes().get(60);
           internal_static_TransactionPersist_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TransactionPersist_descriptor,
               new java.lang.String[] { "TxById", "SeqNoListSeq", 
"SeqNoListTxn", "NextSeqId", "OnlineEpoch", });
           internal_static_TransactionStateMsg_descriptor =
-            getDescriptor().getMessageTypes().get(59);
+            getDescriptor().getMessageTypes().get(61);
           internal_static_TransactionStateMsg_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_TransactionStateMsg_descriptor,

Reply via email to