Apache9 commented on a change in pull request #3378:
URL: https://github.com/apache/hbase/pull/3378#discussion_r651758562
##########
File path:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
##########
@@ -100,7 +100,8 @@
private final NonceGenerator nonceGenerator;
private final ConcurrentMap<String, ClientService.Interface> rsStubs = new ConcurrentHashMap<>();
- private final ConcurrentMap<String, AdminService.Interface> adminSubs = new ConcurrentHashMap<>();
+ private final ConcurrentMap<String, AdminService.Interface> adminStubs =
Review comment:
I think this could be a separate issue which targets the master branch?
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
##########
@@ -41,20 +45,36 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos.CompactionService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getStubKey;
/**
* The implementation of AsyncClusterConnection.
*/
@InterfaceAudience.Private
class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
-
+ private final ConcurrentMap<String, CompactionService.Interface> CompactionSubs = new ConcurrentHashMap<>();
Review comment:
Stubs, not Subs.
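That is, following the existing rsStubs / adminStubs naming, the field would presumably become something like:

```java
// Sketch of the suggested rename; only the identifier changes.
private final ConcurrentMap<String, CompactionService.Interface> compactionStubs =
    new ConcurrentHashMap<>();
```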
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncClusterConnectionImpl.java
##########
@@ -41,20 +45,36 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos.CompactionService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getStubKey;
/**
* The implementation of AsyncClusterConnection.
*/
@InterfaceAudience.Private
class AsyncClusterConnectionImpl extends AsyncConnectionImpl implements AsyncClusterConnection {
-
+ private final ConcurrentMap<String, CompactionService.Interface> CompactionSubs = new ConcurrentHashMap<>();
public AsyncClusterConnectionImpl(Configuration conf, ConnectionRegistry registry,
String clusterId, SocketAddress localAddress, User user) {
super(conf, registry, clusterId, localAddress, user);
}
+ CompactionProtos.CompactionService.Interface getCompactionStub(ServerName serverName) throws
Review comment:
Do we really need to cache the stub here? This is not the normal read/write path, so just
creating a new one every time should be enough? The rpc connection is already cached in RpcClient.
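A minimal sketch of what this suggests, assuming the class can reach the rpcClient, user and rpcTimeout members of AsyncConnectionImpl (the exact names used in this PR are an assumption):

```java
// Sketch only: create the stub on demand instead of keeping a ConcurrentMap cache.
// The underlying connection is pooled inside RpcClient, so per-call stub creation is cheap.
CompactionService.Interface getCompactionStub(ServerName serverName) throws IOException {
  return CompactionService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
}
```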
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
##########
@@ -3736,4 +3741,67 @@ public CompactedHFilesDischarger getCompactedHFilesDischarger() {
public long getRetryPauseTime() {
return this.retryPauseTime;
}
+
+ @Override
+ public boolean isCompactionOffloadEnabled(){
+ return regionServerCompactionOffloadManager.isCompactionOffloadEnabled();
+ }
+
+ private synchronized void createCompactionManagerStub(boolean refresh) {
+ // Create Master Compaction service stub without refreshing the master node from ZK,
+ // use cached data
+ if (cmsStub == null) {
+ cmsStub =
+ (CompactionService.BlockingInterface) createMasterStub(CompactionService.class, refresh);
+ }
+ }
+
+ /**
+ * Send compaction request to compaction manager
+ * @return True if send request successfully, otherwise false
+ * @throws IOException If an error occurs
+ */
+ @Override
+ public boolean requestCompactRegion(RegionInfo regionInfo, ColumnFamilyDescriptor cfd,
+ boolean major, int priority) {
+ if (!isCompactionOffloadEnabled()) {
+ return false;
+ }
+ if (cmsStub == null) {
+ createCompactionManagerStub(false);
Review comment:
So without a master we cannot reassign regions, and now we cannot even compact a region...
Could we double-check whether this is really the only choice here?
Maybe start a discussion thread on the dev list?
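Purely as an illustration of one possible alternative (nothing in this PR): the caller could fall back to a region-server-local compaction whenever offload is refused or the master cannot be reached:

```java
// Hypothetical call-site fallback; requestCompactionLocally is a placeholder name,
// not an existing method. The point is only that compaction availability would not
// depend on an active master.
if (!regionServer.requestCompactRegion(regionInfo, cfd, major, priority)) {
  requestCompactionLocally(regionInfo, cfd, major, priority);
}
```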
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/client/AsyncCompactionServerService.java
##########
@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback;
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos.CompactRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos.CompactResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.CompactionProtos.CompactionService;
+
+
+/**
+ * A simple wrapper of the {@link CompactionService} for a compaction server, which returns a
+ * {@link CompletableFuture}. This is easier to use, as if you use the raw protobuf interface, you
+ * need to get the result from the {@link RpcCallback}, and if there is an exception, you need to
+ * get it from the {@link RpcController} passed in.
+ * <p/>
+ * Notice that there is no retry, and this is intentional. We have different retry for different
+ * usage for now, if later we want to unify them, we can move the retry logic into this class.
+ */
+@InterfaceAudience.Private
+public class AsyncCompactionServerService {
+
+ private final ServerName server;
+
+ private final AsyncClusterConnectionImpl conn;
+
+ AsyncCompactionServerService(ServerName server, AsyncClusterConnectionImpl conn) {
+ this.server = server;
+ this.conn = conn;
+ }
+
+ @FunctionalInterface
+ private interface RpcCall<RESP> {
+ void call(CompactionService.Interface stub, HBaseRpcController controller,
+ RpcCallback<RESP> done);
+ }
+
+ private <RESP> CompletableFuture<RESP> call(RpcCall<RESP> rpcCall) {
Review comment:
Don't we have a common method for this in ConnectionUtils?
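For illustration only (not an existing ConnectionUtils API): a shared helper could be parameterized on the stub type so that AsyncRegionServerAdmin and AsyncCompactionServerService share the callback-to-CompletableFuture bridging, roughly:

```java
// Illustrative sketch; names and placement are assumptions, not part of this PR.
@FunctionalInterface
interface RpcCall<STUB, RESP> {
  void call(STUB stub, HBaseRpcController controller, RpcCallback<RESP> done);
}

static <STUB, RESP> CompletableFuture<RESP> call(STUB stub, HBaseRpcController controller,
    RpcCall<STUB, RESP> rpcCall) {
  CompletableFuture<RESP> future = new CompletableFuture<>();
  rpcCall.call(stub, controller, resp -> {
    // Bridge the protobuf callback to the future, surfacing controller failures.
    if (controller.failed()) {
      future.completeExceptionally(controller.getFailed());
    } else {
      future.complete(resp);
    }
  });
  return future;
}
```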
##########
File path:
hbase-server/src/main/java/org/apache/hadoop/hbase/compactionserver/CompactionThreadManager.java
##########
@@ -0,0 +1,61 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.compactionserver;
+
+import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.AsyncRegionServerAdmin;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@InterfaceAudience.Private
+public class CompactionThreadManager {
+ private static Logger LOG = LoggerFactory.getLogger(CompactionThreadManager.class);
+
+ private final Configuration conf;
+ private final ConcurrentMap<ServerName, AsyncRegionServerAdmin> rsAdmins =
+ new ConcurrentHashMap<>();
+ private final HCompactionServer server;
+
+ public CompactionThreadManager(final Configuration conf, HCompactionServer server) {
+ TraceUtil.initTracer(conf);
Review comment:
What's this...
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]