[
https://issues.apache.org/jira/browse/HDFS-16429?focusedWorklogId=713300&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-713300
]
ASF GitHub Bot logged work on HDFS-16429:
-----------------------------------------
Author: ASF GitHub Bot
Created on: 23/Jan/22 07:38
Start Date: 23/Jan/22 07:38
Worklog Time Spent: 10m
Work Description: Hexiaoqiao commented on a change in pull request #3900:
URL: https://github.com/apache/hadoop/pull/3900#discussion_r790236964
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/AutoCloseDataSetLock.java
##########
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.StringUtils;
+
+import java.util.concurrent.locks.Lock;
+
+import static org.apache.hadoop.hdfs.server.datanode.DataSetLockManager.LOG;
+
+/**
+ * Extending AutoCloseableLock such that the users can
+ * use a try-with-resource syntax.
+ */
+public class AutoCloseDataSetLock extends AutoCloseableLock {
+ private Lock lock;
+ private AutoCloseDataSetLock parentLock;
+ private DataNodeLockManager<AutoCloseDataSetLock> dataNodeLockManager;
+
+ public AutoCloseDataSetLock(Lock lock) {
+ this.lock = lock;
+ }
+
+ @Override
+ public void close() {
+ if (lock != null) {
+ lock.unlock();
+ if (dataNodeLockManager != null) {
+ dataNodeLockManager.hook();
+ }
+ } else {
+ LOG.error("Try to unlock null lock" +
+ StringUtils.getStackTrace(Thread.currentThread()));
+ }
+ if (parentLock != null) {
+ parentLock.close();
+ }
+ }
+
+ /**
+ * Actually acquire the lock.
+ */
+ public void lock() {
+ if (lock != null) {
+ lock.lock();
+ return;
+ }
+ LOG.error("Try to lock null lock" +
+ StringUtils.getStackTrace(Thread.currentThread()));
+ }
+
+ public void setParentLock(AutoCloseDataSetLock parent) {
+ if (parentLock == null) {
+ this.parentLock = parent;
+ }
+ }
+
+ public void setLockManager(DataNodeLockManager<AutoCloseDataSetLock>
dataNodeLockManager) {
Review comment:
> 'dataNodeLockManager' hides a field. [HiddenField]
maybe update the parameter variable to another name to avoid this checkstyle
error.
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java
##########
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+/**
+ * Use for manage a set of lock for datanode.
+ */
+public interface DataNodeLockManager<T extends AutoCloseDataSetLock> {
+
+ enum LockLevel {
Review comment:
Suggestion: add some annotation for this enum to explain what
`BLOCK_POOL/VOLUMN` mean, and how they are intended to be used.
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java
##########
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.common.AutoCloseDataSetLock;
+import org.apache.hadoop.hdfs.server.common.DataNodeLockManager;
+
+import java.util.HashMap;
+import java.util.Stack;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+/**
+ * Class for maintain a set of lock for fsDataSetImpl.
+ */
+public class DataSetLockManager implements
DataNodeLockManager<AutoCloseDataSetLock> {
+ public static final Log LOG = LogFactory.getLog(DataSetLockManager.class);
+ private final HashMap<String, TrackLog> threadCountMap = new HashMap<>();
+ private final LockMap lockMap = new LockMap();
+ private boolean isFair = true;
+ private final boolean openLockTrace;
+ private Exception lastException;
+
+ /**
+ * Class for maintain lockMap and is thread safe.
+ */
+ private class LockMap {
+ private final HashMap<String, AutoCloseDataSetLock> readlockMap = new
HashMap<>();
+ private final HashMap<String, AutoCloseDataSetLock> writeLockMap = new
HashMap<>();
+
+ public synchronized void addLock(String name, ReentrantReadWriteLock lock)
{
+ AutoCloseDataSetLock readLock = new
AutoCloseDataSetLock(lock.readLock());
+ AutoCloseDataSetLock writeLock = new
AutoCloseDataSetLock(lock.writeLock());
+ if (openLockTrace) {
+ readLock.setLockManager(DataSetLockManager.this);
+ writeLock.setLockManager(DataSetLockManager.this);
+ }
+ readlockMap.putIfAbsent(name, readLock);
+ writeLockMap.putIfAbsent(name, writeLock);
+ }
+
+ public synchronized void removeLock(String name) {
+ if (!readlockMap.containsKey(name) || !writeLockMap.containsKey(name)) {
+ LOG.error("The lock " + name + " is not in LockMap");
+ }
+ readlockMap.remove(name);
+ writeLockMap.remove(name);
+ }
+
+ public synchronized AutoCloseDataSetLock getReadLock(String name) {
+ return readlockMap.get(name);
+ }
+
+ public synchronized AutoCloseDataSetLock getWriteLock(String name) {
+ return writeLockMap.get(name);
+ }
+ }
+
+ private String generateLockName(LockLevel level, String... resources) {
Review comment:
Suggest adding some annotations to explain what `level` and
`resources` are here, and also to describe the return value.
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
##########
@@ -6337,4 +6337,14 @@
times, we should mark it as a badnode.
</description>
</property>
+
+ <property>
+ <name>dfs.lock.manager.trace</name>
+ <value>false</value>
+ <description>
+ If this is true, after shout down datanode lock Manager will print all
leak
Review comment:
`shout down` to `shut down`?
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
##########
@@ -6337,4 +6337,14 @@
times, we should mark it as a badnode.
</description>
</property>
+
+ <property>
+ <name>dfs.lock.manager.trace</name>
Review comment:
"dfs.lock.manager.tracer" to "dfs.datanode.lockmanager.trace";
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/AutoCloseDataSetLock.java
##########
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common;
+
+import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.StringUtils;
+
+import java.util.concurrent.locks.Lock;
+
+import static org.apache.hadoop.hdfs.server.datanode.DataSetLockManager.LOG;
+
+/**
+ * Extending AutoCloseableLock such that the users can
+ * use a try-with-resource syntax.
+ */
+public class AutoCloseDataSetLock extends AutoCloseableLock {
+ private Lock lock;
+ private AutoCloseDataSetLock parentLock;
+ private DataNodeLockManager<AutoCloseDataSetLock> dataNodeLockManager;
+
+ public AutoCloseDataSetLock(Lock lock) {
Review comment:
`parentLock` and `dataNodeLockManager` are not initialized here?
##########
File path:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
##########
@@ -1658,6 +1658,13 @@
DFS_NAMESERVICES_RESOLVER_IMPL =
"dfs.datanode.nameservices.resolver.impl";
+ public static final String
+ DFS_DATANODE_LOCK_MANAGER_TRACE =
+ "dfs.lock.manager.trace";
Review comment:
Nit:
a. "DFS_DATANODE_LOCK_MANAGE_TRACE" to "DFS_DATANODE_LOCKMANAGER_TRACE";
b. "dfs.lock.manager.tracer" to "dfs.datanode.lockmanager.trace";
c. "DFS_DATANODE_LOCK_MANAGER_TRACE_DEFAULT" to
"DFS_DATANODE_LOCKMANAGER_TRACE_DEFAULT";
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
Issue Time Tracking
-------------------
Worklog Id: (was: 713300)
Time Spent: 1h (was: 50m)
> Add DataSetLockManager to maintain locks for FsDataSetImpl
> ----------------------------------------------------------
>
> Key: HDFS-16429
> URL: https://issues.apache.org/jira/browse/HDFS-16429
> Project: Hadoop HDFS
> Issue Type: Sub-task
> Components: hdfs
> Affects Versions: 3.2.0
> Reporter: Mingxiang Li
> Assignee: Mingxiang Li
> Priority: Major
> Labels: pull-request-available
> Fix For: 3.2.0
>
> Time Spent: 1h
> Remaining Estimate: 0h
>
> 1、Use lockManager to maintain two level lock for FsDataSetImpl.
> The simple lock model is like this. Parts of it are implemented as follows:
> * As for finalizeReplica(),append(),createRbw()....First get BlockPoolLock
> read lock,and then get BlockPoolLock-volume-lock write lock.
> * As for getStoredBlock(),getMetaDataInputStream()....First get
> BlockPoolLock read lock,and then get BlockPoolLock-volume-lock read lock.
> * As for deepCopyReplica(),getBlockReports() get the BlockPoolLock read lock.
> * As for delete hold the BlockPoolLock write lock.
> 2、Make LightWeightResizableGSet thread safe. It will not become a performance
> bottleneck if we make it thread safe. We can reduce the lock granularity for
> ReplicaMap once LightWeightResizableGSet is thread safe.
--
This message was sent by Atlassian Jira
(v8.20.1#820001)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]