ayushtkn commented on code in PR #4368:
URL: https://github.com/apache/hadoop/pull/4368#discussion_r884086228
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMBean.java:
##########
@@ -345,7 +345,7 @@ public interface FederationMBean {
long getHighestPriorityLowRedundancyECBlocks();
/**
- * Returns the number of paths to be processed by storage policy satisfier.
+ * Returns the number of paths to be processed by storage policy satisfies.
Review Comment:
SPS abbreviates to Storage Policy Satisfier, so the original wording is correct
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationRPCPerformanceMonitor.java:
##########
@@ -246,7 +246,7 @@ public void routerFailureLocked() {
/**
- * Get time between we receiving the operation and sending it to the Namenode.
+ * Get time between we're receiving the operation and sending it to the Namenode.
Review Comment:
not required, it is ok only
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodePriorityComparator.java:
##########
@@ -60,7 +60,7 @@ public int compare(FederationNamenodeContext o1,
*/
private int compareModDates(FederationNamenodeContext o1,
FederationNamenodeContext o2) {
- // Reverse sort, lowest position is highest priority.
+ // Reverse sort, the lowest position is the highest priority.
Review Comment:
avoid, it is ok only
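
For readers skimming this thread, the comment under discussion describes a reverse sort where the newest modification date wins. A minimal, hypothetical sketch of that ordering (not the actual NamenodePriorityComparator code):

```java
// Illustrative sketch only, not the actual NamenodePriorityComparator: a
// reverse sort on modification dates, so the most recently updated entry
// lands at the lowest position, which is treated as the highest priority.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ModDateOrderSketch {
  // Reverse of natural order: newer (larger) dates compare as "smaller".
  static final Comparator<Long> NEWEST_FIRST = (d1, d2) -> Long.compare(d2, d1);

  public static void main(String[] args) {
    List<Long> modDates = new ArrayList<>(List.of(100L, 300L, 200L));
    modDates.sort(NEWEST_FIRST);
    System.out.println(modDates); // [300, 200, 100] -> newest is at position 0
  }
}
```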
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -325,14 +325,14 @@ protected static boolean isUnavailableSubclusterException(
/**
* Check if a remote method can be retried in other subclusters when it
* failed in the original destination. This method returns the list of
- * locations to retry in. This is used by fault tolerant mount points.
+ * locations to retry in. This is used by fault-tolerant mount points.
Review Comment:
avoid
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java:
##########
@@ -32,8 +32,8 @@
/**
* Driver class for an implementation of a {@link StateStoreService}
- * provider. Driver implementations will extend this class and implement some of
- * the default methods.
+ * provider. Driver implementations will extend this class and implement some
+ * default methods.
Review Comment:
correct only, revert
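
As background for this javadoc, the pattern described is an abstract driver that concrete state store backends subclass, overriding a subset of its methods. A rough, hypothetical sketch of that shape (names are illustrative, not the real StateStoreDriver API):

```java
// Hypothetical sketch of the "abstract driver + backend subclass" pattern the
// javadoc describes; class and method names are illustrative, not the real
// StateStoreDriver API.
abstract class StateStoreDriverSketch {
  /** Backends must provide the actual storage access. */
  abstract boolean initDriver();

  /** Default behavior that subclasses can reuse or override. */
  boolean isDriverReady() {
    return true;
  }
}

class InMemoryDriverSketch extends StateStoreDriverSketch {
  @Override
  boolean initDriver() {
    // A real backend would open files, a ZooKeeper connection, etc.
    return true;
  }
}
```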
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -1830,8 +1830,8 @@ public HAServiceProtocol.HAServiceState getHAServiceState() {
}
/**
- * Determines combinations of eligible src/dst locations for a rename. A
- * rename cannot change the namespace. Renames are only allowed if there is an
+ * Determines combinations of eligible src/dst locations for a renamed. A
+ * renamed cannot change the namespace. Renames are only allowed if there is an
Review Comment:
revert. It was better previously
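
Since the javadoc in question is about renames not being able to cross namespaces, here is a small hypothetical illustration of that constraint (simplified, not RouterClientProtocol's actual eligibility logic):

```java
// Hypothetical illustration of the constraint in the javadoc: a rename is
// only eligible when the src and dst locations resolve to the same namespace.
// Not the actual RouterClientProtocol implementation.
public class RenameEligibilitySketch {
  static boolean sameNamespace(String srcNsId, String dstNsId) {
    return srcNsId.equals(dstNsId);
  }

  public static void main(String[] args) {
    System.out.println(sameNamespace("ns0", "ns0")); // true: rename can proceed
    System.out.println(sameNamespace("ns0", "ns1")); // false: rejected, cannot change namespace
  }
}
```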
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java:
##########
@@ -178,7 +178,7 @@ public void cleanup() throws Exception {
}
/**
- * Update a mount table entry to be fault tolerant.
+ * Update a mount table entry to be fault-tolerant.
Review Comment:
no need of -, it is ok, revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/MiniRouterDFSCluster.java:
##########
@@ -924,7 +924,7 @@ public void waitRouterRegistrationQuorum(RouterContext router,
/**
* Wait for name spaces to be active.
- * @throws Exception If we cannot check the status or we timeout.
+ * @throws Exception If we cannot check the status or we time out.
Review Comment:
timeout is well accepted, revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java:
##########
@@ -308,9 +308,9 @@ public void run() {
TimeUnit.SECONDS);
mountTableRefresherService.init(config);
// One router is not responding for 1 minute, still refresh should
- // finished in 5 second as cache update timeout is set 5 second.
+ // be finished in 5 second as cache update timeout is set 5 second.
Review Comment:
be finished -> finish
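
For context on the behavior this test asserts (a 5-second cache update timeout bounding a router that hangs for a minute), a hedged sketch of the general timeout pattern, not the actual MountTableRefresherService internals:

```java
// Hypothetical sketch of the behavior the test comment describes: a
// per-refresh timeout bounds how long we wait for a slow router, so the
// overall refresh finishes in ~5 seconds even if one peer hangs for a minute.
// Not the actual MountTableRefresherService code.
import java.util.concurrent.*;

public class RefreshTimeoutSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    Future<Boolean> refresh = pool.submit(() -> {
      Thread.sleep(60_000); // simulate a router that does not respond
      return true;
    });
    try {
      refresh.get(5, TimeUnit.SECONDS); // cache update timeout
    } catch (TimeoutException e) {
      System.out.println("Refresh gave up on the slow router after 5s");
    } finally {
      refresh.cancel(true);
      pool.shutdownNow();
    }
  }
}
```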
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java:
##########
@@ -594,7 +594,7 @@ public void testMountPointChildren() throws IOException {
}
/**
- * Validate the number of children for the mount point pointing to multiple
+ * Validate the number of children for the mount point to multiple
Review Comment:
revert, it sounded better before
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java:
##########
@@ -208,7 +208,7 @@ public void testRegistrationMajorityQuorum()
ns, nn, ROUTERS[3], FederationNamenodeServiceState.ACTIVE);
assertTrue(namenodeHeartbeat(report));
- // standby - newest overall
+ // standby - the newest overall
Review Comment:
not required
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java:
##########
@@ -51,7 +51,7 @@ public void setup() throws Exception {
cluster.registerNamenodes();
cluster.waitNamenodeRegistration();
- // Setup the mount table
+ // Set up the mount table
Review Comment:
revert, setup is ok
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java:
##########
@@ -489,7 +489,7 @@ public RemoveMountTableEntryResponse removeMountTableEntry(
synchronizeQuota(request.getSrcPath(), HdfsConstants.QUOTA_RESET,
HdfsConstants.QUOTA_RESET, null);
} catch (Exception e) {
- // Ignore exception, if any while reseting quota. Specifically to handle
+ // Ignore exception, if any while resting quota. Specifically to handle
Review Comment:
revert.
The earlier wording meant reset; now it has been changed to rest.
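
To make the surrounding diff context concrete, the pattern shown is a best-effort quota reset whose failures are deliberately ignored. A hedged sketch of that pattern (resetQuota() is a hypothetical stand-in, not the real synchronizeQuota()):

```java
// Minimal sketch of the best-effort pattern shown in the diff context:
// attempt to reset the quota and deliberately swallow failures, so the
// mount entry removal is not blocked by a quota reset error.
// resetQuota() is a hypothetical stand-in, not the real synchronizeQuota().
public class BestEffortQuotaResetSketch {
  static void resetQuota(String path) {
    throw new IllegalStateException("namespace unreachable"); // simulated failure
  }

  public static void main(String[] args) {
    try {
      resetQuota("/mount/src");
    } catch (Exception e) {
      // Ignore: removing the mount entry proceeds even if the reset fails.
      System.out.println("Ignoring quota reset failure: " + e.getMessage());
    }
    System.out.println("Mount table entry removal continues");
  }
}
```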
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java:
##########
@@ -153,7 +153,7 @@ public boolean loadCache(boolean force) throws IOException {
}
/**
- * Check if it's time to update the cache. Update it it was never updated.
+ * Check if it's time to update the cache. Update it is never updated.
Review Comment:
"was" looks better; revert this
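
For context, the refresh rule this javadoc describes is: update the cache when it has never been loaded, or when the configured interval has passed. A hypothetical sketch of that rule (field and method names are illustrative, not CachedRecordStore's):

```java
// Hypothetical sketch of the refresh rule in the javadoc: refresh when the
// cache has never been loaded, or when the configured interval has elapsed.
// Field and method names are illustrative, not CachedRecordStore's.
public class CacheRefreshSketch {
  private long lastUpdateMs = 0;                 // 0 means never updated
  private static final long INTERVAL_MS = 30_000;

  boolean isUpdateTime(long nowMs) {
    return lastUpdateMs == 0 || nowMs - lastUpdateMs > INTERVAL_MS;
  }

  void maybeRefresh() {
    long now = System.currentTimeMillis();
    if (isUpdateTime(now)) {
      // Reload records from the state store here.
      lastUpdateMs = now;
    }
  }
}
```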
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java:
##########
@@ -30,7 +30,7 @@
/**
* Service to periodically check if the {@link
* org.apache.hadoop.hdfs.server.federation.store.StateStoreService
- * StateStoreService} cached information in the {@link Router} is up to date.
+ * StateStoreService} cached information in the {@link Router} is up-to-date.
Review Comment:
not required
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java:
##########
@@ -95,7 +95,7 @@ public void startThreads() throws IOException {
if (!isTokenWatcherEnabled()) {
LOG.info("Watcher for tokens is disabled in this secret manager");
try {
- // By default set this variable
+ // By default, set this variable
Review Comment:
not required
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java:
##########
@@ -439,7 +439,7 @@ private boolean isQuotaUpdated(UpdateMountTableEntryRequest request,
}
return false;
} else {
- // If old entry is not available, sync quota always, since we can't
+ // If old entry is not available, sync quota always, since we can
Review Comment:
changed meaning itself, revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/security/token/ZKDelegationTokenSecretManagerImpl.java:
##########
@@ -159,7 +159,7 @@ public DelegationTokenIdentifier createIdentifier() {
private void rebuildTokenCache(boolean initial) throws IOException {
localTokenCache.clear();
// Use bare zookeeper client to get all children since curator will
- // wrap the same API with a sorting process. This is time consuming given
+ // wrap the same API with a sorting process. This is time-consuming given
// millions of tokens
Review Comment:
not required
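
To make the trade-off in this diff concrete, here is a hedged sketch of the two ways to list the token child znodes: the raw ZooKeeper client versus Curator. The path parameter and wiring are assumptions, not the actual ZKDelegationTokenSecretManagerImpl code:

```java
// Hedged sketch of the trade-off the comment describes: listing token child
// znodes via the raw ZooKeeper client instead of going through Curator,
// which (per the Hadoop comment) adds a sorting step that is costly with
// millions of tokens. Paths and wiring are illustrative assumptions.
import java.util.List;
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.ZooKeeper;

public class TokenChildrenSketch {
  // Raw ZooKeeper: returns the children as-is, no extra post-processing.
  static List<String> listWithBareZk(ZooKeeper zk, String tokensPath) throws Exception {
    return zk.getChildren(tokensPath, false /* no watch */);
  }

  // Curator equivalent, for comparison with the bare-client call above.
  static List<String> listWithCurator(CuratorFramework client, String tokensPath)
      throws Exception {
    return client.getChildren().forPath(tokensPath);
  }
}
```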
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/SecurityConfUtil.java:
##########
@@ -99,7 +99,7 @@ public static Configuration initSecurity() throws Exception {
assertTrue("Expected configuration to enable security",
UserGroupInformation.isSecurityEnabled());
- // Setup the keytab
+ // Set up the keytab
Review Comment:
revert, ok only
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java:
##########
@@ -325,14 +325,14 @@ protected static boolean isUnavailableSubclusterException(
/**
* Check if a remote method can be retried in other subclusters when it
* failed in the original destination. This method returns the list of
- * locations to retry in. This is used by fault tolerant mount points.
+ * locations to retry in. This is used by fault-tolerant mount points.
* @param method Method that failed and might be retried.
* @param src Path where the method was invoked.
* @param ioe Exception that was triggered.
* @param excludeLoc Location that failed and should be excluded.
* @param locations All the locations to retry.
* @return The locations where we should retry (excluding the failed ones).
- * @throws IOException If this path is not fault tolerant or the exception
+ * @throws IOException If this path is not fault-tolerant or the exception
Review Comment:
avoid
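
Summarizing the javadoc being edited: if the mount point is fault tolerant, the method returns the remaining locations to retry in, excluding the one that failed; otherwise the exception is surfaced. A hypothetical sketch of that rule (not the actual RouterClientProtocol method):

```java
// Hypothetical sketch of the retry rule the javadoc describes: retry in every
// location except the failed one when the mount point is fault tolerant,
// otherwise rethrow. Not the real RouterClientProtocol implementation.
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class FaultTolerantRetrySketch {
  static List<String> locationsToRetry(boolean faultTolerant,
      List<String> allLocations, String failedLocation, IOException cause)
      throws IOException {
    if (!faultTolerant) {
      throw cause; // not a fault-tolerant mount point: do not retry elsewhere
    }
    List<String> retry = new ArrayList<>(allLocations);
    retry.remove(failedLocation);
    return retry;
  }
}
```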
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java:
##########
@@ -304,7 +304,7 @@ private void testConnectionCleanup(float ratio, int totalConns,
addConnectionsToPool(pool, totalConns - 1, activeConns - 1);
// There are activeConn connections.
- // We can cleanup the pool
+ // We can clean up the pool
Review Comment:
cleanup is well accepted, revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java:
##########
@@ -84,7 +84,7 @@ private void checkMountTableEntryPermission(String src, FsAction action)
}
/**
- * Check parent path permission recursively. It needs WRITE permission
+ * Check parent path permission recursively. It needs to write permission
Review Comment:
revert.
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java:
##########
@@ -355,7 +355,7 @@ Map<ConnectionPoolId, ConnectionPool> getPools() {
/**
* Clean the unused connections for this pool.
*
- * @param pool Connection pool to cleanup.
+ * @param pool Connection pool to clean up.
Review Comment:
not required
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java:
##########
@@ -268,10 +268,10 @@ private void checkPoolConnections(UserGroupInformation ugi,
@Test
public void testConfigureConnectionActiveRatio() throws IOException {
- // test 1 conn below the threshold and these conns are closed
+ // test 1 conn below the threshold and This conns are closed
testConnectionCleanup(0.8f, 10, 7, 9);
- // test 2 conn below the threshold and these conns are closed
+ // test 2 conn below the threshold and This conns are closed
Review Comment:
revert, sounds ok only
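
For context on what these two test cases exercise: idle connections are cleaned up when the pool's active ratio falls below the configured threshold. A hedged sketch of that arithmetic (illustrative, not the exact ConnectionManager policy):

```java
// Hedged sketch of the active-ratio idea these tests exercise: when the
// fraction of active connections drops below the configured ratio, the
// surplus idle connections become candidates for cleanup. The arithmetic
// here is illustrative, not the exact ConnectionManager policy.
public class ActiveRatioSketch {
  static int idleConnectionsToClose(int totalConns, int activeConns, float minActiveRatio) {
    float activeRatio = (float) activeConns / totalConns;
    if (activeRatio >= minActiveRatio) {
      return 0; // pool is busy enough, keep all connections
    }
    return totalConns - activeConns; // idle connections eligible for cleanup
  }

  public static void main(String[] args) {
    System.out.println(idleConnectionsToClose(10, 7, 0.8f)); // 3 idle candidates
    System.out.println(idleConnectionsToClose(10, 9, 0.8f)); // 0, ratio satisfied
  }
}
```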
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java:
##########
@@ -278,8 +278,8 @@ private void testWriteWithFailedSubcluster(final DestinationOrder order)
/**
* Check directory creation on a mount point.
- * If it is fault tolerant, it should be able to write everything.
- * If it is not fault tolerant, it should fail to write some.
+ * If it is fault-tolerant, it should be able to write everything.
+ * If it is not fault-tolerant, it should fail to write some.
Review Comment:
same as above, revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MountTableStoreImpl.java:
##########
@@ -99,8 +99,8 @@ private void checkMountTablePermission(final String src) throws IOException {
}
/**
- * When add mount table entry, it needs WRITE permission of the nearest parent
- * entry if exist, and EXECUTE permission of other ancestor entries.
+ * When add mount table entry, it needs to write permission of the nearest parent
+ * entry if existed, and EXECUTE permission of other ancestor entries.
Review Comment:
it was correct only, revert
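
Restating the rule in the javadoc: adding a mount entry needs WRITE on the nearest existing parent entry and EXECUTE on the other ancestors. A hypothetical, simplified sketch of that check (not MountTableStoreImpl's actual permission logic):

```java
// Hypothetical sketch of the rule in the javadoc: adding a mount entry needs
// WRITE on the nearest existing parent entry and EXECUTE on the other
// ancestors. The permission model here is simplified and illustrative, not
// MountTableStoreImpl's actual checks.
import java.util.List;
import java.util.Map;

public class MountPermissionSketch {
  enum Perm { WRITE, EXECUTE }

  static boolean canAddEntry(String nearestParent, List<String> otherAncestors,
      Map<String, List<Perm>> grants) {
    boolean parentOk = nearestParent == null      // no parent entry exists
        || grants.getOrDefault(nearestParent, List.of()).contains(Perm.WRITE);
    boolean ancestorsOk = otherAncestors.stream()
        .allMatch(a -> grants.getOrDefault(a, List.of()).contains(Perm.EXECUTE));
    return parentOk && ancestorsOk;
  }
}
```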
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java:
##########
@@ -102,7 +102,7 @@ public static void setUp() throws Exception {
private static void setupNamespace() throws IOException {
- // Setup a mount table to map to the two namespaces
+ // Set up a mount table to map to the two namespaces
Review Comment:
revert, setup is ok
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java:
##########
@@ -102,7 +102,7 @@ public void setup() throws Exception {
cluster.registerNamenodes();
cluster.waitNamenodeRegistration();
- // Setup the test mount point
+ // Set up the test mount point
Review Comment:
revert
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java:
##########
@@ -322,8 +322,8 @@ private void checkDirectoriesFaultTolerant(
/**
* Check file creation on a mount point.
- * If it is fault tolerant, it should be able to write everything.
- * If it is not fault tolerant, it should fail to write some of the files.
+ * If it is fault-tolerant, it should be able to write everything.
+ * If it is not fault-tolerant, it should fail to write some files.
Review Comment:
revert, it is ok only
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java:
##########
@@ -234,7 +234,7 @@ public void testCachedRouterClientBehaviourAfterRouterStoped()
assertEquals(srcPath, mountTableResult.getSourcePath());
}
- // Lets stop one router
+ // Let's stop one router
Review Comment:
revert this; I guess "Let's" is "let us" and "lets" is to permit or to allow, and it is ok in this context
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java:
##########
@@ -130,7 +130,7 @@ public void testSetup() throws Exception {
this.setNamenode(cluster.getNamenode(ns, null));
// Create a test file on a single NN that is accessed via a getRouter() path
- // with 2 destinations. All tests should failover to the alternate
+ // with 2 destinations. All tests should fail over to the alternate
Review Comment:
revert, failover is a well accepted word for us
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/TestStateStoreMembershipState.java:
##########
@@ -186,7 +186,7 @@ public void testRegistrationMajorityQuorum()
// 1) ns0:nn0 - Standby (newest)
// 2) ns0:nn0 - Active (oldest)
// 3) ns0:nn0 - Active (2nd oldest)
- // 4) ns0:nn0 - Active (3nd oldest element, newest active element)
+ // 4) ns0:nn0 - Active (3nd the oldest element, the newest active element)
Review Comment:
Change 3nd -> 3rd; there is no need of the first "the"
##########
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java:
##########
@@ -466,7 +466,7 @@ public void testNameserviceManager() throws IOException {
disabled = getDisabledNameservices(nsManager);
assertTrue(disabled.isEmpty());
- // Non existing name services should fail
+ // Non-existing name services should fail
Review Comment:
ok only, revert this
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]