[hadoop] branch trunk updated (c3b3b36 -> 32353eb)

2021-03-01 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from c3b3b36  HDFS-14013. Skip any credentials stored in HDFS when starting 
ZKFC. Contributed by Stephen O'Donnell
 add 32353eb  HDFS-15854. Make some parameters configurable for 
SlowDiskTracker and SlowPeerTracker (#2718)

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 
 .../hdfs/server/blockmanagement/SlowDiskTracker.java   |  7 +--
 .../hdfs/server/blockmanagement/SlowPeerTracker.java   |  7 +--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 18 ++
 4 files changed, 36 insertions(+), 4 deletions(-)
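
Illustrative sketch (not part of the patch): the change below makes tracker limits configurable, so code like SlowPeerTracker can read them from Configuration instead of hard-coding them. The key name and default used here are hypothetical placeholders; the real keys are the ones added to DFSConfigKeys.java and hdfs-default.xml in this commit.

import org.apache.hadoop.conf.Configuration;

public class TrackerConfigExample {
  // Hypothetical key/default for illustration only; see the patch for the real names.
  static final String MAX_NODES_TO_REPORT_KEY = "dfs.example.slow.peer.max.nodes.to.report";
  static final int MAX_NODES_TO_REPORT_DEFAULT = 5;

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A tracker would read its limit once at construction time.
    int maxNodesToReport = conf.getInt(MAX_NODES_TO_REPORT_KEY, MAX_NODES_TO_REPORT_DEFAULT);
    System.out.println("Reporting at most " + maxNodesToReport + " slow nodes");
  }
}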


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: MAPREDUCE-7298. Distcp doesn't close the job after the job is completed. Contributed by Aasha Medhi.

2020-10-02 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 18fa439  MAPREDUCE-7298. Distcp doesn't close the job after the job is 
completed. Contributed by Aasha Medhi.
18fa439 is described below

commit 18fa4397e6dc7663bcc7c7309126f45eb8d3fa17
Author: Arpit Agarwal 
AuthorDate: Fri Oct 2 08:29:55 2020 -0700

MAPREDUCE-7298. Distcp doesn't close the job after the job is completed. 
Contributed by Aasha Medhi.

Change-Id: I63d249bbb18ccedaeee9f10123a78e32f9e54ed2
---
 .../main/java/org/apache/hadoop/tools/DistCp.java  | 15 +--
 .../org/apache/hadoop/tools/TestExternalCall.java  | 31 ++
 2 files changed, 44 insertions(+), 2 deletions(-)
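
Illustrative sketch (not part of the patch): the fix below closes the MapReduce Job handle in a finally block once a blocking DistCp run finishes, so the underlying cluster connection is released even when the copy fails. This is a simplified stand-in; the blocking flag corresponds to DistCp's context.shouldBlock() check in the real code.

import java.io.IOException;
import org.apache.hadoop.mapreduce.Job;

public class CloseJobAfterRunExample {
  static int run(Job job, boolean blocking) {
    try {
      // ... in DistCp the blocking call waits for the job to complete here ...
      return 0;
    } finally {
      if (job != null && blocking) {
        try {
          job.close(); // releases the Job's cluster resources
        } catch (IOException e) {
          System.err.println("Exception encountered while closing job: " + e);
        }
      }
    }
  }
}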

diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
index c36335a..6f8ab2b 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java
@@ -127,6 +127,7 @@ public class DistCp extends Configured implements Tool {
* to target location, by:
*  1. Creating a list of files to be copied to target.
*  2. Launching a Map-only job to copy the files. (Delegates to execute().)
+   *  The MR job is not closed as part of run if its a blocking call to run
* @param argv List of arguments passed to DistCp, from the ToolRunner.
* @return On success, it returns 0. Else, -1.
*/
@@ -148,9 +149,10 @@ public class DistCp extends Configured implements Tool {
   OptionsParser.usage();  
   return DistCpConstants.INVALID_ARGUMENT;
 }
-
+
+Job job = null;
 try {
-  execute();
+  job = execute();
 } catch (InvalidInputException e) {
   LOG.error("Invalid input: ", e);
   return DistCpConstants.INVALID_ARGUMENT;
@@ -166,6 +168,15 @@ public class DistCp extends Configured implements Tool {
 } catch (Exception e) {
   LOG.error("Exception encountered ", e);
   return DistCpConstants.UNKNOWN_ERROR;
+} finally {
+  //Blocking distcp so close the job after its done
+  if (job != null && context.shouldBlock()) {
+try {
+  job.close();
+} catch (IOException e) {
+  LOG.error("Exception encountered while closing distcp job", e);
+}
+  }
 }
 return DistCpConstants.SUCCESS;
   }
diff --git 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestExternalCall.java
 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestExternalCall.java
index 06122e6..eba86a9 100644
--- 
a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestExternalCall.java
+++ 
b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestExternalCall.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.tools;
 
+import org.apache.hadoop.mapreduce.Job;
+import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -35,6 +37,8 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.security.Permission;
 
+import static org.mockito.Mockito.*;
+
 public class TestExternalCall {
 
   private static final Logger LOG = 
LoggerFactory.getLogger(TestExternalCall.class);
@@ -134,6 +138,33 @@ public class TestExternalCall {
 
   }
 
+  /**
+   * test methods run end execute of DistCp class. distcp job should be 
cleaned up after completion
+   * @throws Exception
+   */
+  @Test
+  public void testCleanupOfJob() throws Exception {
+
+Configuration conf = getConf();
+
+Path stagingDir = JobSubmissionFiles.getStagingDir(new Cluster(conf),
+  conf);
+stagingDir.getFileSystem(conf).mkdirs(stagingDir);
+Path soure = createFile("tmp.txt");
+Path target = createFile("target.txt");
+
+DistCp distcp = mock(DistCp.class);
+Job job = spy(Job.class);
+Mockito.when(distcp.getConf()).thenReturn(conf);
+Mockito.when(distcp.execute()).thenReturn(job);
+Mockito.when(distcp.run(Mockito.any())).thenCallRealMethod();
+String[] arg = { soure.toString(), target.toString() };
+
+distcp.run(arg);
+Mockito.verify(job, times(1)).close();
+  }
+
+
   private SecurityManager securityManager;
 
   protected static class ExitException extends SecurityException {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15154. Allow only hdfs superusers the ability to assign HDFS storage policies. Contributed by Siddharth Wagle.

2020-03-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a700803  HDFS-15154. Allow only hdfs superusers the ability to assign 
HDFS storage policies. Contributed by Siddharth Wagle.
a700803 is described below

commit a700803a18fb957d2799001a2ce1dcb70f75c080
Author: Arpit Agarwal 
AuthorDate: Wed Mar 25 10:28:30 2020 -0700

HDFS-15154. Allow only hdfs superusers the ability to assign HDFS storage 
policies. Contributed by Siddharth Wagle.

Change-Id: I32d6dd2837945b8fc026a759aa367c55daefe348
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |   4 +
 .../hadoop/hdfs/server/namenode/FSDirAttrOp.java   |  12 +-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  13 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  61 ++--
 .../src/main/resources/hdfs-default.xml|   9 ++
 .../hdfs/TestStoragePolicyPermissionSettings.java  | 157 +
 6 files changed, 222 insertions(+), 34 deletions(-)
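
Illustrative sketch (not part of the patch): the new dfs.storage.policy.permissions.superuser-only key (default false, per the diff below) gates storage-policy changes behind superuser status. The method and caller check here are simplified placeholders; the real enforcement lives in FSNamesystem/FSDirAttrOp.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

public class StoragePolicyGateExample {
  static final String SUPERUSER_ONLY_KEY =
      "dfs.storage.policy.permissions.superuser-only";

  // Throws if the policy change must be rejected for a non-superuser caller.
  static void checkCanModifyStoragePolicy(Configuration conf, boolean callerIsSuperuser)
      throws IOException {
    boolean superuserOnly = conf.getBoolean(SUPERUSER_ONLY_KEY, false);
    if (superuserOnly && !callerIsSuperuser) {
      throw new IOException("Only HDFS superusers may modify storage policies");
    }
  }
}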

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e3f4d1e..73cddee 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1114,6 +1114,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
 
   public static final String  DFS_STORAGE_POLICY_ENABLED_KEY = 
"dfs.storage.policy.enabled";
   public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
+  public static final String DFS_STORAGE_POLICY_PERMISSIONS_SUPERUSER_ONLY_KEY 
=
+  "dfs.storage.policy.permissions.superuser-only";
+  public static final boolean
+  DFS_STORAGE_POLICY_PERMISSIONS_SUPERUSER_ONLY_DEFAULT = false;
 
   public static final String  DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY = 
"dfs.quota.by.storage.type.enabled";
   public static final boolean DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT = true;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 83df0aa..8e9606d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -47,7 +47,6 @@ import java.util.EnumSet;
 import java.util.List;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -151,7 +150,7 @@ public class FSDirAttrOp {
   static FileStatus unsetStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
   BlockManager bm, String src) throws IOException {
 return setStoragePolicy(fsd, pc, bm, src,
-HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, "unset");
+HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
 
   static FileStatus setStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
@@ -162,17 +161,12 @@ public class FSDirAttrOp {
   throw new HadoopIllegalArgumentException(
   "Cannot find a block policy with the name " + policyName);
 }
-return setStoragePolicy(fsd, pc, bm, src, policy.getId(), "set");
+return setStoragePolicy(fsd, pc, bm, src, policy.getId());
   }
 
   static FileStatus setStoragePolicy(FSDirectory fsd, FSPermissionChecker pc,
-  BlockManager bm, String src, final byte policyId, final String operation)
+  BlockManager bm, String src, final byte policyId)
   throws IOException {
-if (!fsd.isStoragePolicyEnabled()) {
-  throw new IOException(String.format(
-  "Failed to %s storage policy since %s is set to false.", operation,
-  DFS_STORAGE_POLICY_ENABLED_KEY));
-}
 INodesInPath iip;
 fsd.writeLock();
 try {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 77d8518..c06b59f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -88,8 +88,6 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECI
 import static 
org.apache.hadoop.hd

[hadoop] branch trunk updated: HADOOP-16833. InstrumentedLock should log lock queue time. Contributed by Stephen O'Donnell.

2020-02-18 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0cfff16  HADOOP-16833. InstrumentedLock should log lock queue time. 
Contributed by Stephen O'Donnell.
0cfff16 is described below

commit 0cfff16ac040bd5fb678d0d027369c68dead
Author: Arpit Agarwal 
AuthorDate: Tue Feb 18 09:50:11 2020 -0800

HADOOP-16833. InstrumentedLock should log lock queue time. Contributed by 
Stephen O'Donnell.

Change-Id: Idddff05051b6f642b88e51694b40c5bb1bef0026
---
 .../org/apache/hadoop/util/InstrumentedLock.java   | 121 ++---
 .../apache/hadoop/util/InstrumentedReadLock.java   |   2 +-
 .../apache/hadoop/util/TestInstrumentedLock.java   | 111 ++-
 .../hadoop/util/TestInstrumentedReadWriteLock.java |   9 +-
 4 files changed, 222 insertions(+), 21 deletions(-)
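
Illustrative sketch (not part of the patch): the change below times how long a thread waits to acquire the lock, not just how long it holds it. The pattern is to take a timestamp before blocking, compare it after acquisition, and warn when the wait crosses a threshold. Simplified stand-in for InstrumentedLock; the threshold and logging here are placeholders.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LockWaitTimingExample {
  private final Lock lock = new ReentrantLock();
  private final long warnThresholdMs = 100; // placeholder threshold

  public void lockWithWaitCheck() {
    long waitStartNanos = System.nanoTime();
    lock.lock();
    long waitedMs = (System.nanoTime() - waitStartNanos) / 1_000_000;
    if (waitedMs > warnThresholdMs) {
      System.err.println("Waited " + waitedMs + " ms above threshold to acquire lock");
    }
  }

  public void unlock() {
    lock.unlock();
  }
}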

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
index 2c1f591..cc0ebdf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
@@ -55,8 +55,10 @@ public class InstrumentedLock implements Lock {
 
   // Tracking counters for lock statistics.
   private volatile long lockAcquireTimestamp;
-  private final AtomicLong lastLogTimestamp;
-  private final AtomicLong warningsSuppressed = new AtomicLong(0);
+  private final AtomicLong lastHoldLogTimestamp;
+  private final AtomicLong lastWaitLogTimestamp;
+  private final SuppressedStats holdStats = new SuppressedStats();
+  private final SuppressedStats waitStats = new SuppressedStats();
 
   /**
* Create a instrumented lock instance which logs a warning message
@@ -91,19 +93,24 @@ public class InstrumentedLock implements Lock {
 this.logger = logger;
 minLoggingGap = minLoggingGapMs;
 lockWarningThreshold = lockWarningThresholdMs;
-lastLogTimestamp = new AtomicLong(
+lastHoldLogTimestamp = new AtomicLong(
   clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
+lastWaitLogTimestamp = new AtomicLong(lastHoldLogTimestamp.get());
   }
 
   @Override
   public void lock() {
+long waitStart = clock.monotonicNow();
 lock.lock();
+check(waitStart, clock.monotonicNow(), false);
 startLockTiming();
   }
 
   @Override
   public void lockInterruptibly() throws InterruptedException {
+long waitStart = clock.monotonicNow();
 lock.lockInterruptibly();
+check(waitStart, clock.monotonicNow(), false);
 startLockTiming();
   }
 
@@ -118,11 +125,14 @@ public class InstrumentedLock implements Lock {
 
   @Override
   public boolean tryLock(long time, TimeUnit unit) throws InterruptedException 
{
+long waitStart = clock.monotonicNow();
+boolean retval = false;
 if (lock.tryLock(time, unit)) {
   startLockTiming();
-  return true;
+  retval = true;
 }
-return false;
+check(waitStart, clock.monotonicNow(), false);
+return retval;
   }
 
   @Override
@@ -130,7 +140,7 @@ public class InstrumentedLock implements Lock {
 long localLockReleaseTime = clock.monotonicNow();
 long localLockAcquireTime = lockAcquireTimestamp;
 lock.unlock();
-check(localLockAcquireTime, localLockReleaseTime);
+check(localLockAcquireTime, localLockReleaseTime, true);
   }
 
   @Override
@@ -139,12 +149,25 @@ public class InstrumentedLock implements Lock {
   }
 
   @VisibleForTesting
-  void logWarning(long lockHeldTime, long suppressed) {
+  void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
 logger.warn(String.format("Lock held time above threshold: " +
 "lock identifier: %s " +
 "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
+"Longest suppressed LockHeldTimeMs=%d. " +
 "The stack trace is: %s" ,
-name, lockHeldTime, suppressed,
+name, lockHeldTime, stats.getSuppressedCount(),
+stats.getMaxSuppressedWait(),
+StringUtils.getStackTrace(Thread.currentThread())));
+  }
+
+  @VisibleForTesting
+  void logWaitWarning(long lockWaitTime, SuppressedSnapshot stats) {
+logger.warn(String.format("Waited above threshold to acquire lock: " +
+"lock identifier: %s " +
+"waitTimeMs=%d ms. Suppressed %d lock wait warnings. " +
+"Longest suppressed WaitTimeMs=%d. " +
+"The stack trace is: %s", name, lockWaitTime,
+stats.getSuppressedCount(), stats.getMaxSuppressedWait(),
+StringUtils.getStackTrace(Thread.currentThread())));
   }
 
@@ -163,27 +186,41 @@ public class In

[hadoop] branch trunk updated (ec86f42 -> c561a70)

2019-10-11 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from ec86f42  YARN-8453. Additional Unit tests to verify queue limit and 
max-limit with multiple resource types. Contributed by Adam Antal
 add c561a70  HDDS-2213. Reduce key provider loading log level in 
OzoneFileSystem#getAdditionalTokenIssuers (#1556)

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (a3fe404 -> 51eaeca)

2019-10-03 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from a3fe404  HDFS-14881. Safemode 'forceExit' option, doesn’t shown in 
help message. Contributed by Renukaprasad C.
 add 51eaeca  HDDS-2211. Collect docker logs if env fails to start (#1553)

No new revisions were added by this update.

Summary of changes:
 hadoop-ozone/dist/src/main/compose/test-all.sh |  4 ++--
 hadoop-ozone/dist/src/main/compose/testlib.sh  | 12 +++-
 2 files changed, 9 insertions(+), 7 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (da2b4fe -> 85b1c72)

2019-09-15 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from da2b4fe  YARN-9794. RM crashes due to runtime errors in 
TimelineServiceV2Publisher. Contributed by Tarun Parimi.
 add 85b1c72  HDDS-2129. Using dist profile fails with pom.ozone.xml as 
parent pom (#1449)

No new revisions were added by this update.

Summary of changes:
 pom.ozone.xml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: Revert "HDDS-2057. Incorrect Default OM Port in Ozone FS URI Error Message."

2019-09-13 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6a9f7ca  Revert "HDDS-2057. Incorrect Default OM Port in Ozone FS URI 
Error Message."
6a9f7ca is described below

commit 6a9f7caef47c0ccacf778134d33e0c7547017323
Author: Arpit Agarwal 
AuthorDate: Fri Sep 13 11:40:42 2019 -0700

Revert "HDDS-2057. Incorrect Default OM Port in Ozone FS URI Error Message."

This reverts commit 95010a41fcea6ecf5dfd46d6e6f6f38c8b3e2a66.
---
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java  | 26 +++---
 1 file changed, 8 insertions(+), 18 deletions(-)

diff --git 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index a1648b4..1759e5c 100644
--- 
a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -88,20 +87,11 @@ public class BasicOzoneFileSystem extends FileSystem {
   private static final Pattern URL_SCHEMA_PATTERN =
   Pattern.compile("([^\\.]+)\\.([^\\.]+)\\.{0,1}(.*)");
 
-  private OzoneConfiguration getOzoneConf(Configuration conf) {
-
-return (conf instanceof OzoneConfiguration) ?
-(OzoneConfiguration) conf : new OzoneConfiguration(conf);
-  }
-
-  private String getUriExceptionText(Configuration conf) {
-
-return "Ozone file system URL should be one of the following formats: "
-+ "o3fs://bucket.volume/key  OR "
-+ "o3fs://bucket.volume.om-host.example.com/key  OR "
-+ "o3fs://bucket.volume.om-host.example.com:"
-+ OmUtils.getOmRpcPort(getOzoneConf(conf)) + "/key";
-  }
+  private static final String URI_EXCEPTION_TEXT = "Ozone file system URL " +
+  "should be one of the following formats: " +
+  "o3fs://bucket.volume/key  OR " +
+  "o3fs://bucket.volume.om-host.example.com/key  OR " +
+  "o3fs://bucket.volume.om-host.example.com:5678/key";
 
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
@@ -121,7 +111,7 @@ public class BasicOzoneFileSystem extends FileSystem {
 Matcher matcher = URL_SCHEMA_PATTERN.matcher(authority);
 
 if (!matcher.matches()) {
-  throw new IllegalArgumentException(getUriExceptionText(conf));
+  throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
 }
 String bucketStr = matcher.group(1);
 String volumeStr = matcher.group(2);
@@ -133,14 +123,14 @@ public class BasicOzoneFileSystem extends FileSystem {
   String[] parts = remaining.split(":");
   // Array length should be either 1(hostname or service id) or 
2(host:port)
   if (parts.length > 2) {
-throw new IllegalArgumentException(getUriExceptionText(conf));
+throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
   }
   omHost = parts[0];
   if (parts.length == 2) {
 try {
   omPort = Integer.parseInt(parts[1]);
 } catch (NumberFormatException e) {
-  throw new IllegalArgumentException(getUriExceptionText(conf));
+  throw new IllegalArgumentException(URI_EXCEPTION_TEXT);
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1094. Performance test infrastructure : skip writing user data on Datanode. Contributed by Supratim Deka (#1323)

2019-08-28 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1407414  HDDS-1094. Performance test infrastructure : skip writing 
user data on Datanode. Contributed by Supratim Deka (#1323)
1407414 is described below

commit 1407414a5212e38956c13984e5daf32199175e83
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Wed Aug 28 22:35:20 2019 +0530

HDDS-1094. Performance test infrastructure : skip writing user data on 
Datanode. Contributed by Supratim Deka (#1323)
---
 .../org/apache/hadoop/hdds/HddsConfigKeys.java |   6 +
 .../ozone/container/keyvalue/KeyValueHandler.java  |   4 +-
 .../keyvalue/impl/ChunkManagerDummyImpl.java   | 162 +
 .../keyvalue/impl/ChunkManagerFactory.java |  90 
 .../container/keyvalue/impl/ChunkManagerImpl.java  |  22 ++-
 .../hadoop/ozone/freon/RandomKeyGenerator.java |  11 +-
 .../freon/TestDataValidateWithDummyContainers.java |  71 +
 7 files changed, 355 insertions(+), 11 deletions(-)
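
Illustrative sketch (not part of the patch): the factory introduced below returns a dummy chunk manager that drops user data when hdds.container.chunk.persistdata is false (performance testing only), and the real disk-writing implementation otherwise. The interface and classes here are simplified stand-ins, not the actual Ozone ChunkManager types.

import org.apache.hadoop.conf.Configuration;

public class ChunkWriterFactoryExample {
  interface ChunkWriter {
    void writeChunk(byte[] data);
  }

  static class DiskChunkWriter implements ChunkWriter {
    public void writeChunk(byte[] data) { /* persist the chunk to disk */ }
  }

  static class DummyChunkWriter implements ChunkWriter {
    public void writeChunk(byte[] data) { /* intentionally skip writing user data */ }
  }

  static ChunkWriter getChunkWriter(Configuration conf) {
    boolean persistData = conf.getBoolean("hdds.container.chunk.persistdata", true);
    return persistData ? new DiskChunkWriter() : new DummyChunkWriter();
  }
}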

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 83e270b..9e757c1 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -238,6 +238,12 @@ public final class HddsConfigKeys {
   public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL 
=
   "hdds.security.client.scm.certificate.protocol.acl";
 
+  // Determines if the Container Chunk Manager will write user data to disk
+  // Set to false only for specific performance tests
+  public static final String HDDS_CONTAINER_PERSISTDATA =
+  "hdds.container.chunk.persistdata";
+  public static final boolean HDDS_CONTAINER_PERSISTDATA_DEFAULT = true;
+
   public static final String HDDS_DATANODE_HTTP_ENABLED_KEY =
   "hdds.datanode.http.enabled";
   public static final String HDDS_DATANODE_HTTP_BIND_HOST_KEY =
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index a65b5be..50e3706 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -71,7 +71,7 @@ import 
org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import 
org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.SmallFileUtils;
-import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerImpl;
+import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
 import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
@@ -114,7 +114,7 @@ public class KeyValueHandler extends Handler {
 doSyncWrite =
 conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
 OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
-chunkManager = new ChunkManagerImpl(doSyncWrite);
+chunkManager = ChunkManagerFactory.getChunkManager(config, doSyncWrite);
 volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
 HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
 .class, VolumeChoosingPolicy.class), conf);
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
new file mode 100644
index 000..9d63c16
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICE

[hadoop] branch trunk updated: HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. Contributed by Siddharth Wagle.

2019-08-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 07e3cf9  HDFS-2470. NN should automatically set permissions on 
dfs.namenode.*.dir. Contributed by Siddharth Wagle.
07e3cf9 is described below

commit 07e3cf952eac9e47e7bd5e195b0f9fc28c468313
Author: Arpit Agarwal 
AuthorDate: Mon Aug 26 15:43:52 2019 -0700

HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. 
Contributed by Siddharth Wagle.
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++
 .../hadoop/hdfs/qjournal/server/JNStorage.java |  6 -
 .../apache/hadoop/hdfs/server/common/Storage.java  | 28 ++
 .../hadoop/hdfs/server/namenode/FSImage.java   |  2 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java | 24 ++-
 .../src/main/resources/hdfs-default.xml| 20 
 .../hadoop/hdfs/server/namenode/TestEditLog.java   | 14 +++
 .../hadoop/hdfs/server/namenode/TestStartup.java   | 27 -
 8 files changed, 115 insertions(+), 14 deletions(-)
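
Illustrative sketch (not part of the patch): the change below lets the NameNode and JournalNode create their storage directories with a configurable permission (default "700"), read from config and wrapped in an FsPermission before the StorageDirectory is constructed. The helper name here is a placeholder; the key and default match the diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class StorageDirPermissionExample {
  static FsPermission namenodeStorageDirPermission(Configuration conf) {
    // Key and default value taken from the patch below.
    return new FsPermission(conf.get("dfs.namenode.storage.dir.perm", "700"));
  }
}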

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 95806de..e6e0bae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -562,6 +562,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String  DFS_NAMENODE_NAME_DIR_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
+  "dfs.namenode.storage.dir.perm";
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
+  "700";
   public static final String  DFS_NAMENODE_EDITS_DIR_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
   public static final String  DFS_NAMENODE_SHARED_EDITS_DIR_KEY = 
"dfs.namenode.shared.edits.dir";
@@ -1109,6 +1113,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
   public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = 
"dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_JOURNALNODE_RPC_PORT_DEFAULT;
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
+  "dfs.journalnode.edits.dir.perm";
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
+  "700";
 
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = 
"dfs.journalnode.http-address";
   public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 305f1e8..0ef54a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -26,6 +26,8 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -70,7 +72,9 @@ class JNStorage extends Storage {
   StorageErrorReporter errorReporter) throws IOException {
 super(NodeType.JOURNAL_NODE);
 
-sd = new StorageDirectory(logDir);
+sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
+DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
+DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
 this.addStorageDir(sd);
 this.fjm = new FileJournalManager(conf, sd, errorReporter);
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 3dd43c7..2ba943a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/

[hadoop] branch branch-3.2 updated: HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. Contributed by Siddharth Wagle.

2019-08-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8b12381  HDFS-2470. NN should automatically set permissions on 
dfs.namenode.*.dir. Contributed by Siddharth Wagle.
8b12381 is described below

commit 8b1238171752d03712ae69d8464108ef0803ae10
Author: Arpit Agarwal 
AuthorDate: Mon Aug 26 15:43:52 2019 -0700

HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. 
Contributed by Siddharth Wagle.

(cherry picked from commit a64a43b77fb1032dcb66730a6b6257a24726c256)
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++
 .../hadoop/hdfs/qjournal/server/JNStorage.java |  6 -
 .../apache/hadoop/hdfs/server/common/Storage.java  | 28 ++
 .../hadoop/hdfs/server/namenode/FSImage.java   |  2 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java | 24 ++-
 .../src/main/resources/hdfs-default.xml| 20 
 .../hadoop/hdfs/server/namenode/TestEditLog.java   | 14 +++
 .../hadoop/hdfs/server/namenode/TestStartup.java   | 27 -
 8 files changed, 115 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7466021..ca19c64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -551,6 +551,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String  DFS_NAMENODE_NAME_DIR_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
+  "dfs.namenode.storage.dir.perm";
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
+  "700";
   public static final String  DFS_NAMENODE_EDITS_DIR_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
   public static final String  DFS_NAMENODE_SHARED_EDITS_DIR_KEY = 
"dfs.namenode.shared.edits.dir";
@@ -1069,6 +1073,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final int DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
   public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = 
"dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_JOURNALNODE_RPC_PORT_DEFAULT;
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
+  "dfs.journalnode.edits.dir.perm";
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
+  "700";
 
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = 
"dfs.journalnode.http-address";
   public static final int DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 305f1e8..0ef54a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -26,6 +26,8 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -70,7 +72,9 @@ class JNStorage extends Storage {
   StorageErrorReporter errorReporter) throws IOException {
 super(NodeType.JOURNAL_NODE);
 
-sd = new StorageDirectory(logDir);
+sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
+DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
+DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
 this.addStorageDir(sd);
 this.fjm = new FileJournalManager(conf, sd, errorReporter);
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 3dd43c7..2ba943a

[hadoop] branch trunk updated: HDDS-1366. Add ability in Recon to track the number of small files in an Ozone Cluster (#1146)

2019-08-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d29007f  HDDS-1366. Add ability in Recon to track the number of small 
files in an Ozone Cluster (#1146)
d29007f is described below

commit d29007fb35d6667f9e8f1d9befafe61b19ca7c18
Author: Shweta Yakkali 
AuthorDate: Sat Aug 10 10:14:55 2019 -0700

HDDS-1366. Add ability in Recon to track the number of small files in an 
Ozone Cluster (#1146)
---
 .../recon/schema/UtilizationSchemaDefinition.java  |  13 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  11 +-
 .../ozone/recon/api/ContainerKeyService.java   |   2 +-
 .../hadoop/ozone/recon/api/UtilizationService.java |  67 ++
 .../ozone/recon/tasks/FileSizeCountTask.java   | 255 +
 .../ozone/recon/AbstractOMMetadataManagerTest.java |  28 +++
 .../ozone/recon/api/TestUtilizationService.java|  86 +++
 .../TestUtilizationSchemaDefinition.java   |  76 +-
 .../ozone/recon/tasks/TestFileSizeCountTask.java   | 140 +++
 .../org.mockito.plugins.MockMaker  |  16 ++
 10 files changed, 690 insertions(+), 4 deletions(-)
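
Illustrative sketch (not part of the patch): the file_count_by_size table created below stores one row per size bucket. One plausible way to populate such a histogram is to round each key's size up to a power-of-two upper bound and increment that bucket's count; the exact binning rule used by FileSizeCountTask may differ, so treat this as an assumption for illustration only.

import java.util.Map;
import java.util.TreeMap;

public class FileSizeHistogramExample {
  // bucket upper bound (bytes) -> number of keys in that bucket
  private final Map<Long, Long> countBySizeBucket = new TreeMap<>();

  void addKey(long sizeBytes) {
    long upperBound = Long.highestOneBit(Math.max(sizeBytes, 1));
    if (upperBound < sizeBytes) {
      upperBound <<= 1; // round up to the next power of two
    }
    countBySizeBucket.merge(upperBound, 1L, Long::sum);
  }

  Map<Long, Long> snapshot() {
    return countBySizeBucket;
  }
}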

diff --git 
a/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
 
b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
index 977a3b3..b8e6560 100644
--- 
a/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
+++ 
b/hadoop-ozone/ozone-recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/UtilizationSchemaDefinition.java
@@ -38,6 +38,9 @@ public class UtilizationSchemaDefinition implements 
ReconSchemaDefinition {
   public static final String CLUSTER_GROWTH_DAILY_TABLE_NAME =
   "cluster_growth_daily";
 
+  public static final String FILE_COUNT_BY_SIZE_TABLE_NAME =
+  "file_count_by_size";
+
   @Inject
   UtilizationSchemaDefinition(DataSource dataSource) {
 this.dataSource = dataSource;
@@ -48,6 +51,7 @@ public class UtilizationSchemaDefinition implements 
ReconSchemaDefinition {
   public void initializeSchema() throws SQLException {
 Connection conn = dataSource.getConnection();
 createClusterGrowthTable(conn);
+createFileSizeCount(conn);
   }
 
   void createClusterGrowthTable(Connection conn) {
@@ -65,5 +69,12 @@ public class UtilizationSchemaDefinition implements 
ReconSchemaDefinition {
 .execute();
   }
 
-
+  void createFileSizeCount(Connection conn) {
+DSL.using(conn).createTableIfNotExists(FILE_COUNT_BY_SIZE_TABLE_NAME)
+.column("file_size", SQLDataType.BIGINT)
+.column("count", SQLDataType.BIGINT)
+.constraint(DSL.constraint("pk_file_size")
+.primaryKey("file_size"))
+.execute();
+  }
 }
diff --git 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index 39c82d0..a11cb5f 100644
--- 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -33,9 +33,11 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.recon.spi.ContainerDBServiceProvider;
 import org.apache.hadoop.ozone.recon.spi.OzoneManagerServiceProvider;
 import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask;
+import org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask;
 import org.hadoop.ozone.recon.schema.ReconInternalSchemaDefinition;
 import org.hadoop.ozone.recon.schema.StatsSchemaDefinition;
 import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition;
+import org.jooq.Configuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -122,7 +124,7 @@ public class ReconServer extends GenericCli {
 .getInstance(ContainerDBServiceProvider.class);
 OzoneManagerServiceProvider ozoneManagerServiceProvider = injector
 .getInstance(OzoneManagerServiceProvider.class);
-
+Configuration sqlConfiguration = injector.getInstance(Configuration.class);
 long initialDelay = configuration.getTimeDuration(
 RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY,
 RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT,
@@ -143,6 +145,13 @@ public class ReconServer extends GenericCli {
 ozoneManagerServiceProvider.getOMMetadataManagerInstance());
 containerKeyMapperTask.reprocess(
 ozoneManagerServiceProvider.getOMMetadataManagerInstance());
+FileSizeCountTask fileSizeCountTask = new
+FileSizeCountTask(
+ozoneManagerServiceProvider.g

[hadoop] branch trunk updated: HDDS-1895. Support Key ACL operations for OM HA. (#1230)

2019-08-09 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new bd4be6e  HDDS-1895. Support Key ACL operations for OM HA. (#1230)
bd4be6e is described below

commit bd4be6e1682a154b07580b12a48d4e4346cb046e
Author: Bharat Viswanadham 
AuthorDate: Fri Aug 9 20:32:01 2019 -0700

HDDS-1895. Support Key ACL operations for OM HA. (#1230)
---
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 150 ++--
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 196 ++---
 .../om/ratis/utils/OzoneManagerRatisUtils.java |   9 +
 .../ozone/om/request/key/acl/OMKeyAclRequest.java  | 173 ++
 .../om/request/key/acl/OMKeyAddAclRequest.java | 107 +++
 .../om/request/key/acl/OMKeyRemoveAclRequest.java  | 108 
 .../om/request/key/acl/OMKeySetAclRequest.java | 105 +++
 .../ozone/om/request/key/acl/package-info.java |  24 +++
 .../hadoop/ozone/om/request/util/ObjectParser.java |   2 +-
 .../om/response/key/acl/OMKeyAclResponse.java  |  63 +++
 .../ozone/om/response/key/acl/package-info.java|  24 +++
 11 files changed, 885 insertions(+), 76 deletions(-)
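
Illustrative sketch (not part of the patch): OmKeyInfo.addAcl in the diff below ORs the requested rights into the existing entry's BitSet before comparing, so that a request for a subset of already-granted rights is detected as a no-op. Standalone example with plain BitSets instead of the protobuf OzoneAclInfo wrappers.

import java.util.BitSet;

public class AclBitSetExample {
  /** Returns true only if newRights grants something existingRights does not already cover. */
  static boolean addsNewRights(BitSet existingRights, BitSet newRights) {
    BitSet merged = (BitSet) existingRights.clone();
    merged.or(newRights);
    return !merged.equals(existingRights);
  }

  public static void main(String[] args) {
    BitSet existing = new BitSet();
    existing.set(0); existing.set(1); existing.set(2); // e.g. read/write/delete already granted
    BitSet requested = new BitSet();
    requested.set(1);                                   // subset of the existing rights
    System.out.println(addsNewRights(existing, requested)); // false: nothing new to add
  }
}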

diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 80c9f58..17aabd2 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.ozone.om.helpers;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.stream.Collectors;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
@@ -34,6 +36,8 @@ import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
 
+import static org.apache.hadoop.ozone.OzoneAcl.ZERO_BITSET;
+
 /**
  * Args for key block. The block instance for the key requested in putKey.
  * This is returned from OM to client, and client use class to talk to
@@ -236,6 +240,119 @@ public final class OmKeyInfo extends WithMetadata {
   }
 
   /**
+   * Add an ozoneAcl to list of existing Acl set.
+   * @param ozoneAcl
+   * @return true - if successfully added, false if not added or acl is
+   * already existing in the acl list.
+   */
+  public boolean addAcl(OzoneAclInfo ozoneAcl) {
+// Case 1: When we are adding more rights to existing user/group.
+boolean addToExistingAcl = false;
+for(OzoneAclInfo existingAcl: getAcls()) {
+  if(existingAcl.getName().equals(ozoneAcl.getName()) &&
+  existingAcl.getType().equals(ozoneAcl.getType())) {
+
+// We need to do "or" before comparision because think of a case like
+// existing acl is 777 and newly added acl is 444, we have already
+// that acl set. In this case if we do direct check they will not
+// be equal, but if we do or and then check, we shall know it
+// has acl's already set or not.
+BitSet newAclBits = BitSet.valueOf(
+existingAcl.getRights().toByteArray());
+
+newAclBits.or(BitSet.valueOf(ozoneAcl.getRights().toByteArray()));
+
+if (newAclBits.equals(BitSet.valueOf(
+existingAcl.getRights().toByteArray()))) {
+  return false;
+} else {
+  OzoneAclInfo newAcl = OzoneAclInfo.newBuilder()
+  .setType(ozoneAcl.getType())
+  .setName(ozoneAcl.getName())
+  .setAclScope(ozoneAcl.getAclScope())
+  .setRights(ByteString.copyFrom(newAclBits.toByteArray()))
+  .build();
+  getAcls().remove(existingAcl);
+  getAcls().add(newAcl);
+  addToExistingAcl = true;
+  break;
+}
+  }
+}
+
+// Case 2: When a completely new acl is added.
+if(!addToExistingAcl) {
+  getAcls().add(ozoneAcl);
+}
+return true;
+  }
+
+  /**
+   * Remove acl from existing acl list.
+   * @param ozoneAcl
+   * @return true - if successfully removed, false if not able to remove due
+   * to that acl is not in the existing acl list.
+   */
+  public boolean removeAcl(OzoneAclInfo ozoneAcl) {
+boolean removed = false;
+
+// When we are removing subset of rights from existing acl.
+for(OzoneAclInfo existingAcl: getAcls()) {
+  if (existingAcl.getName().equals(ozoneAcl.getName()) &&
+  existingAcl.getType

[hadoop] branch trunk updated (b079914 -> 14a4ce3)

2019-08-08 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b079914  HDFS-14459. ClosedChannelException silently ignored in 
FsVolumeList.addBlockPool(). Contributed by Stephen O'Donnell.
 add 14a4ce3  HDDS-1829 On OM reload/restart OmMetrics#numKeys should be 
updated. Contributed by Siyao Meng.

No new revisions were added by this update.

Summary of changes:
 .../java/org/apache/hadoop/utils/db/RDBTable.java   | 10 ++
 .../main/java/org/apache/hadoop/utils/db/Table.java |  7 +++
 .../java/org/apache/hadoop/utils/db/TypedTable.java |  5 +
 .../apache/hadoop/utils/db/TestRDBTableStore.java   | 21 -
 .../hadoop/utils/db/TestTypedRDBTableStore.java | 21 -
 .../apache/hadoop/ozone/om/OMMetadataManager.java   | 11 +++
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  | 10 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java|  2 ++
 8 files changed, 85 insertions(+), 2 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1875. Fix failures in TestS3MultipartUploadAbortResponse. (#1188)

2019-07-31 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a6f47b5  HDDS-1875. Fix failures in 
TestS3MultipartUploadAbortResponse. (#1188)
a6f47b5 is described below

commit a6f47b5876e51e888058e3731e6c15ea2656f2f7
Author: Bharat Viswanadham 
AuthorDate: Wed Jul 31 09:37:53 2019 -0700

HDDS-1875. Fix failures in TestS3MultipartUploadAbortResponse. (#1188)
---
 .../hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java  | 2 +-
 .../om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
index 6a9c1ed..1d0d4f2 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java
@@ -113,7 +113,7 @@ public class TestS3MultipartResponse {
 .setAbortMultiPartUploadResponse(
 MultipartUploadAbortResponse.newBuilder().build()).build();
 
-return new S3MultipartUploadAbortResponse(multipartKey, Time.now(),
+return new S3MultipartUploadAbortResponse(multipartKey, timeStamp,
 omMultipartKeyInfo,
 omResponse);
   }
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
index 6bd27aa..fdddb2c 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadAbortResponse.java
@@ -82,7 +82,7 @@ public class TestS3MultipartUploadAbortResponse
 PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName,
 keyName, 1);
 PartKeyInfo part2 = createPartKeyInfo(volumeName, bucketName,
-keyName, 1);
+keyName, 2);
 
 addPart(1, part1, omMultipartKeyInfo);
 addPart(2, part2, omMultipartKeyInfo);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1391 : Add ability in OM to serve delta updates through an API. (#1033)

2019-07-29 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 60325c9  HDDS-1391 : Add ability in OM to serve delta updates through 
an API. (#1033)
60325c9 is described below

commit 60325c9611641d64dc6f9132ae9b376c1621
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Mon Jul 29 09:39:40 2019 -0700

HDDS-1391 : Add ability in OM to serve delta updates through an API. (#1033)
---
 .../java/org/apache/hadoop/utils/db/DBStore.java   |  9 
 .../apache/hadoop/utils/db/DBUpdatesWrapper.java   | 48 +
 .../java/org/apache/hadoop/utils/db/RDBStore.java  | 46 
 .../utils/db/SequenceNumberNotFoundException.java  | 37 
 .../org/apache/hadoop/utils/db/TestRDBStore.java   | 21 ++
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  1 +
 .../src/main/proto/OzoneManagerProtocol.proto  | 12 ++
 .../apache/hadoop/ozone/om/TestOzoneManager.java   | 49 --
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 17 
 .../protocolPB/OzoneManagerRequestHandler.java | 23 ++
 10 files changed, 251 insertions(+), 12 deletions(-)
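
Illustrative sketch (not part of the patch): a caller of the new API below asks the DB store for all write batches committed after a known sequence number, applies them, and remembers the latest sequence number returned. The syncFrom helper and how the batches are applied are placeholders; the DBStore/DBUpdatesWrapper calls match the interface added in this commit.

import org.apache.hadoop.utils.db.DBStore;
import org.apache.hadoop.utils.db.DBUpdatesWrapper;
import org.apache.hadoop.utils.db.SequenceNumberNotFoundException;

public class DeltaUpdatesExample {
  static long syncFrom(DBStore dbStore, long lastAppliedSequenceNumber)
      throws SequenceNumberNotFoundException {
    DBUpdatesWrapper updates = dbStore.getUpdatesSince(lastAppliedSequenceNumber);
    for (byte[] writeBatch : updates.getData()) {
      // apply the serialized RocksDB write batch to the follower here
    }
    return updates.getCurrentSequenceNumber();
  }
}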

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
index 59004c6..3c8df38 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -184,4 +184,13 @@ public interface DBStore extends AutoCloseable {
* @return codec registry.
*/
   CodecRegistry getCodecRegistry();
+
+  /**
+   * Get data written to DB since a specific sequence number.
+   * @param sequenceNumber
+   * @return
+   * @throws SequenceNumberNotFoundException
+   */
+  DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
+  throws SequenceNumberNotFoundException;
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBUpdatesWrapper.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBUpdatesWrapper.java
new file mode 100644
index 000..54ebc7c
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBUpdatesWrapper.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.utils.db;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Wrapper class to hold DB data read from the RocksDB log file.
+ */
+public class DBUpdatesWrapper {
+
+  private List<byte[]> dataList = new ArrayList<>();
+  private long currentSequenceNumber = -1;
+
+  public void addWriteBatch(byte[] data, long sequenceNumber) {
+dataList.add(data);
+if (currentSequenceNumber < sequenceNumber) {
+  currentSequenceNumber = sequenceNumber;
+}
+  }
+
+  public List<byte[]> getData() {
+return dataList;
+  }
+
+  public long getCurrentSequenceNumber() {
+return currentSequenceNumber;
+  }
+}
+
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
index 23c03f1..4182687 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBStore.java
@@ -46,6 +46,7 @@ import org.rocksdb.DBOptions;
 import org.rocksdb.FlushOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
+import org.rocksdb.TransactionLogIterator;
 import org.rocksdb.WriteOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -327,6 +328,51 @@ public class RDBStore implements DBStore {
 return codecRegistry;
   }
 
+  @Override
+  public DBUpdatesWrapper getUpdatesSince(long sequenceNumber)
+  throws SequenceNumberNotFoundException {
+
+DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper();
+try {
+  TransactionLogIterator transactionLogIterator =
+

[hadoop] 01/01: Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to die (#1156)"

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch revert-1156-HDDS-1830
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 74937b8286753671deb4e9e4e16b24d182dde28b
Author: Arpit Agarwal 
AuthorDate: Thu Jul 25 16:18:04 2019 -0700

Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon 
thread to die (#1156)"

This reverts commit b7fba78fb63a0971835db87292822fd8cd4aa7ad.
---
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 20 +++-
 1 file changed, 7 insertions(+), 13 deletions(-)
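
Illustrative sketch (not part of the patch): the revert below restores a stop() that flips a volatile flag and interrupts the flush daemon without waiting, undoing the AtomicBoolean-plus-join variant in which stop() only returns after the thread has exited. Simplified stand-in with a generic worker thread rather than the real OzoneManagerDoubleBuffer.

import java.util.concurrent.atomic.AtomicBoolean;

public class StopAndJoinExample {
  private final AtomicBoolean isRunning = new AtomicBoolean(true);
  private final Thread worker = new Thread(() -> {
    while (isRunning.get()) {
      try {
        Thread.sleep(100); // stand-in for flushing transactions
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  });

  void start() {
    worker.start();
  }

  // The joining variant: stop() returns only after the worker has exited.
  void stop() throws InterruptedException {
    if (isRunning.compareAndSet(true, false)) {
      worker.interrupt();
      worker.join();
    }
  }
}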

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index e329d5a..2bde3ad 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.om.ratis;
 import java.io.IOException;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -65,7 +64,7 @@ public class OzoneManagerDoubleBuffer {
   private final OMMetadataManager omMetadataManager;
   private final AtomicLong flushedTransactionCount = new AtomicLong(0);
   private final AtomicLong flushIterations = new AtomicLong(0);
-  private final AtomicBoolean isRunning = new AtomicBoolean(false);
+  private volatile boolean isRunning;
   private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics;
   private long maxFlushedTransactionsInOneIteration;
 
@@ -80,7 +79,7 @@ public class OzoneManagerDoubleBuffer {
 this.ozoneManagerDoubleBufferMetrics =
 OzoneManagerDoubleBufferMetrics.create();
 
-isRunning.set(true);
+isRunning = true;
 // Daemon thread which runs in back ground and flushes transactions to DB.
 daemon = new Daemon(this::flushTransactions);
 daemon.setName("OMDoubleBufferFlushThread");
@@ -93,7 +92,7 @@ public class OzoneManagerDoubleBuffer {
* and commit to DB.
*/
   private void flushTransactions() {
-while (isRunning.get()) {
+while(isRunning) {
   try {
 if (canFlush()) {
   setReadyBuffer();
@@ -141,7 +140,7 @@ public class OzoneManagerDoubleBuffer {
 }
   } catch (InterruptedException ex) {
 Thread.currentThread().interrupt();
-if (isRunning.get()) {
+if (isRunning) {
   final String message = "OMDoubleBuffer flush thread " +
   Thread.currentThread().getName() + " encountered Interrupted " +
   "exception while running";
@@ -202,16 +201,11 @@ public class OzoneManagerDoubleBuffer {
   /**
* Stop OM DoubleBuffer flush thread.
*/
-  public void stop() {
-if (isRunning.compareAndSet(true, false)) {
+  public synchronized void stop() {
+if (isRunning) {
   LOG.info("Stopping OMDoubleBuffer flush thread");
+  isRunning = false;
   daemon.interrupt();
-  try {
-// Wait for daemon thread to exit
-daemon.join();
-  } catch (InterruptedException e) {
-LOG.error("Interrupted while waiting for daemon to exit.");
-  }
 
   // stop metrics.
   ozoneManagerDoubleBufferMetrics.unRegister();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-1156-HDDS-1830 created (now 74937b8)

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch revert-1156-HDDS-1830
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"

This branch includes the following new commits:

 new 74937b8  Revert "HDDS-1830 OzoneManagerDoubleBuffer#stop should wait 
for daemon thread to die (#1156)"

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to die (#1156)

2019-07-25 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7fba78  HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for 
daemon thread to die (#1156)
b7fba78 is described below

commit b7fba78fb63a0971835db87292822fd8cd4aa7ad
Author: Siyao Meng <50227127+smen...@users.noreply.github.com>
AuthorDate: Thu Jul 25 16:14:50 2019 -0700

HDDS-1830 OzoneManagerDoubleBuffer#stop should wait for daemon thread to 
die (#1156)
---
 .../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 20 +---
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
index 2bde3ad..e329d5a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.om.ratis;
 import java.io.IOException;
 import java.util.Queue;
 import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -64,7 +65,7 @@ public class OzoneManagerDoubleBuffer {
   private final OMMetadataManager omMetadataManager;
   private final AtomicLong flushedTransactionCount = new AtomicLong(0);
   private final AtomicLong flushIterations = new AtomicLong(0);
-  private volatile boolean isRunning;
+  private final AtomicBoolean isRunning = new AtomicBoolean(false);
   private OzoneManagerDoubleBufferMetrics ozoneManagerDoubleBufferMetrics;
   private long maxFlushedTransactionsInOneIteration;
 
@@ -79,7 +80,7 @@ public class OzoneManagerDoubleBuffer {
 this.ozoneManagerDoubleBufferMetrics =
 OzoneManagerDoubleBufferMetrics.create();
 
-isRunning = true;
+isRunning.set(true);
 // Daemon thread which runs in back ground and flushes transactions to DB.
 daemon = new Daemon(this::flushTransactions);
 daemon.setName("OMDoubleBufferFlushThread");
@@ -92,7 +93,7 @@ public class OzoneManagerDoubleBuffer {
* and commit to DB.
*/
   private void flushTransactions() {
-while(isRunning) {
+while (isRunning.get()) {
   try {
 if (canFlush()) {
   setReadyBuffer();
@@ -140,7 +141,7 @@ public class OzoneManagerDoubleBuffer {
 }
   } catch (InterruptedException ex) {
 Thread.currentThread().interrupt();
-if (isRunning) {
+if (isRunning.get()) {
   final String message = "OMDoubleBuffer flush thread " +
   Thread.currentThread().getName() + " encountered Interrupted " +
   "exception while running";
@@ -201,11 +202,16 @@ public class OzoneManagerDoubleBuffer {
   /**
* Stop OM DoubleBuffer flush thread.
*/
-  public synchronized void stop() {
-if (isRunning) {
+  public void stop() {
+if (isRunning.compareAndSet(true, false)) {
   LOG.info("Stopping OMDoubleBuffer flush thread");
-  isRunning = false;
   daemon.interrupt();
+  try {
+// Wait for daemon thread to exit
+daemon.join();
+  } catch (InterruptedException e) {
+LOG.error("Interrupted while waiting for daemon to exit.");
+  }
 
   // stop metrics.
   ozoneManagerDoubleBufferMetrics.unRegister();
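
The difference between this commit and the revert above comes down to how stop()
coordinates with the flush daemon. Below is a small, self-contained sketch of the
pattern this commit lands on: compare-and-set a flag, interrupt the thread, then
join so stop() only returns once the flusher has actually exited. FlushLoopSketch
and its members are illustrative stand-ins, not the actual Ozone classes.

import java.util.concurrent.atomic.AtomicBoolean;

public class FlushLoopSketch {

  private final AtomicBoolean isRunning = new AtomicBoolean(false);
  private Thread flusher;

  public void start() {
    isRunning.set(true);
    flusher = new Thread(this::flushLoop, "FlushLoopSketch");
    flusher.setDaemon(true);
    flusher.start();
  }

  private void flushLoop() {
    while (isRunning.get()) {
      try {
        doFlushOnce();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        // Only treat the interrupt as unexpected if nobody asked us to stop.
        if (isRunning.get()) {
          throw new IllegalStateException("flush thread interrupted while running", e);
        }
      }
    }
  }

  public void stop() {
    // compareAndSet makes the interrupt/join sequence run at most once,
    // even if stop() is called from several threads.
    if (isRunning.compareAndSet(true, false)) {
      flusher.interrupt();
      try {
        // Wait for the daemon to exit so callers can tear down shared state safely.
        flusher.join();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }

  private void doFlushOnce() throws InterruptedException {
    Thread.sleep(100); // placeholder for "flush pending transactions to the DB"
  }
}

The AtomicBoolean also closes the race the plain volatile flag left open: with
compareAndSet, two concurrent stop() calls cannot both run the shutdown sequence.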


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1686. Remove check to get from openKeyTable in acl implementatio… (#966)

2019-07-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2ea71d9  HDDS-1686. Remove check to get from openKeyTable in acl 
implementatio… (#966)
2ea71d9 is described below

commit 2ea71d953b46221f90b38d75a2999056f044471f
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 22 15:11:10 2019 -0700

HDDS-1686. Remove check to get from openKeyTable in acl implementatio… 
(#966)
---
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 43 --
 1 file changed, 8 insertions(+), 35 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 24af013..c7182c2 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1396,17 +1396,10 @@ public class KeyManagerImpl implements KeyManager {
   validateBucket(volume, bucket);
   String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
   OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-  Table keyTable;
   if (keyInfo == null) {
-keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
-if (keyInfo == null) {
-  throw new OMException("Key not found. Key:" +
-  objectKey, KEY_NOT_FOUND);
-}
-keyTable = metadataManager.getOpenKeyTable();
-  } else {
-keyTable = metadataManager.getKeyTable();
+throw new OMException("Key not found. Key:" + objectKey, 
KEY_NOT_FOUND);
   }
+
   List newAcls = new ArrayList<>(keyInfo.getAcls());
   OzoneAclInfo newAcl = null;
   for(OzoneAclInfo a: keyInfo.getAcls()) {
@@ -1442,7 +1435,7 @@ public class KeyManagerImpl implements KeyManager {
   .setDataSize(keyInfo.getDataSize())
   .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
   .build();
-  keyTable.put(objectKey, newObj);
+  metadataManager.getKeyTable().put(objectKey, newObj);
 } catch (IOException ex) {
   if (!(ex instanceof OMException)) {
 LOG.error("Add acl operation failed for key:{}/{}/{}", volume,
@@ -1475,16 +1468,8 @@ public class KeyManagerImpl implements KeyManager {
   validateBucket(volume, bucket);
   String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
   OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-  Table keyTable;
   if (keyInfo == null) {
-keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
-if (keyInfo == null) {
-  throw new OMException("Key not found. Key:" +
-  objectKey, KEY_NOT_FOUND);
-}
-keyTable = metadataManager.getOpenKeyTable();
-  } else {
-keyTable = metadataManager.getKeyTable();
+throw new OMException("Key not found. Key:" + objectKey, 
KEY_NOT_FOUND);
   }
 
   List newAcls = new ArrayList<>(keyInfo.getAcls());
@@ -1529,7 +1514,7 @@ public class KeyManagerImpl implements KeyManager {
   .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
   .build();
 
-  keyTable.put(objectKey, newObj);
+  metadataManager.getKeyTable().put(objectKey, newObj);
 } catch (IOException ex) {
   if (!(ex instanceof OMException)) {
 LOG.error("Remove acl operation failed for key:{}/{}/{}", volume,
@@ -1562,16 +1547,8 @@ public class KeyManagerImpl implements KeyManager {
   validateBucket(volume, bucket);
   String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
   OmKeyInfo keyInfo = metadataManager.getKeyTable().get(objectKey);
-  Table keyTable;
   if (keyInfo == null) {
-keyInfo = metadataManager.getOpenKeyTable().get(objectKey);
-if (keyInfo == null) {
-  throw new OMException("Key not found. Key:" +
-  objectKey, KEY_NOT_FOUND);
-}
-keyTable = metadataManager.getOpenKeyTable();
-  } else {
-keyTable = metadataManager.getKeyTable();
+throw new OMException("Key not found. Key:" + objectKey, 
KEY_NOT_FOUND);
   }
 
   List newAcls = new ArrayList<>();
@@ -1592,7 +1569,7 @@ public class KeyManagerImpl implements KeyManager {
   .setFileEncryptionInfo(keyInfo.getFileEncryptionInfo())
   .build();
 
-  keyTable.put(objectKey, newObj);
+  metadataManager.getKeyTable().put(objectKey, newObj);
 } catch (IOException ex) {
   if (!(ex instanceof OMException)) {
 LOG.error("Set acl operation failed for key:{}/{}/{}"
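
The net effect of this change is that ACL operations no longer fall back to the
open-key table: a key that exists only as an in-flight open key is now rejected
with KEY_NOT_FOUND. A toy, self-contained illustration of that before/after
lookup behaviour, with plain java.util maps standing in for the real key tables:

import java.util.HashMap;
import java.util.Map;

public final class AclLookupSketch {

  public static void main(String[] args) {
    Map<String, String> keyTable = new HashMap<>();      // committed keys
    Map<String, String> openKeyTable = new HashMap<>();  // keys still being written
    openKeyTable.put("/vol/bucket/key1", "in-flight");

    // Before: fall back to the open-key table, so ACL ops could touch in-flight keys.
    String before = keyTable.getOrDefault("/vol/bucket/key1",
        openKeyTable.get("/vol/bucket/key1"));
    System.out.println("before HDDS-1686: " + before);    // prints "in-flight"

    // After: only the committed key table is consulted; a miss is a hard failure.
    String after = keyTable.get("/vol/bucket/key1");
    if (after == null) {
      System.out.println("after HDDS-1686: KEY_NOT_FOUND"); // the ACL op is rejected
    }
  }
}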

[hadoop] branch trunk updated: HDDS-1649. On installSnapshot notification from OM leader, download checkpoint and reload OM state (#948)

2019-07-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cdc36fe  HDDS-1649. On installSnapshot notification from OM leader, 
download checkpoint and reload OM state (#948)
cdc36fe is described below

commit cdc36fe286708b5ff12675599da8c7650744f064
Author: Hanisha Koneru 
AuthorDate: Mon Jul 22 12:06:55 2019 -0700

HDDS-1649. On installSnapshot notification from OM leader, download 
checkpoint and reload OM state (#948)
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   1 +
 .../common/src/main/resources/ozone-default.xml|   8 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   3 +
 .../hadoop/ozone/om/exceptions/OMException.java|   3 +-
 .../ozone/om/protocol/OzoneManagerHAProtocol.java  |   3 +-
 .../src/main/proto/OzoneManagerProtocol.proto  |   2 +
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |   6 +
 .../hadoop/ozone/MiniOzoneHAClusterImpl.java   |  49 ++-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java  | 189 +++
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |   7 +-
 .../hadoop/ozone/om/OMDBCheckpointServlet.java |   2 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   9 +-
 .../org/apache/hadoop/ozone/om/OzoneManager.java   | 359 -
 .../ozone/om/ratis/OzoneManagerRatisServer.java|  15 +-
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |  81 -
 .../om/snapshot/OzoneManagerSnapshotProvider.java  |   2 +-
 16 files changed, 637 insertions(+), 102 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index d28e477..67bd22d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -119,6 +119,7 @@ public final class OzoneConsts {
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
+  public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
   public static final String OM_DB_CHECKPOINTS_DIR_NAME = "om.db.checkpoints";
   public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db";
   public static final String SCM_DB_NAME = "scm.db";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 30cf386..b2f820b 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1630,6 +1630,14 @@
 Byte limit for Raft's Log Worker queue.
 
   
+  
+ozone.om.ratis.log.purge.gap
+100
+OZONE, OM, RATIS
+The minimum gap between log indices for Raft server to purge
+  its log segments after taking snapshot.
+
+  
 
   
 ozone.om.ratis.snapshot.auto.trigger.threshold
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 14b6783..35431fa 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -123,6 +123,9 @@ public final class OMConfigKeys {
   "ozone.om.ratis.log.appender.queue.byte-limit";
   public static final String
   OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB";
+  public static final String OZONE_OM_RATIS_LOG_PURGE_GAP =
+  "ozone.om.ratis.log.purge.gap";
+  public static final int OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT = 100;
 
   // OM Snapshot configurations
   public static final String OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
index 66ce1cc..78bdb21 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
+++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java
@@ -203,7 +203,8 @@ public class OMException extends IOException {
 
 PREFIX_NOT_FOUND,
 
-S3_BUCKET_INVALID_LENGTH
+S3_BUCKET_INVALID_LENGTH,
 
+RATIS_ERROR // Error in Ratis server
   }
 }
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.java
 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerHAProtocol.ja
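
Among the changes above, ozone.om.ratis.log.purge.gap controls how far apart log
indices must be before the Raft server purges log segments after a snapshot. A
hedged usage sketch of reading the new key, assuming only the OMConfigKeys
constants added in this commit and the standard Configuration#getInt accessor;
how the value is wired into the Ratis server properties is not shown here:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;

public final class PurgeGapConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Falls back to OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT (100) when the key is unset.
    int purgeGap = conf.getInt(
        OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP,
        OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP_DEFAULT);
    System.out.println("ozone.om.ratis.log.purge.gap = " + purgeGap);
  }
}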

[hadoop] branch trunk updated: HDDS-1840. Fix TestSecureOzoneContainer. (#1135)

2019-07-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 340bbaf  HDDS-1840. Fix TestSecureOzoneContainer. (#1135)
340bbaf is described below

commit 340bbaf8bfba1368e45dbcefd64937ef8afe7a9c
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 22 10:23:48 2019 -0700

HDDS-1840. Fix TestSecureOzoneContainer. (#1135)
---
 .../hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java  | 2 ++
 1 file changed, 2 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index c086f31..fca449b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -26,6 +26,7 @@ import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.Ac
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.client.CertificateClientTestImpl;
@@ -110,6 +111,7 @@ public class TestSecureOzoneContainer {
 
   @Before
   public void setup() throws Exception {
+DefaultMetricsSystem.setMiniClusterMode(true);
 conf = new OzoneConfiguration();
 String ozoneMetaPath =
 GenericTestUtils.getTempPath("ozoneMeta");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1666. Issue in openKey when allocating block. (#943)

2019-07-15 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ef66e49  HDDS-1666. Issue in openKey when allocating block. (#943)
ef66e49 is described below

commit ef66e4999f3cd5f0ea2fa018359facb776bf892f
Author: Bharat Viswanadham 
AuthorDate: Mon Jul 15 17:54:41 2019 -0700

HDDS-1666. Issue in openKey when allocating block. (#943)
---
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 25 --
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  4 ++--
 2 files changed, 25 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index 0aa301a..0c5ce2b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -24,6 +24,7 @@ import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.ArrayList;
 import java.util.BitSet;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -61,6 +62,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
@@ -76,6 +78,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -213,11 +216,29 @@ public class TestKeyManagerImpl {
 OmKeyArgs keyArgs = createBuilder()
 .setKeyName(KEY_NAME)
 .build();
-OpenKeySession keySession = keyManager1.openKey(keyArgs);
+
+// As now openKey will allocate at least one block, even if the size
+// passed is 0. So adding an entry to openKeyTable manually to test
+// allocateBlock failure.
+OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
+.setVolumeName(keyArgs.getVolumeName())
+.setBucketName(keyArgs.getBucketName())
+.setKeyName(keyArgs.getKeyName())
+.setOmKeyLocationInfos(Collections.singletonList(
+new OmKeyLocationInfoGroup(0, new ArrayList<>(
+.setCreationTime(Time.now())
+.setModificationTime(Time.now())
+.setDataSize(0)
+.setReplicationType(keyArgs.getType())
+.setReplicationFactor(keyArgs.getFactor())
+.setFileEncryptionInfo(null).build();
+metadataManager.getOpenKeyTable().put(
+metadataManager.getOpenKey(VOLUME_NAME, BUCKET_NAME, KEY_NAME, 1L),
+omKeyInfo);
 LambdaTestUtils.intercept(OMException.class,
 "SafeModePrecheck failed for allocateBlock", () -> {
   keyManager1
-  .allocateBlock(keyArgs, keySession.getId(), new ExcludeList());
+  .allocateBlock(keyArgs, 1L, new ExcludeList());
 });
   }
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 9e29825..90f7e4a 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -436,7 +436,7 @@ public class KeyManagerImpl implements KeyManager {
 // client should expect, in terms of current size of key. If client sets
 // a value, then this value is used, otherwise, we allocate a single
 // block which is the current size, if read by the client.
-final long size = args.getDataSize() >= 0 ?
+final long size = args.getDataSize() > 0 ?
 args.getDataSize() : scmBlockSize;
 final List locations = new ArrayList<>();
 
@@ -477,7 +477,7 @@ public class KeyManagerImpl implements KeyManager {
 openVersion = keyInfo.getLatestVersionLocations().getVersion();
 LOG.debug("Key {} allocated in volume {} bucket {}",
 keyName, volumeName, bucketName);
-allocateBlockInKey(keyInfo, args.getDataSize(), currentTime);
+allocateBlockInKey(keyInfo, size, currentTime);
 return new OpenKeySession(currentTime, keyInfo, openVersion);
   }
 


---
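
The heart of the fix is the boundary change from >= 0 to > 0 when choosing the
preallocation size: an openKey call with dataSize 0 now falls back to one SCM
block instead of allocating nothing. A toy comparison of the two expressions
(the 256 MB block size is just an assumed value for the demo):

public final class OpenKeySizeSketch {

  static long preallocSizeOld(long dataSize, long scmBlockSize) {
    return dataSize >= 0 ? dataSize : scmBlockSize;  // before: 0 stays 0
  }

  static long preallocSizeNew(long dataSize, long scmBlockSize) {
    return dataSize > 0 ? dataSize : scmBlockSize;   // after: 0 falls back to one block
  }

  public static void main(String[] args) {
    long scmBlockSize = 256L * 1024 * 1024;
    System.out.println(preallocSizeOld(0, scmBlockSize)); // 0 -> nothing preallocated
    System.out.println(preallocSizeNew(0, scmBlockSize)); // 268435456 -> one block
  }
}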

[hadoop] branch trunk updated: HDDS-1778. Fix existing blockade tests. (#1068)

2019-07-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new efb9164  HDDS-1778. Fix existing blockade tests. (#1068)
efb9164 is described below

commit efb916457fc5af868cb7003ee99e0ce3a050a4d2
Author: Nanda kumar 
AuthorDate: Wed Jul 10 22:13:59 2019 +0530

HDDS-1778. Fix existing blockade tests. (#1068)
---
 .../src/main/compose/ozoneblockade/docker-config   |   3 +
 .../test/blockade/clusterUtils/cluster_utils.py| 335 -
 .../blockade/{blockadeUtils => ozone}/blockade.py  |  16 +-
 .../src/test/blockade/ozone/client.py  |  75 +++
 .../src/test/blockade/ozone/cluster.py | 526 ++---
 .../__init__.py => ozone/constants.py} |  11 +-
 .../src/test/blockade/ozone/container.py   | 117 +
 .../__init__.py => ozone/exceptions.py}|  10 +-
 .../src/test/blockade/{ => ozone}/util.py  |  56 ++-
 .../test/blockade/test_blockade_client_failure.py  | 158 +++
 .../blockade/test_blockade_datanode_isolation.py   | 228 -
 .../src/test/blockade/test_blockade_flaky.py   |  42 +-
 .../test/blockade/test_blockade_mixed_failure.py   | 240 --
 ...t_blockade_mixed_failure_three_nodes_isolate.py | 357 ++
 .../test_blockade_mixed_failure_two_nodes.py   | 275 +--
 .../test/blockade/test_blockade_scm_isolation.py   | 252 --
 16 files changed, 1185 insertions(+), 1516 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
index f5e6a92..8347998 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
@@ -23,12 +23,15 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_ozone.client.max.retries=10
+OZONE-SITE.XML_ozone.scm.stale.node.interval=2m
 OZONE-SITE.XML_ozone.scm.dead.node.interval=5m
 OZONE-SITE.XML_ozone.replication=1
 OZONE-SITE.XML_hdds.datanode.dir=/data/hdds
 OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1
 OZONE-SITE.XML_ozone.scm.pipeline.destroy.timeout=15s
 OZONE-SITE.XML_hdds.heartbeat.interval=2s
+OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=30s
 OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s
 OZONE-SITE.XML_hdds.scm.replication.event.timeout=7s
 OZONE-SITE.XML_dfs.ratis.server.failure.duration=25s
diff --git 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py
 
b/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py
deleted file mode 100644
index 53e3fa0..000
--- 
a/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade/clusterUtils/cluster_utils.py
+++ /dev/null
@@ -1,335 +0,0 @@
-#!/usr/bin/python
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from subprocess import call
-
-import subprocess
-import logging
-import time
-import re
-import os
-import yaml
-
-
-logger = logging.getLogger(__name__)
-
-
-class ClusterUtils(object):
-  """
-  This class contains all cluster related operations.
-  """
-
-  @classmethod
-  def cluster_setup(cls, docker_compose_file, datanode_count,
-destroy_existing_cluster=True):
-"""start a blockade cluster"""
-logger.info("compose file :%s", docker_compose_file)
-logger.info("number of DNs :%d", datanode_count)
-if destroy_existing_cluster:
-  call(["docker-compose", "-f", docker_compose_file, "down"])
-call(["docker-compose", "-f", docker_compose_file, "up", "-d",
-  "--scale", "datanode=" + str(datanode_count)])
-
-logger.info("Waiting 30s for cluster start up...")
-   

[hadoop] branch trunk updated: HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.

2019-07-08 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 82d88a8  HDDS-1705. Recon: Add estimatedTotalCount to the response of 
containers and containers/{id} endpoints. Contributed by Vivek Ratnavel 
Subramanian.
82d88a8 is described below

commit 82d88a8d30790c5841fc4f71ea39cc12b470c41f
Author: Vivek Ratnavel Subramanian 
AuthorDate: Mon Jul 8 21:06:50 2019 -0700

HDDS-1705. Recon: Add estimatedTotalCount to the response of containers and 
containers/{id} endpoints. Contributed by Vivek Ratnavel Subramanian.
---
 .../org/apache/hadoop/ozone/common/Storage.java|   6 +-
 .../common/src/main/resources/ozone-default.xml|   6 +-
 .../hadoop/ozone/om/OzoneManagerStarter.java   |   2 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   2 +
 .../ozone/recon/schema/StatsSchemaDefinition.java  |  61 
 .../apache/hadoop/ozone/recon/ReconConstants.java  |   8 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  22 ++
 .../ozone/recon/api/ContainerKeyService.java   |  22 +-
 .../ozone/recon/api/types/ContainersResponse.java  |  94 ++
 .../hadoop/ozone/recon/api/types/KeysResponse.java |  93 ++
 .../recon/spi/ContainerDBServiceProvider.java  |  58 +++-
 .../spi/impl/ContainerDBServiceProviderImpl.java   | 137 -
 .../recon/spi/impl/ReconContainerDBProvider.java   |   4 +
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  |  57 +++-
 .../recon/GuiceInjectorUtilsForTestsImpl.java} |  25 +-
 .../ozone/recon/api/TestContainerKeyService.java   | 186 +++-
 .../recon/persistence/AbstractSqlDatabaseTest.java |  12 +-
 .../persistence/TestStatsSchemaDefinition.java | 147 ++
 .../impl/TestContainerDBServiceProviderImpl.java   | 326 +
 .../impl/TestOzoneManagerServiceProviderImpl.java  |  53 +---
 .../recon/tasks/TestContainerKeyMapperTask.java| 127 
 .../recon/types/GuiceInjectorUtilsForTests.java| 117 
 22 files changed, 1209 insertions(+), 356 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
index f393ed9..7992dad 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Storage.java
@@ -81,7 +81,7 @@ public abstract class Storage {
 
   /**
* Gets the path of the Storage dir.
-   * @return Stoarge dir path
+   * @return Storage dir path
*/
   public String getStorageDir() {
 return storageDir.getAbsoluteFile().toString();
@@ -117,7 +117,7 @@ public abstract class Storage {
   }
 
   /**
-   * Retreives the storageInfo instance to read/write the common
+   * Retrieves the storageInfo instance to read/write the common
* version file properties.
* @return the instance of the storageInfo class
*/
@@ -128,7 +128,7 @@ public abstract class Storage {
   abstract protected Properties getNodeProperties();
 
   /**
-   * Sets the Node properties spaecific to OM/SCM.
+   * Sets the Node properties specific to OM/SCM.
*/
   private void setNodeProperties() {
 Properties nodeProperties = getNodeProperties();
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index c10aa33..219bd29 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -659,7 +659,7 @@
 
 OZONE, OM, SCM, CONTAINER, STORAGE, REQUIRED
 
-  This setting is the fallback location for SCM, OM and DataNodes
+  This setting is the fallback location for SCM, OM, Recon and DataNodes
   to store their metadata. This setting may be used only in test/PoC
   clusters to simplify configuration.
 
@@ -2457,7 +2457,7 @@
 
 OZONE, RECON
 
-  Ozone Recon datbase password.
+  Ozone Recon database password.
 
   
   
@@ -2484,7 +2484,7 @@
 
   The max active connections to the SQL database. The default SQLite
   database only allows single active connection, set this to a
-  resonable value like 10, for external production database.
+  reasonable value like 10, for external production database.
 
   
   
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
index 8a0c317..fa229aa 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java
@@ -60,7 +60,7 @@ public class OzoneManagerStarter extends

[hadoop] branch trunk updated: HDDS-1559. Include committedBytes to determine Out of Space in VolumeChoosingPolicy. Contributed by Supratim Deka (#841)

2019-05-28 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 346c2b7  HDDS-1559. Include committedBytes to determine Out of Space 
in VolumeChoosingPolicy. Contributed by Supratim Deka (#841)
346c2b7 is described below

commit 346c2b798080cc1f22d6ba85e584141e7dee2c08
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Wed May 29 09:18:14 2019 +0530

HDDS-1559. Include committedBytes to determine Out of Space in 
VolumeChoosingPolicy. Contributed by Supratim Deka (#841)
---
 .../volume/RoundRobinVolumeChoosingPolicy.java |  4 ++-
 .../container/ozoneimpl/TestOzoneContainer.java| 35 ++
 2 files changed, 38 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
index 75c92ec..f503149 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/RoundRobinVolumeChoosingPolicy.java
@@ -58,7 +58,9 @@ public class RoundRobinVolumeChoosingPolicy implements 
VolumeChoosingPolicy {
 
 while (true) {
   final HddsVolume volume = volumes.get(currentVolumeIndex);
-  long availableVolumeSize = volume.getAvailable();
+  // adjust for remaining capacity in Open containers
+  long availableVolumeSize = volume.getAvailable()
+  - volume.getCommittedBytes();
 
   currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size();
 
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index e678282..f5ebb49 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
@@ -52,6 +53,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.ArrayList;
 
+import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE;
 import static org.junit.Assert.assertEquals;
 
 /**
@@ -135,6 +137,39 @@ public class TestOzoneContainer {
 verifyCommittedSpace(ozoneContainer);
   }
 
+  @Test
+  public void testContainerCreateDiskFull() throws Exception {
+volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
+volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
+long containerSize = (long) StorageUnit.MB.toBytes(100);
+boolean diskSpaceException = false;
+
+// Format the volumes
+for (HddsVolume volume : volumeSet.getVolumesList()) {
+  volume.format(UUID.randomUUID().toString());
+
+  // eat up all available space except size of 1 container
+  volume.incCommittedBytes(volume.getAvailable() - containerSize);
+  // eat up 10 bytes more, now available space is less than 1 container
+  volume.incCommittedBytes(10);
+}
+keyValueContainerData = new KeyValueContainerData(99, containerSize,
+UUID.randomUUID().toString(), datanodeDetails.getUuidString());
+keyValueContainer = new KeyValueContainer(keyValueContainerData, conf);
+
+// we expect an out of space Exception
+try {
+  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+} catch (StorageContainerException e) {
+  if (e.getResult() == DISK_OUT_OF_SPACE) {
+diskSpaceException = true;
+  }
+}
+
+// Test failed if there was no exception
+assertEquals(true, diskSpaceException);
+  }
+
   //verify committed space on each volume
   private void verifyCommittedSpace(OzoneContainer oc) {
 for (HddsVolume dnVol : oc.getVolumeSet().getVolumesList()) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional co
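
A self-contained toy version of the adjusted policy, showing why committedBytes
has to be subtracted: space already promised to open containers is no longer
treated as free when picking a volume for a new container. The class and method
names below are stand-ins; the real RoundRobinVolumeChoosingPolicy works against
HddsVolume and signals failure with a DISK_OUT_OF_SPACE StorageContainerException,
as the new test above expects.

import java.util.List;

public final class VolumeChoosingSketch {

  /** Stand-in for HddsVolume: only the two accessors the policy needs. */
  public static final class Volume {
    private final long available;
    private final long committedBytes;

    public Volume(long available, long committedBytes) {
      this.available = available;
      this.committedBytes = committedBytes;
    }

    public long getAvailable() { return available; }
    public long getCommittedBytes() { return committedBytes; }
  }

  private int currentVolumeIndex = 0;

  public Volume chooseVolume(List<Volume> volumes, long maxContainerSize) {
    int attempts = 0;
    while (attempts < volumes.size()) {
      Volume volume = volumes.get(currentVolumeIndex);
      currentVolumeIndex = (currentVolumeIndex + 1) % volumes.size();
      attempts++;

      // HDDS-1559: subtract space already committed to open containers before
      // deciding whether this volume can host another full-size container.
      long availableVolumeSize = volume.getAvailable() - volume.getCommittedBytes();
      if (availableVolumeSize >= maxContainerSize) {
        return volume;
      }
    }
    throw new IllegalStateException("Out of space: no volume can fit a new container");
  }
}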

[hadoop] branch trunk updated: HDDS-1501 : Create a Recon task interface to update internal DB on updates from OM. (#819)

2019-05-23 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4b099b8   HDDS-1501 : Create a Recon task interface to update internal 
DB on updates from OM. (#819)
4b099b8 is described below

commit 4b099b8b890cc578b13630369ef44a42ecd6496c
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Thu May 23 15:34:55 2019 -0700

 HDDS-1501 : Create a Recon task interface to update internal DB on updates 
from OM. (#819)
---
 .../java/org/apache/hadoop/utils/db/DBStore.java   |  14 ++
 .../org/apache/hadoop/utils/db/DBStoreBuilder.java |   8 +-
 .../java/org/apache/hadoop/utils/db/RDBStore.java  |  40 +++-
 .../common/src/main/resources/ozone-default.xml|   8 +
 .../org/apache/hadoop/utils/db/TestRDBStore.java   |  43 
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |  20 +-
 .../recon/codegen/ReconSchemaGenerationModule.java |   3 +-
 .../schema/ReconInternalSchemaDefinition.java  |  65 ++
 .../hadoop/ozone/recon/ReconControllerModule.java  |   5 +
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  25 ++-
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |   4 +
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   1 -
 .../recon/spi/ContainerDBServiceProvider.java  |  16 ++
 .../spi/impl/ContainerDBServiceProviderImpl.java   |  11 ++
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  | 154 +++
 .../hadoop/ozone/recon/tasks/OMDBUpdateEvent.java  | 150 ++
 .../ozone/recon/tasks/OMDBUpdatesHandler.java  | 220 +
 .../ozone/recon/tasks/OMUpdateEventBatch.java  |  69 +++
 .../ozone/recon/tasks/ReconDBUpdateTask.java   |  66 +++
 .../ozone/recon/tasks/ReconTaskController.java}|  37 ++--
 .../ozone/recon/tasks/ReconTaskControllerImpl.java | 198 +++
 .../ozone/recon/api/TestContainerKeyService.java   |   7 +-
 .../recon/persistence/AbstractSqlDatabaseTest.java |   5 +-
 .../TestReconInternalSchemaDefinition.java | 143 ++
 .../recovery/TestReconOmMetadataManagerImpl.java   |  17 --
 .../impl/TestContainerDBServiceProviderImpl.java   |  25 +++
 .../hadoop/ozone/recon/tasks/DummyReconDBTask.java |  77 
 .../recon/tasks/TestContainerKeyMapperTask.java| 155 ---
 .../ozone/recon/tasks/TestOMDBUpdatesHandler.java  | 207 +++
 .../recon/tasks/TestReconTaskControllerImpl.java   | 171 
 30 files changed, 1790 insertions(+), 174 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
index 9e0c4a4..d01dfe4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.utils.db;
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -158,4 +159,17 @@ public interface DBStore extends AutoCloseable {
* @return DB file location.
*/
   File getDbLocation();
+
+  /**
+   * Get List of Index to Table Names.
+   * (For decoding table from column family index)
+   * @return Map of Index -> TableName
+   */
+  Map getTableNames();
+
+  /**
+   * Get Codec registry.
+   * @return codec registry.
+   */
+  CodecRegistry getCodecRegistry();
 }
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
index 34bdc5d..3459b20 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStoreBuilder.java
@@ -57,7 +57,6 @@ public final class DBStoreBuilder {
   private List tableNames;
   private Configuration configuration;
   private CodecRegistry registry;
-  private boolean readOnly = false;
 
   private DBStoreBuilder(Configuration configuration) {
 tables = new HashSet<>();
@@ -114,11 +113,6 @@ public final class DBStoreBuilder {
 return this;
   }
 
-  public DBStoreBuilder setReadOnly(boolean rdOnly) {
-readOnly = rdOnly;
-return this;
-  }
-
   /**
* Builds a DBStore instance and returns that.
*
@@ -137,7 +131,7 @@ public final class DBStoreBuilder {
 if (!dbFile.getParentFile().exists()) {
   throw new IOException("The DB destination directory should exist.");
 }
-return new RDBStore(dbFile, options, tables, registry, readOnly);
+return new RDBStore(dbFile, options, tables, registry);
   }
 
   /**
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/d

[hadoop] branch trunk updated (20a4ec3 -> b4b9120)

2019-05-23 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 20a4ec3  HDDS-700. Support rack awared node placement policy based on 
network topology. Contributed by Sammi Chen.
 add 869a1ab  HDDS-1535. Space tracking for Open Containers : Handle Node 
Startup. Contributed by Supratim Deka
 add 64c3985  Fixed checkstyle issues.
 add 456bb8a  verifyContainerData also does fixup, renamed. Added a Javadoc 
comment, both as per review discussion
 add 9da62f3  Merge branch 'trunk' into HDDS-1535
 add 72bef0f  fixed merge error. adapted to new signature of BlockUtils 
getDB
 add ca93760  fixed checkstyle issue post merge
 new b4b9120  HDDS-1535. Space tracking for Open Containers : Handle Node 
Startup. Contributed by Supratim Deka (#832)

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../ozone/container/ozoneimpl/ContainerReader.java | 43 +-
 .../container/ozoneimpl/TestOzoneContainer.java| 91 --
 2 files changed, 127 insertions(+), 7 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: HDDS-1535. Space tracking for Open Containers : Handle Node Startup. Contributed by Supratim Deka (#832)

2019-05-23 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit b4b9120e281a2d59cf56d3f76dab945f36003484
Merge: 20a4ec3 ca93760
Author: Arpit Agarwal 
AuthorDate: Thu May 23 10:42:08 2019 -0700

HDDS-1535. Space tracking for Open Containers : Handle Node Startup. 
Contributed by Supratim Deka (#832)

* HDDS-1535. Space tracking for Open Containers : Handle Node Startup. 
Contributed by Supratim Deka

* Fixed checkstyle issues.

* verifyContainerData also does fixup, renamed. Added a Javadoc comment, 
both as per review discussion

* fixed merge error. adapted to new signature of BlockUtils getDB

* fixed checkstyle issue post merge

 .../ozone/container/ozoneimpl/ContainerReader.java | 43 +-
 .../container/ozoneimpl/TestOzoneContainer.java| 91 --
 2 files changed, 127 insertions(+), 7 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1499. OzoneManager Cache. (#798)

2019-05-19 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0d1d7c8  HDDS-1499. OzoneManager Cache. (#798)
0d1d7c8 is described below

commit 0d1d7c86ec34fabc62c0e3844aca3733024bc172
Author: Bharat Viswanadham 
AuthorDate: Sun May 19 19:23:02 2019 -0700

HDDS-1499. OzoneManager Cache. (#798)
---
 .../java/org/apache/hadoop/utils/db/DBStore.java   |   1 +
 .../java/org/apache/hadoop/utils/db/RDBTable.java  |  10 +-
 .../java/org/apache/hadoop/utils/db/Table.java |  26 +++-
 .../org/apache/hadoop/utils/db/TypedTable.java |  78 ++-
 .../org/apache/hadoop/utils/db/cache/CacheKey.java |  56 
 .../apache/hadoop/utils/db/cache/CacheValue.java   |  47 +++
 .../apache/hadoop/utils/db/cache/EpochEntry.java   |  74 +++
 .../hadoop/utils/db/cache/PartialTableCache.java   |  97 ++
 .../apache/hadoop/utils/db/cache/TableCache.java   |  63 +
 .../apache/hadoop/utils/db/cache/package-info.java |  18 +++
 .../hadoop/utils/db/TestTypedRDBTableStore.java|  82 +++-
 .../utils/db/cache/TestPartialTableCache.java  | 142 +
 .../apache/hadoop/utils/db/cache/package-info.java |  22 
 .../hadoop/ozone/om/OmMetadataManagerImpl.java |   4 +-
 14 files changed, 709 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
index 56166ab..9e0c4a4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/DBStore.java
@@ -44,6 +44,7 @@ public interface DBStore extends AutoCloseable {
*/
   Table getTable(String name) throws IOException;
 
+
   /**
* Gets an existing TableStore with implicit key/value conversion.
*
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
index 88b0411..7bbe9d9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/RDBTable.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.utils.db;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 import org.rocksdb.ColumnFamilyHandle;
@@ -33,9 +34,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * RocksDB implementation of ozone metadata store.
+ * RocksDB implementation of ozone metadata store. This class should be only
+ * used as part of TypedTable as it's underlying implementation to access the
+ * metadata store content. All other user's using Table should use TypedTable.
  */
-public class RDBTable implements Table {
+@InterfaceAudience.Private
+class RDBTable implements Table {
 
 
   private static final Logger LOG =
@@ -52,7 +56,7 @@ public class RDBTable implements Table {
* @param handle - ColumnFamily Handle.
* @param writeOptions - RocksDB write Options.
*/
-  public RDBTable(RocksDB db, ColumnFamilyHandle handle,
+  RDBTable(RocksDB db, ColumnFamilyHandle handle,
   WriteOptions writeOptions) {
 this.db = db;
 this.handle = handle;
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
index 2f14e77..905a68b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Table.java
@@ -21,8 +21,10 @@ package org.apache.hadoop.utils.db;
 
 import java.io.IOException;
 
+import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.classification.InterfaceStability;
-
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
 /**
  * Interface for key-value store that stores ozone metadata. Ozone metadata is
  * stored as key value pairs, both key and value are arbitrary byte arrays. 
Each
@@ -98,6 +100,28 @@ public interface Table extends AutoCloseable {
   String getName() throws IOException;
 
   /**
+   * Add entry to the table cache.
+   *
+   * If the cacheKey already exists, it will override the entry.
+   * @param cacheKey
+   * @param cacheValue
+   */
+  default void addCacheEntry(CacheKey cacheKey,
+  CacheValue cacheValue) {
+throw new NotImplementedException("addCacheEntry is not implemented");
+  }
+
+  /**
+   * Removes all the entries from the table cache which are having epoch value
+   * less
+   * than or equal to specified epoch value.
+   * @param epoch
+   */
+  de
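
The idea behind the new cache is that each entry is tagged with the epoch
(transaction index) that created it and evicted once that epoch has been flushed
to RocksDB, so reads can see not-yet-flushed double-buffer state without the
cache growing without bound. A toy sketch of that epoch-tagged map; the real
PartialTableCache uses the CacheKey, CacheValue and EpochEntry types added in
this commit, which are not reproduced here.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class PartialCacheSketch<K, V> {

  private static final class Entry<V> {
    final V value;
    final long epoch;

    Entry(V value, long epoch) {
      this.value = value;
      this.epoch = epoch;
    }
  }

  private final Map<K, Entry<V>> cache = new ConcurrentHashMap<>();

  /** Record a value together with the epoch of the transaction that wrote it. */
  public void put(K key, V value, long epoch) {
    cache.put(key, new Entry<>(value, epoch));
  }

  public V get(K key) {
    Entry<V> entry = cache.get(key);
    return entry == null ? null : entry.value;
  }

  /** Drop every entry whose epoch is at or below the one already flushed to disk. */
  public void cleanup(long flushedEpoch) {
    cache.entrySet().removeIf(e -> e.getValue().epoch <= flushedEpoch);
  }
}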

[hadoop] branch trunk updated: HDDS-1511. Space tracking for Open Containers in HDDS Volumes. Contributed by Supratim Deka (#812)

2019-05-15 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9569015  HDDS-1511. Space tracking for Open Containers in HDDS 
Volumes. Contributed by Supratim Deka (#812)
9569015 is described below

commit 9569015802e695f1c242c74d5ac9df27e180374c
Author: supratimdeka <46919641+supratimd...@users.noreply.github.com>
AuthorDate: Wed May 15 22:56:01 2019 +0530

HDDS-1511. Space tracking for Open Containers in HDDS Volumes. Contributed 
by Supratim Deka (#812)
---
 .../ozone/container/common/impl/ContainerData.java | 63 ++
 .../ozone/container/common/impl/ContainerSet.java  |  2 +
 .../ozone/container/common/volume/HddsVolume.java  | 21 
 .../common/impl/TestContainerPersistence.java  | 21 
 4 files changed, 107 insertions(+)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index d2fa2c8..ec70dbd 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -71,6 +71,8 @@ public abstract class ContainerData {
 
   private final long maxSize;
 
+  private boolean committedSpace;
+
   //ID of the pipeline where this container is created
   private String originPipelineId;
   //ID of the datanode where this container is created
@@ -184,7 +186,23 @@ public abstract class ContainerData {
* @param state
*/
   public synchronized void setState(ContainerDataProto.State state) {
+ContainerDataProto.State oldState = this.state;
 this.state = state;
+
+if ((oldState == ContainerDataProto.State.OPEN) &&
+(state != oldState)) {
+  releaseCommitSpace();
+}
+
+/**
+ * commit space when container transitions (back) to Open.
+ * when? perhaps closing a container threw an exception
+ */
+if ((state == ContainerDataProto.State.OPEN) &&
+(state != oldState)) {
+  Preconditions.checkState(getMaxSize() > 0);
+  commitSpace();
+}
   }
 
   /**
@@ -280,6 +298,41 @@ public abstract class ContainerData {
 setState(ContainerDataProto.State.CLOSED);
   }
 
+  private void releaseCommitSpace() {
+long unused = getMaxSize() - getBytesUsed();
+
+// only if container size < max size
+if (unused > 0 && committedSpace) {
+  getVolume().incCommittedBytes(0 - unused);
+}
+committedSpace = false;
+  }
+
+  /**
+   * add available space in the container to the committed space in the volume.
+   * available space is the number of bytes remaining till max capacity.
+   */
+  public void commitSpace() {
+long unused = getMaxSize() - getBytesUsed();
+ContainerDataProto.State myState = getState();
+HddsVolume cVol;
+
+//we don't expect duplicate calls
+Preconditions.checkState(!committedSpace);
+
+// Only Open Containers have Committed Space
+if (myState != ContainerDataProto.State.OPEN) {
+  return;
+}
+
+// junit tests do not always set up volume
+cVol = getVolume();
+if (unused > 0 && (cVol != null)) {
+  cVol.incCommittedBytes(unused);
+  committedSpace = true;
+}
+  }
+
   /**
* Get the number of bytes read from the container.
* @return the number of bytes read from the container.
@@ -321,10 +374,20 @@ public abstract class ContainerData {
 
   /**
* Increase the number of bytes write into the container.
+   * Also decrement committed bytes against the bytes written.
* @param bytes the number of bytes write into the container.
*/
   public void incrWriteBytes(long bytes) {
+long unused = getMaxSize() - getBytesUsed();
+
 this.writeBytes.addAndGet(bytes);
+
+// only if container size < max size
+if (committedSpace && unused > 0) {
+  //with this write, container size might breach max size
+  long decrement = Math.min(bytes, unused);
+  this.getVolume().incCommittedBytes(0 - decrement);
+}
   }
 
   /**
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 4a7a950..7dbcbef 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -68,6 +68,8 @@ public class ContainerSet {
 if(containerMap.putIfAbsent(containerId, con
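
The bookkeeping added here reserves a container's unused capacity against its
volume while the container is OPEN and pays that reservation back as bytes are
written or the container leaves the OPEN state. A small worked example of the
arithmetic, with a plain long standing in for HddsVolume's committed-bytes
counter:

public final class CommittedSpaceSketch {

  public static void main(String[] args) {
    long volumeCommittedBytes = 0;

    // commitSpace(): an open container with maxSize 100 MB and nothing written
    // yet reserves its full unused capacity against the volume.
    long maxSize = 100L * 1024 * 1024;
    long bytesUsed = 0;
    long unused = maxSize - bytesUsed;
    volumeCommittedBytes += unused;                      // 100 MB reserved
    System.out.println("after commitSpace: " + volumeCommittedBytes);

    // incrWriteBytes(30 MB): the reservation shrinks by min(bytesWritten, unused)
    // so it can never go negative even if the container overshoots maxSize.
    long bytesWritten = 30L * 1024 * 1024;
    long decrement = Math.min(bytesWritten, unused);
    volumeCommittedBytes -= decrement;                   // 70 MB still reserved
    bytesUsed += bytesWritten;
    System.out.println("after 30 MB write: " + volumeCommittedBytes);

    // releaseCommitSpace() on close: whatever is still unused is handed back.
    long remaining = maxSize - bytesUsed;
    volumeCommittedBytes -= Math.max(remaining, 0);      // back to 0
    System.out.println("after close: " + volumeCommittedBytes);
  }
}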

[hadoop] branch trunk updated: HDDS-1489. Unnecessary log messages on console with Ozone shell. (#797)

2019-05-07 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c4be3ea  HDDS-1489. Unnecessary log messages on console with Ozone 
shell. (#797)
c4be3ea is described below

commit c4be3ea27635e0aec0104a78d89ff4f1d01dff94
Author: Siddharth 
AuthorDate: Tue May 7 14:48:39 2019 -0700

HDDS-1489. Unnecessary log messages on console with Ozone shell. (#797)
---
 hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config   | 1 +
 hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config   | 1 +
 hadoop-ozone/dist/src/main/compose/ozone/docker-config | 1 +
 hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config | 1 +
 hadoop-ozone/dist/src/main/compose/ozonefs/docker-config   | 1 +
 hadoop-ozone/dist/src/main/compose/ozoneperf/docker-config | 1 +
 hadoop-ozone/dist/src/main/conf/log4j.properties   | 1 +
 hadoop-ozone/ozonefs/src/test/resources/log4j.properties   | 3 +++
 8 files changed, 10 insertions(+)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
index f7ebcde..ddba89a 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config
@@ -39,6 +39,7 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
index 2493cae..9813ac4 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
@@ -38,6 +38,7 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index 8f28967..6165499 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -36,6 +36,7 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
index 1db1a79..3ab0c6a 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
@@ -35,6 +35,7 @@ 
LOG4J.PROPERTIES_log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH
 LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR
 LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.conf.ConfUtils=WARN
 
LOG4J.PROPERTIES_log4j.logger.org.apache.hadoop.security.ShellBasedUnixGroupsMapping=ERROR
+LOG4J.PROPERTIES_log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN
 
 #Enable this variable to print out all hadoop rpc traffic to the stdout. See 
http://byteman.jboss.org/ to define your own instrumentation.
 
#BYTEMAN_SCRIPT_URL=https

[hadoop] branch branch-2.8 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.

2019-05-06 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new d71eda9  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.
d71eda9 is described below

commit d71eda92c6730f542be7ffd911cdc924d7e17b05
Author: Arpit Agarwal 
AuthorDate: Mon May 6 13:34:38 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq and Stephen O'Donnell.

(cherry picked from commit 102c8fca10f3c626ab8bc47f818c8391a5c35289)
(cherry picked from commit 4a1d51dea2149e8f458467467798e81b126b7cc5)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 -
 .../datanode/TestDataNodeHotSwapVolumes.java   | 71 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 107 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index b14f9e9..d14033d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -415,7 +415,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 5705792..e94670a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -126,7 +126,19 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
-  
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+for(String bp : other.getBlockPoolList()) {
+  for(ReplicaInfo r : other.map.get(bp)) {
+add(bp, r);
+  }
+}
+  }
+
   /**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 08df71f..a07ecc7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -47,6 +47,8 @@ import 
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Test;
 
@@ -393,6 +395,75 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn)

[hadoop] branch branch-2.9 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.

2019-05-06 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new 4a1d51d  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq and Stephen O'Donnell.
4a1d51d is described below

commit 4a1d51dea2149e8f458467467798e81b126b7cc5
Author: Arpit Agarwal 
AuthorDate: Mon May 6 13:34:38 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq and Stephen O'Donnell.

(cherry picked from commit 102c8fca10f3c626ab8bc47f818c8391a5c35289)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 -
 .../datanode/TestDataNodeHotSwapVolumes.java   | 70 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 106 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 4486b73..9946a3f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -432,7 +432,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 5705792..e94670a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -126,7 +126,19 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
-  
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+for(String bp : other.getBlockPoolList()) {
+  for(ReplicaInfo r : other.map.get(bp)) {
+add(bp, r);
+  }
+}
+  }
+
   /**
* Remove the replica's meta information from the map that matches
* the input block's id and generation stamp
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index ea28ea4..125b431 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
 import org.junit.Assert;
@@ -416,6 +417,75 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String newDirs = oldDirs.iterator().next();  // Keep the

[hadoop] branch branch-3.2 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq.

2019-04-29 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new db4c0b3  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq.
db4c0b3 is described below

commit db4c0b357d573a1507d6a3da632bb5a11187d414
Author: Arpit Agarwal 
AuthorDate: Mon Apr 29 14:49:35 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq.

(cherry picked from commit 4b4200f1f87ad40d9c19ba160f706ffd0470a8d4)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 +
 .../datanode/TestDataNodeHotSwapVolumes.java   | 68 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 105 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ad43b45..29ea1de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c60..786078f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,20 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+other.map.forEach(
+(bp, replicaInfos) -> {
+  replicaInfos.forEach(
+  replicaInfo -> add(bp, replicaInfo)
+  );
+}
+);
+  }
   
   /**
* Remove the replica's meta information from the map that matches
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index c19c849..609e16c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String newDirs = oldDirs.iterator().next();  // Keep the first volume.
+assertThat(
+"DN did not update its own config",
+dn.reconfigurePropertyImpl(
+DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+assertFileLocksReleased(
+new ArrayList(oldDirs).subList(1, oldDirs.size()));
+
+// Now create another file - the first volume should have 15 blocks
+// and 5 blocks on the previously removed vo

[hadoop] branch branch-3.1 updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq.

2019-04-29 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 7b51edb  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq.
7b51edb is described below

commit 7b51edbab9f98d6a41096ef029b1a0ef2ff959bc
Author: Arpit Agarwal 
AuthorDate: Mon Apr 29 14:49:35 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq.

(cherry picked from commit 4b4200f1f87ad40d9c19ba160f706ffd0470a8d4)
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 +
 .../datanode/TestDataNodeHotSwapVolumes.java   | 68 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 105 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index ad43b45..29ea1de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c60..786078f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,20 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+other.map.forEach(
+(bp, replicaInfos) -> {
+  replicaInfos.forEach(
+  replicaInfo -> add(bp, replicaInfo)
+  );
+}
+);
+  }
   
   /**
* Remove the replica's meta information from the map that matches
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 6530720..2a9b61d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String newDirs = oldDirs.iterator().next();  // Keep the first volume.
+assertThat(
+"DN did not update its own config",
+dn.reconfigurePropertyImpl(
+DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+assertFileLocksReleased(
+new ArrayList(oldDirs).subList(1, oldDirs.size()));
+
+// Now create another file - the first volume should have 15 blocks
+// and 5 blocks on the previously removed vo

[hadoop] branch trunk updated: HDFS-13677. Dynamic refresh Disk configuration results in overwriting VolumeMap. Contributed by xuzq.

2019-04-29 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4b4200f  HDFS-13677. Dynamic refresh Disk configuration results in 
overwriting VolumeMap. Contributed by xuzq.
4b4200f is described below

commit 4b4200f1f87ad40d9c19ba160f706ffd0470a8d4
Author: Arpit Agarwal 
AuthorDate: Mon Apr 29 14:49:35 2019 -0700

HDFS-13677. Dynamic refresh Disk configuration results in overwriting 
VolumeMap. Contributed by xuzq.
---
 .../datanode/fsdataset/impl/FsDatasetImpl.java |  2 +-
 .../server/datanode/fsdataset/impl/ReplicaMap.java | 14 +
 .../datanode/TestDataNodeHotSwapVolumes.java   | 68 ++
 .../datanode/fsdataset/impl/TestReplicaMap.java| 22 +++
 4 files changed, 105 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6fe0d0f..9cfdbc3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -419,7 +419,7 @@ class FsDatasetImpl implements FsDatasetSpi {
 LOG.error(errorMsg);
 throw new IOException(errorMsg);
   }
-  volumeMap.addAll(replicaMap);
+  volumeMap.mergeAll(replicaMap);
   storageMap.put(sd.getStorageUuid(),
   new DatanodeStorage(sd.getStorageUuid(),
   DatanodeStorage.State.NORMAL,
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
index 73d3c60..786078f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java
@@ -166,6 +166,20 @@ class ReplicaMap {
   void addAll(ReplicaMap other) {
 map.putAll(other.map);
   }
+
+
+  /**
+   * Merge all entries from the given replica map into the local replica map.
+   */
+  void mergeAll(ReplicaMap other) {
+other.map.forEach(
+(bp, replicaInfos) -> {
+  replicaInfos.forEach(
+  replicaInfo -> add(bp, replicaInfo)
+  );
+}
+);
+  }
   
   /**
* Remove the replica's meta information from the map that matches
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index f34ac51..6672cd2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -420,6 +420,74 @@ public class TestDataNodeHotSwapVolumes {
 verifyFileLength(cluster.getFileSystem(), testFile, numBlocks);
   }
 
+  /**
+   * Test re-adding one volume with some blocks on a running MiniDFSCluster
+   * with only one NameNode to reproduce HDFS-13677.
+   */
+  @Test(timeout=60000)
+  public void testReAddVolumeWithBlocks()
+  throws IOException, ReconfigurationException,
+  InterruptedException, TimeoutException {
+startDFSCluster(1, 1);
+String bpid = cluster.getNamesystem().getBlockPoolId();
+final int numBlocks = 10;
+
+Path testFile = new Path("/test");
+createFile(testFile, numBlocks);
+
+List> blockReports =
+cluster.getAllBlockReports(bpid);
+assertEquals(1, blockReports.size());  // 1 DataNode
+assertEquals(2, blockReports.get(0).size());  // 2 volumes
+
+// Now remove the second volume
+DataNode dn = cluster.getDataNodes().get(0);
+Collection oldDirs = getDataDirs(dn);
+String newDirs = oldDirs.iterator().next();  // Keep the first volume.
+assertThat(
+"DN did not update its own config",
+dn.reconfigurePropertyImpl(
+DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs),
+is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
+assertFileLocksReleased(
+new ArrayList(oldDirs).subList(1, oldDirs.size()));
+
+// Now create another file - the first volume should have 15 blocks
+// and 5 blocks on the previously removed volume
+createFile(new Path("/test2"), numBlocks);
+dn.sched
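
Across all of the HDFS-13677 commits above, the fix is the same: ReplicaMap.addAll()
delegates to map.putAll(), which replaces the whole per-block-pool replica collection
and silently drops the replicas already registered from other volumes, while the new
mergeAll() walks the other map and adds replicas one at a time so existing entries
survive a volume re-add. A small self-contained sketch of the difference (simplified
types, not the real ReplicaMap):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class SimpleReplicaMap {
  // block pool id -> replica ids (stand-in for the real per-pool collections)
  final Map<String, Set<Long>> map = new HashMap<>();

  void add(String bp, long replicaId) {
    map.computeIfAbsent(bp, k -> new HashSet<>()).add(replicaId);
  }

  /** putAll semantics: the other map's set REPLACES ours for each block pool. */
  void addAll(SimpleReplicaMap other) {
    map.putAll(other.map);
  }

  /** merge semantics: entries are combined, nothing already present is lost. */
  void mergeAll(SimpleReplicaMap other) {
    other.map.forEach((bp, ids) -> ids.forEach(id -> add(bp, id)));
  }
}

With two volumes' replicas already registered, addAll() from a freshly scanned volume
leaves only that volume's replicas for the block pool, which is exactly the overwrite
that the testReAddVolumeWithBlocks test reproduces.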

[hadoop] branch ozone-0.4 updated: HDDS-1425. Ozone compose files are not compatible with the latest docker-compose. (#751)

2019-04-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 3431333  HDDS-1425. Ozone compose files are not compatible with the 
latest docker-compose. (#751)
3431333 is described below

commit 3431333988cfd93339e9686a299be1c64accace5
Author: Elek, Márton 
AuthorDate: Mon Apr 22 17:12:06 2019 +0200

HDDS-1425. Ozone compose files are not compatible with the latest 
docker-compose. (#751)
---
 hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config | 2 +-
 hadoop-ozone/dist/src/main/compose/ozone/docker-config  | 2 +-
 hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config  | 2 +-
 hadoop-ozone/dist/src/main/compose/ozones3/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config | 2 +-
 hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config| 2 +-
 hadoop-ozone/dist/src/main/compose/ozonetrace/docker-config | 2 +-
 7 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
index ffce1a5..4fdb0cb 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone-hdfs/docker-config
@@ -61,7 +61,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
 
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
index 6ba54b1..8f28967 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config
@@ -58,7 +58,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
 
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
index 2c87eb9..1db1a79 100644
--- a/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozoneblockade/docker-config
@@ -57,7 +57,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName 
=${sys:hadoop.log.dir}/om-audit-${hostName}.log
+LOG4J2.PROPERTIES_appender.rolling.fileName=${sys:hadoop.log.dir}/om-audit-${hostName}.log
 
LOG4J2.PROPERTIES_appender.rolling.filePattern=${sys:hadoop.log.dir}/om-audit-${hostName}-%d{yyyy-MM-dd-HH-mm-ss}-%i.log.gz
 LOG4J2.PROPERTIES_appender.rolling.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.rolling.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
diff --git a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
index ca0d557..22436a4 100644
--- a/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozones3/docker-config
@@ -59,7 +59,7 @@ LOG4J2.PROPERTIES_appender.console.layout.type=PatternLayout
 LOG4J2.PROPERTIES_appender.console.layout.pattern=%d{DEFAULT} | %-5level | 
%c{1} | %msg | %throwable{3} %n
 LOG4J2.PROPERTIES_appender.rolling.type=RollingFile
 LOG4J2.PROPERTIES_appender.rolling.name=RollingFile
-LOG4J2.PROPERTIES_appender.rolling.fileName

[hadoop] branch trunk updated: HDDS-1432. Ozone client list command truncates response without any indication. Contributed by Siddharth Wagle.

2019-04-16 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f2ab279  HDDS-1432. Ozone client list command truncates response 
without any indication. Contributed by Siddharth Wagle.
f2ab279 is described below

commit f2ab2795db0da1c912f86855031604de389411da
Author: Arpit Agarwal 
AuthorDate: Tue Apr 16 12:35:49 2019 -0700

HDDS-1432. Ozone client list command truncates response without any 
indication. Contributed by Siddharth Wagle.
---
 .../test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java  | 5 +
 .../org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java   | 7 +++
 2 files changed, 12 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 1b10135..0b53f69 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -1114,11 +1114,16 @@ public class TestOzoneShell {
 }
 
 out.reset();
+String msgText = "Listing first 3 entries of the result. " +
+"Use --length (-l) to override max returned keys.";
 args =
 new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
 "--length", "3"};
 execute(shell, args);
 commandOutput = out.toString();
+assertTrue("Expecting output to start with " + msgText,
+commandOutput.contains(msgText));
+commandOutput = commandOutput.replace(msgText, "");
 keys = (List) JsonUtils.toJsonList(commandOutput,
 KeyInfo.class);
 
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
index 5642bc7..111ce16 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/web/ozShell/keys/ListKeyHandler.java
@@ -90,12 +90,19 @@ public class ListKeyHandler extends Handler {
 startKey);
 List keyInfos = new ArrayList<>();
 
+int maxKeyLimit = maxKeys;
 while (maxKeys > 0 && keyIterator.hasNext()) {
   KeyInfo key = OzoneClientUtils.asKeyInfo(keyIterator.next());
   keyInfos.add(key);
   maxKeys -= 1;
 }
 
+// More keys remain; tell the user that the listing was capped at maxKeyLimit
+if (keyIterator.hasNext()) {
+  System.out.println("Listing first " + maxKeyLimit + " entries of the " +
+  "result. Use --length (-l) to override max returned keys.");
+}
+
 if (isVerbose()) {
   System.out.printf("Found : %d keys for bucket %s in volume : %s ",
   keyInfos.size(), bucketName, volumeName);
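
The handler keeps a copy of the requested limit, drains at most that many keys from the
iterator, and then uses hasNext() to detect that the listing was truncated. The same
consume-then-probe pattern in a minimal standalone form (hypothetical names, not the
Ozone shell API):

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class LimitedListing {
  /** Copy at most 'limit' items from 'source' and warn if more remain. */
  static <T> List<T> take(Iterator<T> source, int limit) {
    List<T> out = new ArrayList<>();
    int remaining = limit;
    while (remaining > 0 && source.hasNext()) {
      out.add(source.next());
      remaining--;
    }
    if (source.hasNext()) {
      // Mirrors the shell hint: results were truncated at 'limit' entries.
      System.out.println("Listing first " + limit + " entries of the result.");
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(take(List.of("a", "b", "c", "d").iterator(), 3)); // [a, b, c]
  }
}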


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1387. ConcurrentModificationException in TestMiniChaosOzoneCluster. Contributed by Marton Elek. (#732)

2019-04-12 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5379d85  HDDS-1387. ConcurrentModificationException in 
TestMiniChaosOzoneCluster. Contributed by Marton Elek. (#732)
5379d85 is described below

commit 5379d85d8ed09b35e293239d3a7f96f8f98c411c
Author: Elek, Márton 
AuthorDate: Sat Apr 13 00:19:50 2019 +0200

HDDS-1387. ConcurrentModificationException in TestMiniChaosOzoneCluster. 
Contributed by Marton Elek. (#732)
---
 .../src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 8bef479..059af5a 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -136,11 +136,13 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
   }
 
   public void shutdown() {
-super.shutdown();
 try {
   stopChaos();
   executorService.shutdown();
   executorService.awaitTermination(1, TimeUnit.DAYS);
+  //this should be called after stopChaos to be sure that the
+  //datanode collection is not modified during the shutdown
+  super.shutdown();
 } catch (Exception e) {
   LOG.error("failed to shutdown MiniOzoneChaosCluster", e);
 }
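
The reordering matters because the chaos task keeps picking random datanodes out of the
cluster's collection; calling super.shutdown() first allowed that collection to be torn
down while the scheduled task was still iterating it. The general ordering, stop the
background producer before tearing down the state it touches, in a generic sketch
(hypothetical class, not MiniOzoneChaosCluster):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class PeriodicWorker implements AutoCloseable {
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();

  void start(Runnable task) {
    scheduler.scheduleAtFixedRate(task, 0, 100, TimeUnit.MILLISECONDS);
  }

  @Override
  public void close() throws InterruptedException {
    // 1. Stop the periodic task so it can no longer touch shared state.
    scheduler.shutdown();
    scheduler.awaitTermination(1, TimeUnit.MINUTES);
    // 2. Only now is it safe to tear down the state the task was using.
  }
}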


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1428. Remove benign warning in handleCreateContainer. Contributed by Siddharth Wagle.

2019-04-12 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 62f4808  HDDS-1428. Remove benign warning in handleCreateContainer. 
Contributed by Siddharth Wagle.
62f4808 is described below

commit 62f4808617a354b8f4f803cdb8915c179b7210be
Author: Arpit Agarwal 
AuthorDate: Fri Apr 12 15:08:01 2019 -0700

HDDS-1428. Remove benign warning in handleCreateContainer. Contributed by 
Siddharth Wagle.
---
 .../org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index d2d7bf7..531fb02 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -251,7 +251,7 @@ public class KeyValueHandler extends Handler {
 // The create container request for an already existing container can
 // arrive in case the ContainerStateMachine reapplies the transaction
 // on datanode restart. Just log a warning msg here.
-LOG.warn("Container already exists." +
+LOG.debug("Container already exists." +
 "container Id " + containerID);
   }
 } catch (StorageContainerException ex) {
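
Since the create-on-replay case is expected, the message is demoted from warn to debug.
The committed line still assembles the message by string concatenation; assuming an
SLF4J-style logger, the parameterized form below (a sketch, not part of the patch)
defers that work until DEBUG is actually enabled:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class CreateContainerLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(CreateContainerLogging.class);

  void logAlreadyExists(long containerID) {
    // The argument is only formatted into the message when DEBUG is on.
    LOG.debug("Container already exists. containerID: {}", containerID);
  }
}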


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1410. TestSCMNodeMetrics is flaky. Contributed by Siddharth Wagle.

2019-04-11 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fcc7f9b  HDDS-1410. TestSCMNodeMetrics is flaky. Contributed by 
Siddharth Wagle.
fcc7f9b is described below

commit fcc7f9b32ff5fa5883bb01fad0f55b28ba17733d
Author: Arpit Agarwal 
AuthorDate: Thu Apr 11 15:14:28 2019 -0700

HDDS-1410. TestSCMNodeMetrics is flaky. Contributed by Siddharth Wagle.
---
 .../java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java   | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 3a8f9f0..65a6357 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -101,9 +101,9 @@ public class TestSCMNodeMetrics {
 NodeReportProto nodeReport = NodeReportProto.newBuilder()
 .addStorageReport(storageReport).build();
 datanode.getDatanodeStateMachine().getContext().addReport(nodeReport);
-datanode.getDatanodeStateMachine().triggerHeartbeat();
-// Give some time so that SCM receives and processes the heartbeat.
-Thread.sleep(100L);
+cluster.getStorageContainerManager().getScmNodeManager()
+.processNodeReport(datanode.getDatanodeDetails(), nodeReport);
+
 assertCounter("NumNodeReportProcessed", nrProcessed + 1,
 getMetrics(SCMNodeMetrics.class.getSimpleName()));
   }
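
The flake came from racing a fixed Thread.sleep(100) against asynchronous heartbeat
processing; the fix invokes processNodeReport on the SCM node manager directly, so the
assertion only runs after the report has been handled. Where a direct call is not
available, polling for the condition (as GenericTestUtils.waitFor does elsewhere in
these patches) beats sleeping; a plain-Java sketch of such a helper:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

final class Poll {
  /** Re-check 'condition' every 'intervalMs' until it holds or 'timeoutMs' elapses. */
  static void await(BooleanSupplier condition, long intervalMs, long timeoutMs)
      throws InterruptedException, TimeoutException {
    long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    while (!condition.getAsBoolean()) {
      if (System.nanoTime() > deadline) {
        throw new TimeoutException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }
}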


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16243. Change Log Level to trace in NetUtils.java. Contributed by chencan.

2019-04-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8740755  HADOOP-16243. Change Log Level to trace in NetUtils.java. 
Contributed by chencan.
8740755 is described below

commit 87407553ef9215c008ccc5836dc7c9a9201a9e7d
Author: Arpit Agarwal 
AuthorDate: Wed Apr 10 13:21:04 2019 -0700

HADOOP-16243. Change Log Level to trace in NetUtils.java. Contributed by 
chencan.
---
 .../hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index 0e9ea04..acdec93 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -833,7 +833,7 @@ public class NetUtils {
   Throwable t = ctor.newInstance(msg);
   return (T)(t.initCause(exception));
 } catch (Throwable e) {
-  LOG.warn("Unable to wrap exception of type {}: it has no (String) "
+  LOG.trace("Unable to wrap exception of type {}: it has no (String) "
   + "constructor", clazz, e);
   throw exception;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1418. Move bang line to the start of the start-chaos.sh script. Contributed by Arpit Agarwal. (#720)

2019-04-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new feaab24  HDDS-1418. Move bang line to the start of the start-chaos.sh 
script. Contributed by Arpit Agarwal. (#720)
feaab24 is described below

commit feaab241e530fef9aaf8adab0dadc510fcb8701a
Author: Arpit Agarwal 
AuthorDate: Wed Apr 10 12:44:01 2019 -0700

HDDS-1418. Move bang line to the start of the start-chaos.sh script. 
Contributed by Arpit Agarwal. (#720)

Change-Id: I4fcf39d61a7d4c4ca79cb56a6958db0f691fe971
---
 hadoop-ozone/integration-test/src/test/bin/start-chaos.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh 
b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
index dcec909..63e4a95 100755
--- a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
+++ b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
@@ -1,3 +1,5 @@
+#!/usr/bin/env bash
+
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
 # this work for additional information regarding copyright ownership.
@@ -13,8 +15,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-#!/usr/bin/env bash
-
 date=`date +"%m-%d-%y-%T"`
 fileformat=".MiniOzoneChaosCluster.log"
 heapformat=".dump"


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/01: Revert "HDDS-1370. Command Execution in Datanode fails because of NPE (#715)"

2019-04-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch revert-715-HDDS-1370
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 5b308cc38cb3167a6899d267efce63c5b1686f29
Author: Arpit Agarwal 
AuthorDate: Wed Apr 10 11:38:08 2019 -0700

Revert "HDDS-1370. Command Execution in Datanode fails because of NPE 
(#715)"

This reverts commit 0e770a65394a2aeaa56154d200c02afbe5bbb5d7.
---
 .../common/statemachine/StateContext.java  | 30 +-
 .../states/datanode/RunningDatanodeState.java  | 11 +---
 2 files changed, 13 insertions(+), 28 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 7e06473..4a979fd 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -348,26 +348,20 @@ public class StateContext {
   throws InterruptedException, ExecutionException, TimeoutException {
 stateExecutionCount.incrementAndGet();
 DatanodeState task = getTask();
-
-// Adding not null check, in a case where datanode is still starting up, 
but
-// we called stop DatanodeStateMachine, this sets state to SHUTDOWN, and
-// there is a chance of getting task as null.
-if (task != null) {
-  if (this.isEntering()) {
-task.onEnter();
+if (this.isEntering()) {
+  task.onEnter();
+}
+task.execute(service);
+DatanodeStateMachine.DatanodeStates newState = task.await(time, unit);
+if (this.state != newState) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Task {} executed, state transited from {} to {}",
+task.getClass().getSimpleName(), this.state, newState);
   }
-  task.execute(service);
-  DatanodeStateMachine.DatanodeStates newState = task.await(time, unit);
-  if (this.state != newState) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Task {} executed, state transited from {} to {}",
-  task.getClass().getSimpleName(), this.state, newState);
-}
-if (isExiting(newState)) {
-  task.onExit();
-}
-this.setState(newState);
+  if (isExiting(newState)) {
+task.onExit();
   }
+  this.setState(newState);
 }
   }
 
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
index 6b596fe..ec2358a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
@@ -86,16 +86,7 @@ public class RunningDatanodeState implements DatanodeState {
 for (EndpointStateMachine endpoint : connectionManager.getValues()) {
   Callable endpointTask
   = getEndPointTask(endpoint);
-  if (endpointTask != null) {
-ecs.submit(endpointTask);
-  } else {
-// This can happen if a task is taking more time than the timeOut
-// specified for the task in await, and when it is completed the task
-// has set the state to Shutdown, we may see the state as shutdown
-// here. So, we need to Shutdown DatanodeStateMachine.
-LOG.error("State is Shutdown in RunningDatanodeState");
-context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
-  }
+  ecs.submit(endpointTask);
 }
   }
   //TODO : Cache some of these tasks instead of creating them
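
For context, the change being reverted (HDDS-1370) had added defensive branches for a
shutdown that races with datanode startup: getTask() and getEndPointTask() can return
null once the state machine has been flipped to SHUTDOWN, and the patch skipped the
step, or forced the SHUTDOWN state, instead of dereferencing the null. The guard
pattern in a minimal form (hypothetical worker, not the Ozone state machine):

import java.util.concurrent.atomic.AtomicReference;

class Worker {
  enum State { RUNNING, SHUTDOWN }

  private final AtomicReference<State> state = new AtomicReference<>(State.RUNNING);

  /** May return null once another thread has moved the state to SHUTDOWN. */
  Runnable nextTask() {
    return state.get() == State.RUNNING ? () -> { } : null;
  }

  void runOnce() {
    Runnable task = nextTask();
    if (task == null) {
      // Shutdown raced with startup: skip the step instead of hitting an NPE.
      state.set(State.SHUTDOWN);
      return;
    }
    task.run();
  }
}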


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch revert-715-HDDS-1370 created (now 5b308cc)

2019-04-10 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch revert-715-HDDS-1370
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


  at 5b308cc  Revert "HDDS-1370. Command Execution in Datanode fails 
because of NPE (#715)"

This branch includes the following new commits:

 new 5b308cc  Revert "HDDS-1370. Command Execution in Datanode fails 
because of NPE (#715)"

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1388. Add a shell script to run MiniOzoneChaosCluster using mvn exec. Contributed by Mukul Kumar Singh. (#709)

2019-04-09 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 312d4d4  HDDS-1388. Add a shell script to run MiniOzoneChaosCluster 
using mvn exec. Contributed by Mukul Kumar Singh. (#709)
312d4d4 is described below

commit 312d4d4cc5d6f022a839f7ae390ee0f4520898d4
Author: Mukul Kumar Singh 
AuthorDate: Wed Apr 10 01:38:25 2019 +0530

HDDS-1388. Add a shell script to run MiniOzoneChaosCluster using mvn exec. 
Contributed by Mukul Kumar Singh. (#709)
---
 .../hadoop/hdds/scm/block/BlockManagerImpl.java|  2 ++
 .../integration-test/src/test/bin/start-chaos.sh   | 35 ++
 .../apache/hadoop/ozone/MiniOzoneChaosCluster.java |  9 --
 .../hadoop/ozone/MiniOzoneLoadGenerator.java   | 15 +++---
 .../hadoop/ozone/TestMiniChaosOzoneCluster.java|  6 ++--
 5 files changed, 58 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 5ae4115..d15f07b 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -189,6 +189,8 @@ public class BlockManagerImpl implements BlockManager, 
BlockmanagerMXBean {
   // factors are handled by pipeline creator
   pipeline = pipelineManager.createPipeline(type, factor);
 } catch (IOException e) {
+  LOG.error("pipeline creation failed type:{} factor:{}", type,
+  factor, e);
   break;
 }
   } else {
diff --git a/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh 
b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
new file mode 100755
index 000..dcec909
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/bin/start-chaos.sh
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env bash
+
+date=`date +"%m-%d-%y-%T"`
+fileformat=".MiniOzoneChaosCluster.log"
+heapformat=".dump"
+current="/tmp/"
+filename=$current$date$fileformat
+heapdumpfile=$current$date$heapformat
+
+export MAVEN_OPTS="-XX:+HeapDumpOnOutOfMemoryError 
-XX:HeapDumpPath={$heapdumpfile}"
+
+echo "logging to" ${filename}
+echo "heapdump to" ${heapdumpfile}
+
+echo "Starting MiniOzoneChaosCluster"
+mvn clean install -DskipTests > ${filename} 2>&1
+mvn exec:java \
+  -Dexec.mainClass="org.apache.hadoop.ozone.TestMiniChaosOzoneCluster" \
+  -Dexec.classpathScope=test \
+  -Dexec.args="$*" >> ${filename} 2>&1
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 8e25d48..52a2d40 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -66,6 +66,7 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
 
 this.executorService =  Executors.newSingleThreadScheduledExecutor();
 this.numDatanodes = getHddsDatanodes().size();
+LOG.info("Starting MiniOzoneChaosCluster with:{} datanodes" + 
numDatanodes);
 LogUtils.setLogLevel(GrpcClientProtocolClient.LOG, Level.WARN);
   }
 
@@ -117,13 +118,16 @@ public class MiniOzoneChaosCluster extends 
MiniOzoneClusterImpl {
   }
 
   void startChaos(long initialDelay, long period, TimeUnit timeUnit) {
+LOG.info("Starting Chaos with failure period:{} unit:{}", period, 
timeUnit);
 scheduledFuture = executorService.scheduleAtFixedRate(this::fail,
 initialDelay, period, timeUnit);
   }
 
   void stopChaos() throws Exception {
-scheduledFuture.cancel(false);
-scheduledFut

[hadoop] branch trunk updated: HDDS-1332. Attempt to fix flaky test testStartStopDatanodeStateMachine (#697)

2019-04-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 274ab4e  HDDS-1332. Attempt to fix flaky test 
testStartStopDatanodeStateMachine (#697)
274ab4e is described below

commit 274ab4e86c162d1446230948659372c6e8dca492
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Fri Apr 5 22:21:58 2019 +0200

HDDS-1332. Attempt to fix flaky test testStartStopDatanodeStateMachine 
(#697)
---
 .../container/common/statemachine/SCMConnectionManager.java | 13 -
 .../ozone/container/common/TestDatanodeStateMachine.java|  8 +++-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 775a91a..f6f64a4 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -35,13 +35,13 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import static java.util.Collections.unmodifiableList;
 import static org.apache.hadoop.hdds.scm.HddsServerUtil
 .getScmRpcTimeOutInMilliseconds;
 
@@ -184,7 +184,12 @@ public class SCMConnectionManager
* @return - List of RPC Endpoints.
*/
   public Collection getValues() {
-return scmMachines.values();
+readLock();
+try {
+  return unmodifiableList(new ArrayList<>(scmMachines.values()));
+} finally {
+  readUnlock();
+}
   }
 
   @Override
@@ -201,9 +206,7 @@ public class SCMConnectionManager
   public List getSCMServers() {
 readLock();
 try {
-  return Collections
-  .unmodifiableList(new ArrayList<>(scmMachines.values()));
-
+  return unmodifiableList(new ArrayList<>(scmMachines.values()));
 } finally {
   readUnlock();
 }
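The fix above has both accessors hand out an immutable snapshot taken while the read lock is held, rather than exposing the live map values to callers. A self-contained sketch of that idiom, with class and field names that are illustrative rather than from the patch:

import static java.util.Collections.unmodifiableList;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class SnapshotRegistry<K, V> {
  private final Map<K, V> machines = new ConcurrentHashMap<>();
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  List<V> values() {
    lock.readLock().lock();
    try {
      // Copy under the lock so callers never observe concurrent mutation,
      // then wrap so they cannot mutate the snapshot either.
      return unmodifiableList(new ArrayList<>(machines.values()));
    } finally {
      lock.readLock().unlock();
    }
  }
}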
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index a6fef1e..29160ee 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -154,8 +154,6 @@ public class TestDatanodeStateMachine {
 
   /**
* Assert that starting statemachine executes the Init State.
-   *
-   * @throws InterruptedException
*/
   @Test
   public void testStartStopDatanodeStateMachine() throws IOException,
@@ -167,9 +165,9 @@ public class TestDatanodeStateMachine {
   stateMachine.getConnectionManager();
   GenericTestUtils.waitFor(
   () -> {
-LOG.info("connectionManager.getValues().size() is {}",
-connectionManager.getValues().size());
-return connectionManager.getValues().size() == 1;
+int size = connectionManager.getValues().size();
+LOG.info("connectionManager.getValues().size() is {}", size);
+return size == 1;
   }, 1000, 3);
 
   stateMachine.stopDaemon();
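The flakiness fix is to read the size once, so the value that gets logged is the same value that gets compared. A hedged sketch of the resulting poll-and-wait idiom; the map argument and the 30-second timeout below are illustrative, not taken from the patch.

import org.apache.hadoop.test.GenericTestUtils;

class WaitForExample {
  // Polls every second until the registry holds exactly one entry, or times out.
  static void awaitSingleEntry(java.util.Map<?, ?> registry) throws Exception {
    GenericTestUtils.waitFor(() -> {
      int size = registry.size();   // read once; log and compare the same value
      return size == 1;
    }, 1000, 30000);
  }
}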


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1358 : Recon Server REST API not working as expected. (#668)

2019-04-03 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8ff41d6  HDDS-1358 : Recon Server REST API not working as expected. 
(#668)
8ff41d6 is described below

commit 8ff41d62434266d49fab65abacea5f9a20fe8172
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Wed Apr 3 14:52:06 2019 -0700

HDDS-1358 : Recon Server REST API not working as expected. (#668)
---
 hadoop-ozone/ozone-recon/pom.xml   |  69 ++-
 java => ReconGuiceServletContextListener.java} |  21 +++-
 .../hadoop/ozone/recon/ReconRestServletModule.java | 134 +
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  12 +-
 .../ozone/recon/api/ContainerKeyService.java   |  50 ++--
 .../hadoop/ozone/recon/api/types/KeyMetadata.java  |  35 ++
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   2 +-
 .../spi/impl/ContainerDBServiceProviderImpl.java   |   2 +-
 .../ozone/recon/tasks/ContainerKeyMapperTask.java  |   9 +-
 .../main/resources/webapps.recon.WEB-INF/web.xml   |  25 ++--
 .../ozone/recon/api/TestContainerKeyService.java   |  22 +++-
 hadoop-ozone/s3gateway/pom.xml |  16 +++
 12 files changed, 356 insertions(+), 41 deletions(-)

diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml
index 2ff57a5..ef23770 100644
--- a/hadoop-ozone/ozone-recon/pom.xml
+++ b/hadoop-ozone/ozone-recon/pom.xml
@@ -27,6 +27,20 @@
 
   org.apache.hadoop
   hadoop-ozone-common
+  
+
+  jersey-server
+  com.sun.jersey
+
+
+  jersey-core
+  com.sun.jersey
+
+
+  jersey-servlet
+  com.sun.jersey
+
+  
 
 
   org.apache.hadoop
@@ -40,14 +54,38 @@
 
   com.google.inject.extensions
   guice-servlet
-  4.1.0
-  compile
+  ${guice.version}
+
+
+  org.glassfish.jersey.containers
+  jersey-container-servlet
+  2.27
+  
+
+  org.glassfish.hk2
+  hk2-api
+
+  
 
 
   org.glassfish.jersey.containers
   jersey-container-servlet-core
   2.27
-  compile
+
+
+  org.glassfish.hk2
+  guice-bridge
+  2.5.0
+
+
+  org.glassfish.jersey.core
+  jersey-server
+  2.27
+
+
+  org.glassfish.jersey.media
+  jersey-media-json-jackson
+  2.27
 
 
   com.google.inject.extensions
@@ -55,6 +93,25 @@
   4.1.0
 
 
+  org.glassfish.jersey.inject
+  jersey-hk2
+  2.27
+  
+
+  hk2-api
+  org.glassfish.hk2
+
+
+  org.glassfish.hk2.external
+  aopalliance-repackaged
+
+
+  org.glassfish.hk2
+  hk2-utils
+
+  
+
+
   junit
   junit
   test
@@ -70,6 +127,12 @@
   powermock-module-junit4
   1.7.4
   test
+  
+
+  org.javassist
+  javassist
+
+  
 
 
   org.powermock
diff --git 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconApplication.java
 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
similarity index 67%
rename from 
hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconApplication.java
rename to 
hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
index 24ba5ee..ab11f0e 100644
--- 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconApplication.java
+++ 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconGuiceServletContextListener.java
@@ -15,15 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.ozone.recon;
 
-import org.glassfish.jersey.server.ResourceConfig;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
 
 /**
- * JaxRS resource definition.
+ * Servlet Context Listener that provides the Guice injector.
  */
-public class ReconApplication extends ResourceConfig {
-  public ReconApplication() {
-packages("org.apache.hadoop.ozone.recon.api");
+public class ReconGuiceServletContextListener
+extends GuiceServletContextListener {
+
+  private static Injector injector;
+
+  @Override
+  public Injector getInjector() {
+return injector;
+  }
+
+  static void setInjector(Injector inj) {
+injector = inj;
   }
 }
diff --git 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/ReconRestServletModule.java
 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache
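The renamed file is cut off above. For orientation, a GuiceServletContextListener only has to supply an Injector to the servlet container; a hypothetical, minimal example of the pattern follows, where the module contents are assumptions rather than Recon code.

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;

public class ExampleGuiceListener extends GuiceServletContextListener {
  @Override
  protected Injector getInjector() {
    // Recon builds its injector elsewhere and hands it in through a static setter,
    // as the hunk above shows; building it inline keeps this sketch self-contained.
    return Guice.createInjector(new ServletModule() {
      @Override
      protected void configureServlets() {
        // serve("/api/*").with(SomeApiServlet.class);  // resource bindings would go here
      }
    });
  }
}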

[hadoop] branch trunk updated: HDDS-1330 : Add a docker compose for Ozone deployment with Recon. (#669)

2019-04-03 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 59022b2  HDDS-1330 : Add a docker compose for Ozone deployment with 
Recon. (#669)
59022b2 is described below

commit 59022b28b7cca1d9b5867bc0787d0b5d0ad1a3e7
Author: avijayanhwx <14299376+avijayan...@users.noreply.github.com>
AuthorDate: Wed Apr 3 13:20:51 2019 -0700

HDDS-1330 : Add a docker compose for Ozone deployment with Recon. (#669)
---
 .../dist/src/main/compose/ozone-recon/.env | 17 +
 .../main/compose/ozone-recon/docker-compose.yaml   | 66 ++
 .../src/main/compose/ozone-recon/docker-config | 80 ++
 3 files changed, 163 insertions(+)

diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/.env 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
new file mode 100644
index 000..67eed25
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/.env
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+HDDS_VERSION=${hdds.version}
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
new file mode 100644
index 000..dd0abef
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-compose.yaml
@@ -0,0 +1,66 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "3"
+services:
+   datanode:
+  image: apache/hadoop-runner:jdk11
+  privileged: true #required by the profiler
+  volumes:
+- ../..:/opt/hadoop
+  ports:
+- 9864
+- 9882
+  command: ["/opt/hadoop/bin/ozone","datanode"]
+  env_file:
+- ./docker-config
+   om:
+  image: apache/hadoop-runner:jdk11
+  privileged: true #required by the profiler
+  volumes:
+ - ../..:/opt/hadoop
+  ports:
+ - 9874:9874
+  environment:
+ ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION
+ WAITFOR: scm:9876
+  env_file:
+  - ./docker-config
+  command: ["/opt/hadoop/bin/ozone","om"]
+   scm:
+  image: apache/hadoop-runner:jdk11
+  privileged: true #required by the profiler
+  volumes:
+ - ../..:/opt/hadoop
+  ports:
+ - 9876:9876
+  env_file:
+  - ./docker-config
+  environment:
+  ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION
+  command: ["/opt/hadoop/bin/ozone","scm"]
+   recon:
+  image: apache/hadoop-runner:jdk11
+  privileged: true #required by the profiler
+  volumes:
+ - ../..:/opt/hadoop
+  ports:
+ - 9888:9888
+  env_file:
+  - ./docker-config
+  environment:
+ WAITFOR: om:9874
+  command: ["/opt/hadoop/bin/ozone","recon"]
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config 
b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
new file mode 100644
index 000..f00ace1
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/ozone-recon/docker-config
@@ -0,0 +1,80 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTI

[hadoop] branch trunk updated: HDDS-1322. Hugo errors when building Ozone (#671)

2019-04-01 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3675670  HDDS-1322. Hugo errors when building Ozone (#671)
3675670 is described below

commit 36756703f062d944daa148a5709dc24506106e4f
Author: Doroszlai, Attila <6454655+adorosz...@users.noreply.github.com>
AuthorDate: Mon Apr 1 17:52:05 2019 +0200

HDDS-1322. Hugo errors when building Ozone (#671)
---
 hadoop-hdds/docs/pom.xml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml
index 3df0636..e3644f8 100644
--- a/hadoop-hdds/docs/pom.xml
+++ b/hadoop-hdds/docs/pom.xml
@@ -49,8 +49,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   ${project.build.directory}
   
 
${basedir}/dev-support/bin/generate-site.sh
-${hdds.version}
-${project.build.directory}
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

2019-03-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 299177e  HDDS-139. Output of createVolume can be improved. Contributed 
by Shweta.
299177e is described below

commit 299177eaa74b839781f02f53e62c99269456f22f
Author: Shweta Yakkali 
AuthorDate: Tue Mar 26 19:01:49 2019 -0700

HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

(cherry picked from commit f426b7ce8fb33d57e4187484448b9e0bfc04ccfa)
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index a3214f3..d326cbc 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -269,8 +269,12 @@ public class RpcClient implements ClientProtocol, 
KeyProviderTokenIssuer {
   builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
 }
 
-LOG.info("Creating Volume: {}, with {} as owner and quota set to {} 
bytes.",
-volumeName, owner, quota);
+if (volArgs.getQuota() == null) {
+  LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
+} else {
+  LOG.info("Creating Volume: {}, with {} as owner "
+  + "and quota set to {} bytes.", volumeName, owner, quota);
+}
 ozoneManagerClient.createVolume(builder.build());
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-139. Output of createVolume can be improved. Contributed by Shweta.

2019-03-26 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f426b7c  HDDS-139. Output of createVolume can be improved. Contributed 
by Shweta.
f426b7c is described below

commit f426b7ce8fb33d57e4187484448b9e0bfc04ccfa
Author: Shweta Yakkali 
AuthorDate: Tue Mar 26 19:01:49 2019 -0700

HDDS-139. Output of createVolume can be improved. Contributed by Shweta.
---
 .../main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 9ba07d3..6ecda09 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -270,8 +270,12 @@ public class RpcClient implements ClientProtocol, 
KeyProviderTokenIssuer {
   builder.addOzoneAcls(OMPBHelper.convertOzoneAcl(ozoneAcl));
 }
 
-LOG.info("Creating Volume: {}, with {} as owner and quota set to {} 
bytes.",
-volumeName, owner, quota);
+if (volArgs.getQuota() == null) {
+  LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
+} else {
+  LOG.info("Creating Volume: {}, with {} as owner "
+  + "and quota set to {} bytes.", volumeName, owner, quota);
+}
 ozoneManagerClient.createVolume(builder.build());
   }
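Restated outside the diff for readability: the change branches on the optional quota so the message never reads "quota set to null bytes", while keeping SLF4J's parameterized placeholders. A compilable sketch of the same idiom, with illustrative class and method names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class VolumeCreateLogging {
  private static final Logger LOG =
      LoggerFactory.getLogger(VolumeCreateLogging.class);

  void logCreate(String volumeName, String owner, Long quotaBytes) {
    if (quotaBytes == null) {
      LOG.info("Creating Volume: {}, with {} as owner.", volumeName, owner);
    } else {
      LOG.info("Creating Volume: {}, with {} as owner and quota set to {} bytes.",
          volumeName, owner, quotaBytes);
    }
  }
}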
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED containers. Contributed by Nanda kumar. (#620)

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f854a89  HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED 
containers. Contributed by Nanda kumar. (#620)
f854a89 is described below

commit f854a89190bd2453ccb1bfaa123d63d546e913cd
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 14:36:29 2019 -0700

HDDS-1205. Refactor ReplicationManager to handle QUASI_CLOSED containers. 
Contributed by Nanda kumar. (#620)
---
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  12 +
 .../common/src/main/resources/ozone-default.xml|  20 +
 .../hdds/scm/container/ContainerManager.java   |   8 +
 .../hdds/scm/container/ReplicationManager.java | 748 +
 .../hdds/scm/container/SCMContainerManager.java|  10 +
 .../scm/container/states/ContainerStateMap.java|   9 +-
 .../java/org/apache/hadoop/hdds/scm/TestUtils.java |  53 +-
 .../scm/container/TestContainerReportHandler.java  |   8 +-
 .../scm/container/TestContainerReportHelper.java   |  40 --
 .../TestIncrementalContainerReportHandler.java |   6 +-
 .../hdds/scm/container/TestReplicationManager.java | 625 +
 11 files changed, 1485 insertions(+), 54 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 4e197d3..3b45b89 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -348,6 +348,18 @@ public final class ScmConfigKeys {
   public static final String HDDS_SCM_WATCHER_TIMEOUT_DEFAULT =
   "10m";
 
+  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL =
+  "hdds.scm.replication.thread.interval";
+
+  public static final String HDDS_SCM_REPLICATION_THREAD_INTERVAL_DEFAULT =
+  "5m";
+
+  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT =
+  "hdds.scm.replication.event.timeout";
+
+  public static final String HDDS_SCM_REPLICATION_EVENT_TIMEOUT_DEFAULT =
+  "10m";
+
   public static final String
   HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY =
   "hdds.scm.http.kerberos.principal";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 462a07b..9fd4ef3 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2357,4 +2357,24 @@
   Request to flush the OM DB before taking checkpoint snapshot.
 
   
+  
+hdds.scm.replication.thread.interval
+5m
+OZONE, SCM
+
+  There is a replication monitor thread running inside SCM which
+  takes care of replicating the containers in the cluster. This
+  property is used to configure the interval in which that thread
+  runs.
+
+  
+  
+hdds.scm.replication.event.timeout
+10m
+OZONE, SCM
+
+  Timeout for the container replication/deletion commands sent
+  to datanodes. After this timeout the command will be retried.
+
+  
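For reference, a hedged sketch of how a consumer might read the two new intervals; the real call sites inside ReplicationManager are not part of this excerpt, and the fallbacks below are hard-coded to mirror the documented defaults (5m and 10m) because the *_DEFAULT constants added here are strings.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

class ReplicationSettings {
  static long threadIntervalMillis(OzoneConfiguration conf) {
    return conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_REPLICATION_THREAD_INTERVAL,
        5 * 60 * 1000L, TimeUnit.MILLISECONDS);
  }

  static long eventTimeoutMillis(OzoneConfiguration conf) {
    return conf.getTimeDuration(ScmConfigKeys.HDDS_SCM_REPLICATION_EVENT_TIMEOUT,
        10 * 60 * 1000L, TimeUnit.MILLISECONDS);
  }
}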
 
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
index b2fe4b4..717d58d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java
@@ -34,6 +34,14 @@ import java.util.Set;
  */
 public interface ContainerManager extends Closeable {
 
+
+  /**
+   * Returns all the container Ids managed by ContainerManager.
+   *
+   * @return Set of ContainerID
+   */
+  Set getContainerIDs();
+
   /**
* Returns all the containers managed by ContainerManager.
*
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
new file mode 100644
index 000..97c600b
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@ -0,0 +1,748 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a 

[hadoop] branch ozone-0.4 updated: HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by Supratim Deka.

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 6d73e7b  HDDS-1323. Ignore unit test TestFailureHandlingByClient. 
Contributed by Supratim Deka.
6d73e7b is described below

commit 6d73e7bdf432fadf5a46dad8a249ded656b3a1aa
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 11:29:56 2019 -0700

HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by 
Supratim Deka.

(cherry picked from commit 1d389ecb24482c2c4b41df898e8f9bc937cc524d)
---
 .../apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java| 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 092c56f..aaf238b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -51,7 +52,9 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
+ * XXX Disabled [HDDS-1323]
  */
+@Ignore
 public class TestFailureHandlingByClient {
 
   private MiniOzoneCluster cluster;
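For context, JUnit 4's @Ignore also accepts a reason string, which keeps the tracking JIRA visible in test reports; the patch above records the JIRA in the class javadoc instead. A small illustrative sketch, where the class below is hypothetical and not part of the patch:

import org.junit.Ignore;
import org.junit.Test;

@Ignore("Disabled pending HDDS-1323")
public class FlakyIntegrationTestExample {
  @Test
  public void exercisesContainerFailureHandling() {
    // body omitted; the whole class is skipped while the annotation is present
  }
}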


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by Supratim Deka.

2019-03-22 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1d389ec  HDDS-1323. Ignore unit test TestFailureHandlingByClient. 
Contributed by Supratim Deka.
1d389ec is described below

commit 1d389ecb24482c2c4b41df898e8f9bc937cc524d
Author: Arpit Agarwal 
AuthorDate: Fri Mar 22 11:29:56 2019 -0700

HDDS-1323. Ignore unit test TestFailureHandlingByClient. Contributed by 
Supratim Deka.
---
 .../apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java| 3 +++
 1 file changed, 3 insertions(+)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 092c56f..aaf238b 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.junit.Assert;
+import org.junit.Ignore;
 import org.junit.Test;
 
 import java.io.IOException;
@@ -51,7 +52,9 @@ import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
 
 /**
  * Tests Close Container Exception handling by Ozone Client.
+ * XXX Disabled [HDDS-1323]
  */
+@Ignore
 public class TestFailureHandlingByClient {
 
   private MiniOzoneCluster cluster;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1320. Update ozone to latest ratis snapshot build (0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 8f422d7  HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
8f422d7 is described below

commit 8f422d7b5ec3bbc3b0fbf5ecde636a0c74b28ef0
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:24:02 2019 -0700

HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

(cherry picked from commit 90afc9ab0382d45083dca1434f02936985798e48)
---
 .../container/common/transport/server/ratis/XceiverServerRatis.java| 3 ++-
 hadoop-hdds/pom.xml| 2 +-
 hadoop-ozone/pom.xml   | 2 +-
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index f1ace28..e70e012 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -494,7 +494,8 @@ public final class XceiverServerRatis extends XceiverServer 
{
   RaftClientRequest.Type type) {
 return new RaftClientRequest(clientId, server.getId(),
 RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-nextCallId(), 0, Message.valueOf(request.toByteString()), type);
+nextCallId(), Message.valueOf(request.toByteString()), type,
+null);
   }
 
   private void handlePipelineFailure(RaftGroupId groupId,
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 6f95547..61fe426 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 0.4.0-SNAPSHOT
 
 
-0.4.0-5680cf5-SNAPSHOT
+0.4.0-1fc5ace-SNAPSHOT
 
 1.60
 
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index ae17655..09cede0 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -29,7 +29,7 @@
 3.2.0
 0.4.0-SNAPSHOT
 0.4.0-SNAPSHOT
-0.4.0-5680cf5-SNAPSHOT
+0.4.0-1fc5ace-SNAPSHOT
 1.60
 Badlands
 ${ozone.version}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1320. Update ozone to latest ratis snapshot build (0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 90afc9a  HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
90afc9a is described below

commit 90afc9ab0382d45083dca1434f02936985798e48
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:24:02 2019 -0700

HDDS-1320. Update ozone to latest ratis snapshot build 
(0.4.0-1fc5ace-SNAPSHOT). Contributed by Mukul Kumar Singh.
---
 .../common/transport/server/ratis/XceiverServerRatis.java | 3 ++-
 hadoop-hdds/pom.xml   | 2 +-
 .../hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java   | 8 
 hadoop-ozone/pom.xml  | 2 +-
 4 files changed, 8 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index d0a56f9..8f09ff2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -495,7 +495,8 @@ public final class XceiverServerRatis extends XceiverServer 
{
   RaftClientRequest.Type type) {
 return new RaftClientRequest(clientId, server.getId(),
 RaftGroupId.valueOf(PipelineID.getFromProtobuf(pipelineID).getId()),
-nextCallId(), 0, Message.valueOf(request.toByteString()), type);
+nextCallId(), Message.valueOf(request.toByteString()), type,
+null);
   }
 
   private void handlePipelineFailure(RaftGroupId groupId,
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 9ca65c0..32b2c03 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -46,7 +46,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
 0.5.0-SNAPSHOT
 
 
-0.4.0-5680cf5-SNAPSHOT
+0.4.0-1fc5ace-SNAPSHOT
 
 1.60
 
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index 4c2edfe..4406af6 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -134,11 +134,11 @@ public class TestOzoneManagerStateMachine {
 
 RaftClientRequest raftClientRequest =
 new RaftClientRequest(ClientId.randomId(),
-RaftPeerId.valueOf("random"), raftGroupId, 1, 1,
+RaftPeerId.valueOf("random"), raftGroupId, 1,
 Message.valueOf(
 OMRatisHelper.convertRequestToByteString(omRequest)),
 RaftClientRequest.Type.valueOf(
-RaftProtos.WriteRequestTypeProto.getDefaultInstance()));
+RaftProtos.WriteRequestTypeProto.getDefaultInstance()), null);
 
 TransactionContext transactionContext =
 ozoneManagerStateMachine.startTransaction(raftClientRequest);
@@ -232,11 +232,11 @@ public class TestOzoneManagerStateMachine {
 
 RaftClientRequest raftClientRequest =
 new RaftClientRequest(ClientId.randomId(),
-RaftPeerId.valueOf("random"), raftGroupId, 1, 1,
+RaftPeerId.valueOf("random"), raftGroupId, 1,
 Message.valueOf(
 OMRatisHelper.convertRequestToByteString(omRequest)),
 RaftClientRequest.Type.valueOf(
-RaftProtos.WriteRequestTypeProto.getDefaultInstance()));
+RaftProtos.WriteRequestTypeProto.getDefaultInstance()), null);
 
 TransactionContext transactionContext =
 ozoneManagerStateMachine.startTransaction(raftClientRequest);
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 4dcb80e..b243ccc 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -29,7 +29,7 @@
 3.2.0
 0.5.0-SNAPSHOT
 0.5.0-SNAPSHOT
-0.4.0-5680cf5-SNAPSHOT
+0.4.0-1fc5ace-SNAPSHOT
 1.60
 Crater Lake
 ${ozone.version}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. Contributed by Arpit Agarwal. (#633)

2019-03-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 6c009a3  HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded 
port numbers. Contributed by Arpit Agarwal. (#633)
6c009a3 is described below

commit 6c009a3d044665ff3227f793004be2910f5d7eb1
Author: Arpit Agarwal 
AuthorDate: Thu Mar 21 14:18:58 2019 -0700

HDDS-1321. TestOzoneManagerHttpServer depends on hard-coded port numbers. 
Contributed by Arpit Agarwal. (#633)

Change-Id: I9656af4a7f41812da9d125c10ae0e8daf3dcf7f5
(cherry picked from commit 2828f8c339d7a03bd2bedf99c7700a3bbeec3a34)
---
 .../java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java  | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
index fc85d8e..b071e27 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig.Policy;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.AfterClass;
@@ -94,8 +93,8 @@ public class TestOzoneManagerHttpServer {
 
   @Test public void testHttpPolicy() throws Exception {
 conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
-conf.set(ScmConfigKeys.OZONE_SCM_HTTPS_ADDRESS_KEY, "localhost:0");
-InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 
0);
+conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, "localhost:0");
+conf.set(OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, "localhost:0");
 
 OzoneManagerHttpServer server = null;
 try {
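The hunk is truncated here. The essence of the fix is replacing hard-coded SCM ports with "localhost:0", so the operating system picks a free port at bind time; a small general-purpose illustration of that behaviour (plain java.net, not Ozone code):

import java.io.IOException;
import java.net.ServerSocket;

class EphemeralPortExample {
  // Binding to port 0 asks the OS for any free port, which can then be read back.
  static int anyFreePort() throws IOException {
    try (ServerSocket socket = new ServerSocket(0)) {
      return socket.getLocalPort();
    }
  }
}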


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1308. Fix asf license errors. (#623)

2019-03-19 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 310ebf5  HDDS-1308. Fix asf license errors. (#623)
310ebf5 is described below

commit 310ebf5dc83b6c9e68d09246ed6c6f7cf6370fde
Author: Bharat Viswanadham 
AuthorDate: Tue Mar 19 14:06:39 2019 -0700

HDDS-1308. Fix asf license errors. (#623)
---
 .../apache/hadoop/ozone/protocolPB/RequestHandler.java  | 17 +
 .../ozone/om/ratis/TestOzoneManagerStateMachine.java| 17 +
 2 files changed, 34 insertions(+)

diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
index 2cf8dea..367efd4 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
 package org.apache.hadoop.ozone.protocolPB;
 
 import org.apache.hadoop.ozone.om.exceptions.OMException;
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index f98c8ca..4c2edfe 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
 package org.apache.hadoop.ozone.om.ratis;
 
 import org.apache.commons.lang3.RandomUtils;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: HDDS-1307. Test ScmChillMode testChillModeOperations failed. (#622)

2019-03-19 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 3edb697  HDDS-1307. Test ScmChillMode testChillModeOperations failed. 
(#622)
3edb697 is described below

commit 3edb697a37bfc0bcdd43af6101d286f95e3db0c7
Author: Bharat Viswanadham 
AuthorDate: Tue Mar 19 13:57:02 2019 -0700

HDDS-1307. Test ScmChillMode testChillModeOperations failed. (#622)

HDDS-1307. Test ScmChillMode testChillModeOperations failed. Contributed by 
Bharat Viswanadham. (#622)

(cherry picked from commit 1639071b054d120d8b99f34b4deed837d3afa11f)
---
 .../src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
index 8e11a66..6863401 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneClusterImpl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.TestStorageContainerManagerHelper;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -178,7 +177,7 @@ public class TestScmChillMode {
 
 om = miniCluster.get().getOzoneManager();
 
-LambdaTestUtils.intercept(OMException.class,
+LambdaTestUtils.intercept(IOException.class,
 "ChillModePrecheck failed for allocateBlock",
 () -> om.openKey(keyArgs));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1307. Test ScmChillMode testChillModeOperations failed. (#622)

2019-03-19 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1639071  HDDS-1307. Test ScmChillMode testChillModeOperations failed. 
(#622)
1639071 is described below

commit 1639071b054d120d8b99f34b4deed837d3afa11f
Author: Bharat Viswanadham 
AuthorDate: Tue Mar 19 13:57:02 2019 -0700

HDDS-1307. Test ScmChillMode testChillModeOperations failed. (#622)

HDDS-1307. Test ScmChillMode testChillModeOperations failed. Contributed by 
Bharat Viswanadham. (#622)
---
 .../src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
index 0f8ac08..e66532f 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmChillMode.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneClusterImpl;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.TestStorageContainerManagerHelper;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -178,7 +177,7 @@ public class TestScmChillMode {
 
 om = miniCluster.get().getOzoneManager();
 
-LambdaTestUtils.intercept(OMException.class,
+LambdaTestUtils.intercept(IOException.class,
 "ChillModePrecheck failed for allocateBlock",
 () -> om.openKey(keyArgs));
   }
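For reference, LambdaTestUtils.intercept fails the test unless the lambda throws the given exception type with the given text somewhere in its message, which is typically why widening the expected type keeps such assertions passing. A hedged sketch of the idiom, where allocateBlockInChillMode() is a hypothetical stand-in for om.openKey(keyArgs):

import java.io.IOException;

import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Test;

public class InterceptExample {

  private String allocateBlockInChillMode() throws IOException {
    throw new IOException("ChillModePrecheck failed for allocateBlock");
  }

  @Test
  public void chillModeRejectsAllocation() throws Exception {
    LambdaTestUtils.intercept(IOException.class,
        "ChillModePrecheck failed for allocateBlock",
        () -> allocateBlockInChillMode());
  }
}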


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch ozone-0.4 updated: Revert "HDDS-1284. Adjust default values of pipline recovery for more resilient service restart. Contributed by Elek, Marton. (#608)"

2019-03-18 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new 7cd82b4  Revert "HDDS-1284. Adjust default values of pipline recovery 
for more resilient service restart. Contributed by Elek, Marton. (#608)"
7cd82b4 is described below

commit 7cd82b4547ab776863a33cbff7e7978ee490be49
Author: Arpit Agarwal 
AuthorDate: Mon Mar 18 14:23:56 2019 -0700

Revert "HDDS-1284. Adjust default values of pipline recovery for more 
resilient service restart. Contributed by Elek, Marton. (#608)"

This reverts commit 0d35cfc0065a021318c604dc01dd2794acb9105b.
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 4 ++--
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index a45a169..4c67eb3 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -260,7 +260,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_STALENODE_INTERVAL =
   "ozone.scm.stale.node.interval";
   public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-  "5m";
+  "90s";
 
   public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
   "ozone.scm.heartbeat.rpc-timeout";
@@ -331,7 +331,7 @@ public final class ScmConfigKeys {
   "ozone.scm.pipeline.destroy.timeout";
 
   public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
-  "66s";
+  "300s";
 
   public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
   "ozone.scm.pipeline.creation.interval";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9a5a35b..d09360a 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1035,7 +1035,7 @@
   
   
 ozone.scm.stale.node.interval
-5m
+90s
 OZONE, MANAGEMENT
 
   The interval for stale node flagging. Please
@@ -1274,7 +1274,7 @@
   
   
 ozone.scm.pipeline.destroy.timeout
-66s
+300s
 OZONE, SCM, PIPELINE
 
   Once a pipeline is closed, SCM should wait for the above configured time


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: Revert "HDDS-1284. Adjust default values of pipline recovery for more resilient service restart. Contributed by Elek, Marton. (#608)"

2019-03-18 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ae3a2c3  Revert "HDDS-1284. Adjust default values of pipline recovery 
for more resilient service restart. Contributed by Elek, Marton. (#608)"
ae3a2c3 is described below

commit ae3a2c3851cbf7f010f7ae5734ed9e2dbac5d50c
Author: Arpit Agarwal 
AuthorDate: Mon Mar 18 14:21:57 2019 -0700

Revert "HDDS-1284. Adjust default values of pipline recovery for more 
resilient service restart. Contributed by Elek, Marton. (#608)"

This reverts commit 44b8451821c392dd59ee84153c98547ae9ce7042.
---
 .../src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java   | 4 ++--
 hadoop-hdds/common/src/main/resources/ozone-default.xml   | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 7a3baff..4e197d3 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -260,7 +260,7 @@ public final class ScmConfigKeys {
   public static final String OZONE_SCM_STALENODE_INTERVAL =
   "ozone.scm.stale.node.interval";
   public static final String OZONE_SCM_STALENODE_INTERVAL_DEFAULT =
-  "5m";
+  "90s";
 
   public static final String OZONE_SCM_HEARTBEAT_RPC_TIMEOUT =
   "ozone.scm.heartbeat.rpc-timeout";
@@ -331,7 +331,7 @@ public final class ScmConfigKeys {
   "ozone.scm.pipeline.destroy.timeout";
 
   public static final String OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT =
-  "66s";
+  "300s";
 
   public static final String OZONE_SCM_PIPELINE_CREATION_INTERVAL =
   "ozone.scm.pipeline.creation.interval";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index dd43c62..99977f8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1035,7 +1035,7 @@
   
   
 ozone.scm.stale.node.interval
-5m
+90s
 OZONE, MANAGEMENT
 
   The interval for stale node flagging. Please
@@ -1274,7 +1274,7 @@
   
   
 ozone.scm.pipeline.destroy.timeout
-66s
+300s
 OZONE, SCM, PIPELINE
 
   Once a pipeline is closed, SCM should wait for the above configured time


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1163. Basic framework for Ozone Data Scrubber. Contributed by Supratim Deka.

2019-03-12 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 24793d2  HDDS-1163. Basic framework for Ozone Data Scrubber. 
Contributed by Supratim Deka.
24793d2 is described below

commit 24793d2d971788de904165f7490f17d79d078a6a
Author: Arpit Agarwal 
AuthorDate: Wed Mar 13 04:32:39 2019 +0900

HDDS-1163. Basic framework for Ozone Data Scrubber. Contributed by Supratim 
Deka.
---
 .../org/apache/hadoop/hdds/HddsConfigKeys.java |   5 +-
 .../common/src/main/resources/ozone-default.xml|  10 +
 .../container/common/interfaces/Container.java |   5 +
 .../container/keyvalue/KeyValueContainer.java  |  69 +++-
 .../container/keyvalue/KeyValueContainerCheck.java | 432 +
 .../container/ozoneimpl/ContainerScrubber.java | 158 
 .../ozone/container/ozoneimpl/OzoneContainer.java  |  34 ++
 .../keyvalue/TestKeyValueContainerCheck.java   | 194 +
 8 files changed, 904 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 3dd28f6..3bb3895 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -65,6 +65,9 @@ public final class HddsConfigKeys {
   public static final float HDDS_CONTAINER_CLOSE_THRESHOLD_DEFAULT = 0.9f;
   public static final String HDDS_SCM_CHILLMODE_ENABLED =
   "hdds.scm.chillmode.enabled";
+  public static final String HDDS_CONTAINERSCRUB_ENABLED =
+  "hdds.containerscrub.enabled";
+  public static final boolean HDDS_CONTAINERSCRUB_ENABLED_DEFAULT = false;
   public static final boolean HDDS_SCM_CHILLMODE_ENABLED_DEFAULT = true;
   public static final String HDDS_SCM_CHILLMODE_MIN_DATANODE =
   "hdds.scm.chillmode.min.datanode";
@@ -255,4 +258,4 @@ public final class HddsConfigKeys {
   public static final String
   HDDS_DATANODE_HTTP_KERBEROS_KEYTAB_FILE_KEY =
   "hdds.datanode.http.kerberos.keytab";
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 54eb5c8..331b5c4 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1347,6 +1347,16 @@
   
 
   
+hdds.containerscrub.enabled
+false
+DATANODE
+
+  Boolean value to enable data and metadata scrubbing in the containers
+  running on each datanode.
+
+  
+
+  
 hdds.container.action.max.limit
 20
 DATANODE
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 89f09fd..1fcaaf5 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -151,4 +151,9 @@ public interface Container extends RwLock {
* updates the blockCommitSequenceId.
*/
   void updateBlockCommitSequenceId(long blockCommitSequenceId);
+
+  /**
+   * check and report the structural integrity of the container.
+   */
+  void check() throws StorageContainerException;
 }
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index de1b109..20dfd9c 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -565,8 +565,13 @@ public class KeyValueContainer implements 
Container {
*/
   @Override
   public File getContainerFile() {
-return new File(containerData.getMetadataPath(), containerData
-.getContainerID() + OzoneConsts.CONTAINER_EXTENSION);
+return getContainerFile(containerData.getMetadataPath(),
+containerData.getContainerID());
+  }
+
+  static File getContainerFile(String metadataPath, long containerId) {
+return new File(metadataPath,
+containerId + OzoneConsts.CONTAINER_EXTENSION);
   }
 
   @Override
@@ -635,6 +640,66 @@ public class KeyValueContainer implements 
Container {
   }
 
   /**
+   * run integrity checks on the Container metadata.
+   */
+  public void check() throws StorageContainerEx
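The KeyValueContainer hunk is cut off above. For reference, a hedged sketch of how a datanode component could gate scrubbing on the new hdds.containerscrub.enabled key; the real wiring lives in OzoneContainer and ContainerScrubber, which this excerpt does not show.

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

class ScrubberToggle {
  static boolean scrubberEnabled(OzoneConfiguration conf) {
    // Defaults to false, matching HDDS_CONTAINERSCRUB_ENABLED_DEFAULT above.
    return conf.getBoolean(HddsConfigKeys.HDDS_CONTAINERSCRUB_ENABLED,
        HddsConfigKeys.HDDS_CONTAINERSCRUB_ENABLED_DEFAULT);
  }
}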

[hadoop] branch trunk updated: HDDS-1188. Implement a skeleton patch for Recon server with initial set of interfaces. Contributed by Siddharth Wagle.

2019-03-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1ad5bfc  HDDS-1188. Implement a skeleton patch for Recon server with 
initial set of interfaces. Contributed by Siddharth Wagle.
1ad5bfc is described below

commit 1ad5bfc53fb4871e6ac297812de1d98025ebd13f
Author: Arpit Agarwal 
AuthorDate: Tue Mar 5 15:54:29 2019 -0800

HDDS-1188. Implement a skeleton patch for Recon server with initial set of 
interfaces. Contributed by Siddharth Wagle.
---
 .../org/apache/hadoop/hdds/cli/GenericCli.java |  3 +-
 hadoop-ozone/ozone-recon/pom.xml   | 50 
 .../ozone/recon/OzoneConfigurationProvider.java| 43 +++
 .../hadoop/ozone/recon/ReconApplication.java   | 29 +++
 .../hadoop/ozone/recon/ReconControllerModule.java  | 36 +
 .../apache/hadoop/ozone/recon/ReconHttpServer.java | 88 ++
 .../org/apache/hadoop/ozone/recon/ReconServer.java | 76 +++
 .../ozone/recon/ReconServerConfiguration.java  | 56 ++
 .../ozone/recon/api/ContainerKeyService.java   | 43 +++
 .../hadoop/ozone/recon/api/package-info.java   | 23 ++
 .../ozone/recon/api/types/ContainerMetadata.java   | 73 ++
 .../ozone/recon/api/types/IsoDateAdapter.java  | 48 
 .../hadoop/ozone/recon/api/types/KeyMetadata.java  | 88 ++
 .../hadoop/ozone/recon/api/types/package-info.java | 22 ++
 .../apache/hadoop/ozone/recon/package-info.java| 22 ++
 .../hadoop/ozone/recon/recovery/package-info.java  | 22 ++
 .../recon/spi/HddsDatanodeServiceProvider.java | 25 ++
 .../recon/spi/OzoneManagerServiceProvider.java | 25 ++
 .../recon/spi/StorageContainerServiceProvider.java | 25 ++
 .../hadoop/ozone/recon/spi/package-info.java   | 24 ++
 .../main/resources/webapps.recon.WEB-INF/web.xml   | 33 
 .../apache/hadoop/ozone/recon/package-info.java| 21 ++
 hadoop-ozone/pom.xml   |  1 +
 23 files changed, 874 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
index 79ef711..f905f60 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java
@@ -85,8 +85,7 @@ public class GenericCli implements Callable, 
GenericParentCommand {
 OzoneConfiguration ozoneConf = new OzoneConfiguration();
 if (configurationOverrides != null) {
   for (Entry entry : configurationOverrides.entrySet()) {
-ozoneConf
-.set(entry.getKey(), entry.getValue());
+ozoneConf.set(entry.getKey(), entry.getValue());
   }
 }
 return ozoneConf;
diff --git a/hadoop-ozone/ozone-recon/pom.xml b/hadoop-ozone/ozone-recon/pom.xml
new file mode 100644
index 000..fed7ad4
--- /dev/null
+++ b/hadoop-ozone/ozone-recon/pom.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Apache License, Version 2.0 header -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+         http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-ozone</artifactId>
+    <groupId>org.apache.hadoop</groupId>
+    <version>0.4.0-SNAPSHOT</version>
+  </parent>
+  <name>Apache Hadoop Ozone Recon</name>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>ozone-recon</artifactId>
+  <dependencies>
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+      <version>${guice.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+      <version>4.1.0</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.glassfish.jersey.containers</groupId>
+      <artifactId>jersey-container-servlet-core</artifactId>
+      <version>2.27</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-assistedinject</artifactId>
+      <version>4.1.0</version>
+    </dependency>
+  </dependencies>
+</project>
\ No newline at end of file
diff --git 
a/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/OzoneConfigurationProvider.java
 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/OzoneConfigurationProvider.java
new file mode 100644
index 000..3c8dae0
--- /dev/null
+++ 
b/hadoop-ozone/ozone-recon/src/main/java/org/apache/hadoop/ozone/recon/OzoneConfigurationProvider.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distri

[hadoop] branch ozone-0.4 updated: HDDS-1218. Do the dist-layout-stitching for Ozone after the test-compile phase. Contributed by Marton Elek.

2019-03-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
 new c57e1dd  HDDS-1218. Do the dist-layout-stitching for Ozone after the 
test-compile phase. Contributed by Marton Elek.
c57e1dd is described below

commit c57e1ddd63973f7dce5a9a3170b0fc1a521e14ab
Author: Arpit Agarwal 
AuthorDate: Tue Mar 5 10:25:31 2019 -0800

HDDS-1218. Do the dist-layout-stitching for Ozone after the test-compile 
phase. Contributed by Marton Elek.

(cherry picked from commit 3ef1235215c68e9f0aff8e17643c52838fa363a3)
---
 hadoop-ozone/dist/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index e66bbbe..c8a34a4 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -126,7 +126,7 @@
 
   
                <id>dist</id>
-               <phase>compile</phase>
+               <phase>prepare-package</phase>
                <goals>
                  <goal>exec</goal>
                </goals>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1218. Do the dist-layout-stitching for Ozone after the test-compile phase. Contributed by Marton Elek.

2019-03-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3ef1235  HDDS-1218. Do the dist-layout-stitching for Ozone after the 
test-compile phase. Contributed by Marton Elek.
3ef1235 is described below

commit 3ef1235215c68e9f0aff8e17643c52838fa363a3
Author: Arpit Agarwal 
AuthorDate: Tue Mar 5 10:25:31 2019 -0800

HDDS-1218. Do the dist-layout-stitching for Ozone after the test-compile 
phase. Contributed by Marton Elek.
---
 hadoop-ozone/dist/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index e66bbbe..c8a34a4 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -126,7 +126,7 @@
 
   
                <id>dist</id>
-               <phase>compile</phase>
+               <phase>prepare-package</phase>
                <goals>
                  <goal>exec</goal>
                </goals>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1219. TestContainerActionsHandler.testCloseContainerAction has an intermittent failure. Contributed by Elek, Marton.

2019-03-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 296259e  HDDS-1219. 
TestContainerActionsHandler.testCloseContainerAction has an intermittent 
failure. Contributed by Elek, Marton.
 new df99137  Merge pull request #555 from elek/HDDS-1219
296259e is described below

commit 296259e9b3bbb510ff4d9c8064bfcd12411af4ca
Author: Márton Elek 
AuthorDate: Tue Mar 5 13:45:33 2019 +0100

HDDS-1219. TestContainerActionsHandler.testCloseContainerAction has an 
intermittent failure. Contributed by Elek, Marton.
---
 .../apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
index 0997e1f..09daa59 100644
--- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -59,7 +59,7 @@ public class TestContainerActionsHandler {
 TestUtils.randomDatanodeDetails(), cap);
 
 queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
-
+queue.processAll(1000L);
 verify(closeContainerEventHandler, times(1))
 .onMessage(ContainerID.valueof(1L), queue);
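
The intermittent failure came from verifying the mock before the asynchronous
EventQueue had actually dispatched the fired event. processAll(1000L) blocks
until every queued event has been delivered (or the timeout expires), so the
verification that follows it becomes deterministic. A condensed sketch of the
pattern, reusing the names visible in the diff above; the handler wiring is
simplified relative to the real test:

    EventQueue queue = new EventQueue();
    queue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);

    // fireEvent() only enqueues; delivery happens on the queue's own executor.
    queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);

    // Drain the queue (waiting up to one second) before asserting on the mock.
    queue.processAll(1000L);
    verify(closeContainerEventHandler, times(1))
        .onMessage(ContainerID.valueof(1L), queue);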
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1222. Remove TestContainerSQLCli unit test stub. Contributed by Elek, Marton.

2019-03-05 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 02da06d  HDDS-1222. Remove TestContainerSQLCli unit test stub. 
Contributed by Elek, Marton.
 new 549d54f  Merge pull request #556 from elek/HDDS-1222
02da06d is described below

commit 02da06dd9f2d962e1db85d265b97ac3e269a60fd
Author: Márton Elek 
AuthorDate: Tue Mar 5 16:34:27 2019 +0100

HDDS-1222. Remove TestContainerSQLCli unit test stub. Contributed by Elek, 
Marton.
---
 .../hadoop/ozone/scm/TestContainerSQLCli.java  | 262 -
 1 file changed, 262 deletions(-)

diff --git 
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
 
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
deleted file mode 100644
index bba5995..000
--- 
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java
+++ /dev/null
@@ -1,262 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright containerOwnership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.scm;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
-import org.apache.hadoop.hdds.server.events.EventQueue;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
-import 
org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
-import 
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
-import org.apache.hadoop.ozone.scm.cli.SQLCLI;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.nio.file.Files;
-import java.nio.file.Paths;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
-import static org.apache.hadoop.ozone.OzoneConsts.KB;
-import static org.junit.Assert.assertEquals;
-
-/**
- * This class tests the CLI that transforms container into SQLite DB files.
- */
-@RunWith(Parameterized.class)
-public class TestContainerSQLCli {
-
-  private EventQueue eventQueue;
-
-  @Parameterized.Parameters
-  public static Collection data() {
-return Arrays.asList(new Object[][] {
-{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB},
-{OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB}
-});
-  }
-
-  private static String metaStoreType;
-
-  public TestContainerSQLCli(String type) {
-metaStoreType = type;
-  }
-
-  private static SQLCLI cli;
-
-  private MiniOzoneCluster cluster;
-  private OzoneConfiguration conf;
-  private String datanodeIpAddress;
-
-  private ContainerManager containerManager;
-  private NodeManager nodeManager;
-  private BlockManagerImpl blockManager;
-
-  private HashMap blockContainerMap;
-
-  private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
-  private static HddsProtos.ReplicationFactor factor;
-  private static HddsProtos.ReplicationType type;
-  private static final String CONTAINER_OWNER = "OZONE";

[hadoop] branch branch-2.8 updated: HDFS-14219. ConcurrentModificationException occurs in datanode occasionally. Contributed by Tao Jie.

2019-02-20 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-2.8
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.8 by this push:
 new 3c145a7  HDFS-14219. ConcurrentModificationException occurs in 
datanode occasionally. Contributed by Tao Jie.
3c145a7 is described below

commit 3c145a7ef0171ef248d7916c366778de95bc0d4b
Author: Arpit Agarwal 
AuthorDate: Wed Feb 20 15:04:33 2019 -0800

HDFS-14219. ConcurrentModificationException occurs in datanode 
occasionally. Contributed by Tao Jie.
---
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 36d68d0..b14f9e9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1876,7 +1876,7 @@ class FsDatasetImpl implements FsDatasetSpi 
{
 new HashMap();
 
 List curVolumes = null;
-synchronized(this) {
+try (AutoCloseableLock lock = datasetLock.acquire()) {
   curVolumes = volumes.getVolumes();
   for (FsVolumeSpi v : curVolumes) {
 builders.put(v.getStorageID(), 
BlockListAsLongs.builder(maxDataLength));
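
The ConcurrentModificationException arose because this report path guarded the
volume structures with synchronized(this), while other paths that modify the
same structures coordinate through datasetLock, so the two sides were not
mutually exclusive. AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock)
lets the dataset lock be held for exactly the scope of a try-with-resources
block. A minimal sketch of the idiom (the field and helper method here are
illustrative, not part of the patch):

    private final AutoCloseableLock datasetLock = new AutoCloseableLock();
    private final List<FsVolumeSpi> curVolumes = new ArrayList<>();

    List<FsVolumeSpi> snapshotVolumes() {
      // acquire() returns the lock itself; close() at the end of the try
      // block releases it, even when an exception is thrown.
      try (AutoCloseableLock lock = datasetLock.acquire()) {
        return new ArrayList<>(curVolumes);
      }
    }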


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDDS-1053. Generate RaftGroupId from OMServiceID. Contributed by Aravindan Vijayan.

2019-02-20 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 676a9cb  HDDS-1053. Generate RaftGroupId from OMServiceID. Contributed 
by Aravindan Vijayan.
676a9cb is described below

commit 676a9cbbfa80e8eeeda7a272971e1b3354f8
Author: Arpit Agarwal 
AuthorDate: Wed Feb 20 12:57:49 2019 -0800

HDDS-1053. Generate RaftGroupId from OMServiceID. Contributed by Aravindan 
Vijayan.
---
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |  2 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java|  8 +++-
 .../om/ratis/TestOzoneManagerRatisServer.java  | 49 ++
 3 files changed, 56 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 2931a54..37cfb7f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -273,5 +273,5 @@ public final class OzoneConsts {
   Metadata.Key.of(OZONE_USER, ASCII_STRING_MARSHALLER);
 
   // Default OMServiceID for OM Ratis servers to use as RaftGroupId
-  public static final String OM_SERVICE_ID_DEFAULT = "om-service-value";
+  public static final String OM_SERVICE_ID_DEFAULT = "omServiceIdDefault";
 }
diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
index 2cac258..8baa03b 100644
--- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
+++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
@@ -48,7 +49,6 @@ import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.RaftServerConfigKeys;
 import org.apache.ratis.statemachine.impl.BaseStateMachine;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.util.LifeCycle;
 import org.apache.ratis.util.SizeInBytes;
 import org.apache.ratis.util.TimeDuration;
@@ -91,7 +91,7 @@ public final class OzoneManagerRatisServer {
 
 this.raftPeerId = localRaftPeerId;
 this.raftGroupId = RaftGroupId.valueOf(
-ByteString.copyFromUtf8(raftGroupIdStr));
+getRaftGroupIdFromOmServiceId(raftGroupIdStr));
 this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers);
 
 StringBuilder raftPeersStr = new StringBuilder();
@@ -355,4 +355,8 @@ public final class OzoneManagerRatisServer {
 }
 return storageDir;
   }
+
+  private UUID getRaftGroupIdFromOmServiceId(String omServiceId) {
+return UUID.nameUUIDFromBytes(omServiceId.getBytes());
+  }
 }
diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
index ffa6680..83d2245 100644
--- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
+++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
 .OMResponse;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.util.LifeCycle;
 import org.junit.After;
 import org.junit.Assert;
@@ -152,4 +153,52 @@ public class TestOzoneManagerRatisServer {
   logCapturer.clearOutput();
 }
   }
+
+  @Test
+  public void verifyRaftGroupIdGenerationWithDefaultOmServiceId() throws
+  Exception {
+UUID uuid = UUID.nameUUIDFromBytes(OzoneConsts.OM_SERVICE_ID_DEFAULT
+.getBytes());
+RaftGroupId raftGroupId = omRatisServer.getRaftGroup().getGroupId();
+Assert.assertEquals(uuid, raftGroupId.getUuid());
+Assert.assertEquals(raftGroupId.toByteString().size(), 16);
+  }
+
+  @Test
+  public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws
+  Exception {
+String customOmServiceId = "omSIdCustom123";
+OzoneConfiguration newConf = new OzoneConfiguration();
+String newOmId = UUID.randomUUID().toString();
+String path = GenericTestUtils.g
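
UUID.nameUUIDFromBytes() produces a name-based (version 3) UUID, so the derived
RaftGroupId is a pure function of the OM service id: every OM peer configured
with the same service id, and every restart of the same OM, computes an
identical group id. A self-contained sketch of the derivation, using the
default service id from OzoneConsts shown above:

    String omServiceId = "omServiceIdDefault";   // OzoneConsts.OM_SERVICE_ID_DEFAULT
    UUID uuid = UUID.nameUUIDFromBytes(omServiceId.getBytes());
    RaftGroupId raftGroupId = RaftGroupId.valueOf(uuid);   // 16 bytes, deterministic

    // The same input always yields the same UUID, so this holds on any JVM:
    assert uuid.equals(UUID.nameUUIDFromBytes("omServiceIdDefault".getBytes()));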

[hadoop] branch trunk updated: HDDS-1016. Allow marking containers as unhealthy. Contributed by Arpit Agarwal.

2019-01-30 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c354195  HDDS-1016. Allow marking containers as unhealthy. Contributed 
by Arpit Agarwal.
c354195 is described below

commit c35419579b5c5b315c5b62d8b89149924416b480
Author: Arpit Agarwal 
AuthorDate: Wed Jan 30 11:40:50 2019 -0800

HDDS-1016. Allow marking containers as unhealthy. Contributed by Arpit 
Agarwal.
---
 .../container/common/interfaces/Container.java |   5 +
 .../container/keyvalue/KeyValueContainer.java  |  60 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  94 -
 .../TestKeyValueContainerMarkUnhealthy.java| 172 
 .../TestKeyValueHandlerWithUnhealthyContainer.java | 227 +
 5 files changed, 538 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 405cac3..58e3383 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -85,6 +85,11 @@ public interface Container extends RwLock {
   void markContainerForClose() throws StorageContainerException;
 
   /**
+   * Marks the container replica as unhealthy.
+   */
+  void markContainerUnhealthy() throws StorageContainerException;
+
+  /**
* Quasi Closes a open container, if it is already closed or does not exist a
* StorageContainerException is thrown.
*
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index e737a53..ba559e9 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -64,6 +64,7 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.CONTAINER_FILES_CREATE_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.CONTAINER_INTERNAL_ERROR;
+import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.DISK_OUT_OF_SPACE;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -72,6 +73,7 @@ import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.INVALID_CONTAINER_STATE;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .Result.UNSUPPORTED_REQUEST;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -109,8 +111,8 @@ public class KeyValueContainer implements 
Container {
 
 File containerMetaDataPath = null;
 //acquiring volumeset read lock
-volumeSet.readLock();
 long maxSize = containerData.getMaxSize();
+volumeSet.readLock();
 try {
   HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet
   .getVolumesList(), maxSize);
@@ -270,28 +272,67 @@ public class KeyValueContainer implements 
Container {
 
   @Override
   public void markContainerForClose() throws StorageContainerException {
-updateContainerData(() ->
-containerData.setState(ContainerDataProto.State.CLOSING));
+writeLock();
+try {
+  if (getContainerState() != ContainerDataProto.State.OPEN) {
+throw new StorageContainerException(
+"Attempting to close a " + getContainerState() + " container.",
+CONTAINER_NOT_OPEN);
+  }
+  updateContainerData(() ->
+  containerData.setState(ContainerDataProto.State.CLOSING));
+} finally {
+  writeUnlock();
+}
+  }
+
+  @Override
+  public void markContainerUnhealthy() throws StorageContainerException {
+writeLock();
+try {
+  updateContainerData(() ->
+  containerData.setState(ContainerDataProto.State.UNHEALTHY));
+} finally {
+  writeUnlock();
+}
   }
 
   @Override
   public void quasiClose() throws StorageContainerException {
-updateContainerData(containerData::quasiCloseContainer);
+writeLock();
+try {
+  updateContainerData(containerData::quasiCloseContainer);
+} finally {
+  writeUnlock();
+}
   }
 
   @Override
   public void close() throws StorageContainerException {
-updateContainerData(
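
Each transition above now follows the same shape: take the container write
lock, apply the state change through updateContainerData(), and release the
lock in a finally block; markContainerForClose() additionally rejects replicas
that are not OPEN with CONTAINER_NOT_OPEN. A hedged sketch of how a caller
might use the new markContainerUnhealthy() API (the surrounding handler method
and logger are illustrative, not from the patch):

    void onContainerFailure(Container container, StorageContainerException cause) {
      LOG.error("Marking container {} unhealthy",
          container.getContainerData().getContainerID(), cause);
      try {
        container.markContainerUnhealthy();   // added by HDDS-1016
      } catch (StorageContainerException e) {
        // The replica is already in a bad state; log and continue.
        LOG.warn("Could not persist the UNHEALTHY state", e);
      }
    }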

[hadoop] branch trunk updated: HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit Agarwal.

2019-01-27 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3b49d7a  HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit 
Agarwal.
3b49d7a is described below

commit 3b49d7aeae8819ce7c2c4f4fec057dd9e75dedf1
Author: Arpit Agarwal 
AuthorDate: Sun Jan 27 11:18:30 2019 -0800

HDDS-989. Check Hdds Volumes for errors. Contributed by Arpit Agarwal.
---
 .../container/common/volume/AbstractFuture.java| 1291 
 .../ozone/container/common/volume/HddsVolume.java  |   24 +-
 .../container/common/volume/HddsVolumeChecker.java |  418 +++
 .../common/volume/ThrottledAsyncChecker.java   |  245 
 .../container/common/volume/TimeoutFuture.java |  161 +++
 .../ozone/container/common/volume/VolumeSet.java   |  116 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |1 +
 .../common/volume/TestHddsVolumeChecker.java   |  212 
 .../common/volume/TestVolumeSetDiskChecks.java |  185 +++
 9 files changed, 2643 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
new file mode 100644
index 000..438692c
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java
@@ -0,0 +1,1291 @@
+/*
+ * Copyright (C) 2007 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License. You may obtain a
+ * copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations 
under
+ * the License.
+ */
+
+/**
+ * Some portions of this class have been modified to make it functional in this
+ * package.
+ */
+package org.apache.hadoop.ozone.container.common.volume;
+
+import com.google.common.annotations.Beta;
+import com.google.common.annotations.GwtCompatible;
+import com.google.common.base.Preconditions;
+import static com.google.common.base.Preconditions.checkNotNull;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater
+.newUpdater;
+
+import javax.annotation.Nullable;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executor;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy;
+import java.util.concurrent.locks.LockSupport;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * An abstract implementation of {@link ListenableFuture}, intended for
+ * advanced users only. More common ways to create a {@code ListenableFuture}
+ * include instantiating a {@link SettableFuture}, submitting a task to a
+ * {@link ListeningExecutorService}, and deriving a {@code Future} from an
+ * existing one, typically using methods like {@link Futures#transform
+ * (ListenableFuture, com.google.common.base.Function) Futures.transform}
+ * and its overloaded versions.
+ * 
+ * This class implements all methods in {@code ListenableFuture}.
+ * Subclasses should provide a way to set the result of the computation
+ * through the protected methods {@link #set(Object)},
+ * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}.
+ * Subclasses may also override {@link #interruptTask()}, which will be
+ * invoked automatically if a call to {@link #cancel(boolean) cancel(true)}
+ * succeeds in canceling the future. Subclasses should rarely override other
+ * methods.
+ */
+
+@GwtCompatible(emulated = true)
+public abstract class AbstractFuture implements ListenableFuture {
+  // NOTE: Whenever both tests are cheap and functional, it's faster to use &,
+  // | instead of &&, ||
+
+  private static final bool
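
AbstractFuture above is a copy of Guava's implementation (its header notes it
was modified to work in this package), brought in alongside the TimeoutFuture
helper listed in the summary of changes to support timed asynchronous disk
checks. For readers unfamiliar with the pattern its javadoc describes, a
minimal, self-contained illustration using stock Guava (this is not the Hadoop
code; the real volume-check wiring in HddsVolumeChecker is considerably more
involved):

    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.MoreExecutors;
    import com.google.common.util.concurrent.SettableFuture;

    public class ListenableFutureDemo {
      public static void main(String[] args) {
        // SettableFuture is the simplest concrete ListenableFuture.
        SettableFuture<String> result = SettableFuture.create();

        Futures.addCallback(result, new FutureCallback<String>() {
          @Override public void onSuccess(String value) {
            System.out.println("disk check completed: " + value);
          }
          @Override public void onFailure(Throwable t) {
            System.err.println("disk check failed: " + t);
          }
        }, MoreExecutors.directExecutor());

        result.set("HEALTHY");   // completing the future invokes the callback
      }
    }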

[hadoop] 01/02: HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cd5e7f152f67c60eec7c2163035420dd92377dc5
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:45:09 2019 -0800

HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by 
Arpit Agarwal.

(cherry picked from commit 6f0756fc0e43b785d3dee72a669997d934b57e4c)
---
 .../hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 9227d94..3ef0883 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -88,7 +88,7 @@ public class ThrottledAsyncChecker implements 
AsyncChecker {
*/
   private final Map> completedChecks;
 
-  ThrottledAsyncChecker(final Timer timer,
+  public ThrottledAsyncChecker(final Timer timer,
 final long minMsBetweenChecks,
 final long diskCheckTimeout,
 final ExecutorService executorService) {
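
The change is purely one of visibility; the constructor signature stays exactly
as shown. Making it public lets code outside the
org.apache.hadoop.hdfs.server.datanode.checker package build its own throttled
checker directly. A hedged construction sketch (the generic type parameters are
elided and the interval/timeout values are illustrative):

    ExecutorService executor = Executors.newCachedThreadPool();
    ThrottledAsyncChecker checker = new ThrottledAsyncChecker(
        new Timer(),        // org.apache.hadoop.util.Timer
        60_000L,            // minMsBetweenChecks: at most one check per target per minute
        10 * 60_000L,       // diskCheckTimeout: give up on checks running over 10 minutes
        executor);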


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated (cf520b7 -> f5bfeac)

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from cf520b7  HDFS-14207. ZKFC should catch exception when ha configuration 
missing. Contributed by Fei Hui.
 new 78b4597  HDFS-14222. Make ThrottledAsyncChecker constructor public. 
Contributed by Arpit Agarwal.
 new f5bfeac  HDFS-14221. Replace Guava Optional with Java Optional. 
Contributed by Arpit Agarwal.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  8 +++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 21 insertions(+), 32 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 8dd8121f7ddc690f40128031452fe0859d45b48e
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:44:37 2019 -0800

HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit 
Agarwal.

(cherry picked from commit 1ff658b2ef3fb933897712c728bc628f3f44bded)
---
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  6 ++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 20 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
index 997c0cb..d698454 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 30602c0..5c590f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
@@ -44,6 +43,7 @@ import javax.annotation.Nullable;
 import java.nio.channels.ClosedChannelException;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index dabaa83..c5de065 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -47,6 +46,7 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 3ef0883..bb1ed46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture
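
The substance of the change is the import swap shown above; call sites stay
almost identical because the two Optional types overlap heavily, but a few
method names differ. A quick, self-contained reference for the renames this
kind of migration touches (not taken from the patch):

    import java.util.Optional;

    public class OptionalMigration {
      public static void main(String[] args) {
        Optional<String> present  = Optional.of("value");       // same as Guava
        Optional<String> empty    = Optional.empty();            // Guava: Optional.absent()
        Optional<String> nullable = Optional.ofNullable(null);   // Guava: Optional.fromNullable(...)

        System.out.println(present.get());           // same as Guava
        System.out.println(empty.orElse("none"));    // Guava: or("none")
        System.out.println(nullable.isPresent());    // same as Guava
      }
    }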

[hadoop] 02/02: HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit f5bfeac62e113142e063e975bd85a347297300f9
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:44:37 2019 -0800

HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit 
Agarwal.

(cherry picked from commit 1ff658b2ef3fb933897712c728bc628f3f44bded)
---
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  6 ++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 20 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
index 997c0cb..d698454 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 30602c0..5c590f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
@@ -44,6 +43,7 @@ import javax.annotation.Nullable;
 import java.nio.channels.ClosedChannelException;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index dabaa83..c5de065 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -47,6 +46,7 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 3ef0883..bb1ed46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture

[hadoop] branch branch-3.2 updated (2253a86 -> 8dd8121)

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 2253a86  HDFS-14207. ZKFC should catch exception when ha configuration 
missing. Contributed by Fei Hui.
 new cd5e7f1  HDFS-14222. Make ThrottledAsyncChecker constructor public. 
Contributed by Arpit Agarwal.
 new 8dd8121  HDFS-14221. Replace Guava Optional with Java Optional. 
Contributed by Arpit Agarwal.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  8 +++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 21 insertions(+), 32 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 78b45977b1a192aa0764445071c21a0617c6aa7f
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:45:09 2019 -0800

HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by 
Arpit Agarwal.

(cherry picked from commit 6f0756fc0e43b785d3dee72a669997d934b57e4c)
---
 .../hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 9227d94..3ef0883 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -88,7 +88,7 @@ public class ThrottledAsyncChecker implements 
AsyncChecker {
*/
   private final Map> completedChecks;
 
-  ThrottledAsyncChecker(final Timer timer,
+  public ThrottledAsyncChecker(final Timer timer,
 final long minMsBetweenChecks,
 final long diskCheckTimeout,
 final ExecutorService executorService) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 1ff658b2ef3fb933897712c728bc628f3f44bded
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:44:37 2019 -0800

HDFS-14221. Replace Guava Optional with Java Optional. Contributed by Arpit 
Agarwal.
---
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  6 ++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 20 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
index 997c0cb..d698454 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AsyncChecker.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
+import java.util.Optional;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 30602c0..5c590f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.FutureCallback;
@@ -44,6 +43,7 @@ import javax.annotation.Nullable;
 import java.nio.channels.ClosedChannelException;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index dabaa83..c5de065 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
-import com.google.common.base.Optional;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -47,6 +46,7 @@ import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index 9227d94..f809fea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import com.google.common.base.Optional;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -35,6 +34,7 @@ import javax.annotation.Nonnull;
 import javax.annotation.Nullable

[hadoop] branch trunk updated (de34fc1 -> 6f0756f)

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from de34fc1  HDFS-14207. ZKFC should catch exception when ha configuration 
missing. Contributed by Fei Hui.
 new 1ff658b  HDFS-14221. Replace Guava Optional with Java Optional. 
Contributed by Arpit Agarwal.
 new 6f0756f  HDFS-14222. Make ThrottledAsyncChecker constructor public. 
Contributed by Arpit Agarwal.

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hdfs/server/datanode/checker/AsyncChecker.java |  2 +-
 .../datanode/checker/DatasetVolumeChecker.java |  2 +-
 .../datanode/checker/StorageLocationChecker.java   |  2 +-
 .../datanode/checker/ThrottledAsyncChecker.java|  8 +++---
 .../datanode/checker/TestDatasetVolumeChecker.java |  2 +-
 .../checker/TestThrottledAsyncChecker.java |  2 +-
 .../checker/TestThrottledAsyncCheckerTimeout.java  | 29 +++---
 .../hdfs/server/namenode/TestAuditLogAtDebug.java  |  6 ++---
 8 files changed, 21 insertions(+), 32 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 02/02: HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by Arpit Agarwal.

2019-01-21 Thread arp
This is an automated email from the ASF dual-hosted git repository.

arp pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 6f0756fc0e43b785d3dee72a669997d934b57e4c
Author: Arpit Agarwal 
AuthorDate: Mon Jan 21 20:45:09 2019 -0800

HDFS-14222. Make ThrottledAsyncChecker constructor public. Contributed by 
Arpit Agarwal.
---
 .../hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
index f809fea..bb1ed46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
@@ -88,7 +88,7 @@ public class ThrottledAsyncChecker implements 
AsyncChecker {
*/
   private final Map> completedChecks;
 
-  ThrottledAsyncChecker(final Timer timer,
+  public ThrottledAsyncChecker(final Timer timer,
 final long minMsBetweenChecks,
 final long diskCheckTimeout,
 final ExecutorService executorService) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-893. pipeline status is ALLOCATED in scmcli listPipelines command. Contributed by Lokesh Jain.

2018-12-19 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk b1ce9aa3b -> cf571133b


HDDS-893. pipeline status is ALLOCATED in scmcli listPipelines command. 
Contributed by Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf571133
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf571133
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf571133

Branch: refs/heads/trunk
Commit: cf571133b89643e4db96ec4cc7988d4a2f850be9
Parents: b1ce9aa
Author: Arpit Agarwal 
Authored: Wed Dec 19 13:42:06 2018 +0530
Committer: Arpit Agarwal 
Committed: Wed Dec 19 13:42:06 2018 +0530

--
 .../common/helpers/ContainerWithPipeline.java   |  7 ++-
 .../hadoop/hdds/scm/pipeline/Pipeline.java  | 38 ++--
 .../pipeline/UnknownPipelineStateException.java | 46 
 ...rLocationProtocolClientSideTranslatorPB.java |  9 ++--
 ...rLocationProtocolServerSideTranslatorPB.java |  8 ++--
 hadoop-hdds/common/src/main/proto/hdds.proto|  8 +++-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  6 ++-
 7 files changed, 107 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf571133/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
index 8f49255..5b01bd2 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerWithPipeline.java
@@ -24,6 +24,7 @@ import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;
 
 /**
  * Class wraps ozone container info.
@@ -48,13 +49,15 @@ public class ContainerWithPipeline implements 
Comparator,
   }
 
   public static ContainerWithPipeline fromProtobuf(
-  HddsProtos.ContainerWithPipeline allocatedContainer) {
+  HddsProtos.ContainerWithPipeline allocatedContainer)
+  throws UnknownPipelineStateException {
 return new ContainerWithPipeline(
 ContainerInfo.fromProtobuf(allocatedContainer.getContainerInfo()),
 Pipeline.getFromProtobuf(allocatedContainer.getPipeline()));
   }
 
-  public HddsProtos.ContainerWithPipeline getProtobuf() {
+  public HddsProtos.ContainerWithPipeline getProtobuf()
+  throws UnknownPipelineStateException {
 HddsProtos.ContainerWithPipeline.Builder builder =
 HddsProtos.ContainerWithPipeline.newBuilder();
 builder.setContainerInfo(getContainerInfo().getProtobuf())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf571133/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index a103bd7..7fcfc8a 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -136,11 +136,13 @@ public final class Pipeline {
 return nodeStatus.isEmpty();
   }
 
-  public HddsProtos.Pipeline getProtobufMessage() {
+  public HddsProtos.Pipeline getProtobufMessage()
+  throws UnknownPipelineStateException {
 HddsProtos.Pipeline.Builder builder = HddsProtos.Pipeline.newBuilder()
 .setId(id.getProtobuf())
 .setType(type)
 .setFactor(factor)
+.setState(PipelineState.getProtobuf(state))
 .setLeaderID("")
 .addAllMembers(nodeStatus.keySet().stream()
 .map(DatanodeDetails::getProtoBufMessage)
@@ -148,11 +150,13 @@ public final class Pipeline {
 return builder.build();
   }
 
-  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) {
+  public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline)
+  throws UnknownPipelineStateException {
+Preconditions.checkNotNull(pipeline, "Pipeline is null");
 return new Builder().setId(PipelineID.getFromProtobuf(pipeline.getId()))
 .setFactor(pipeline.getFactor())
 .setType(pipeline.getType())
-
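
For context, a minimal sketch of what the new checked exception means for callers of the conversion methods shown above. The class and method names below are illustrative; only getProtobufMessage, getFromProtobuf and UnknownPipelineStateException come from the patch itself.

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.UnknownPipelineStateException;

public final class PipelineProtoRoundTripSketch {

  private PipelineProtoRoundTripSketch() {
  }

  // Serialize a pipeline and rebuild it; both directions can now fail if the
  // in-memory pipeline state cannot be mapped to or from the wire enum.
  static Pipeline roundTrip(Pipeline pipeline)
      throws UnknownPipelineStateException {
    HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
    // getFromProtobuf also rejects a null message via the Preconditions check
    // added in this patch.
    return Pipeline.getFromProtobuf(proto);
  }
}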

hadoop git commit: HDDS-940. Remove dead store to local variable in OmMetadataManagerImpl. Contributed by Dinesh Chitlangia.

2018-12-18 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk ccdd982e5 -> b1ce9aa3b


HDDS-940. Remove dead store to local variable in OmMetadataManagerImpl. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1ce9aa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1ce9aa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1ce9aa3

Branch: refs/heads/trunk
Commit: b1ce9aa3b364204e357cc46657201a14c8c0f241
Parents: ccdd982
Author: Arpit Agarwal 
Authored: Wed Dec 19 12:30:40 2018 +0530
Committer: Arpit Agarwal 
Committed: Wed Dec 19 12:30:40 2018 +0530

--
 .../main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1ce9aa3/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
--
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 28438a1..603bd18 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -637,7 +637,6 @@ public class OmMetadataManagerImpl implements 
OMMetadataManager {
   @Override
   public List getExpiredOpenKeys() throws IOException {
 List keyBlocksList = Lists.newArrayList();
-long now = Time.now();
 // TODO: Fix the getExpiredOpenKeys, Not part of this patch.
 return keyBlocksList;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HADOOP-12558. distcp documentation is woefully out of date. Contributed by Dinesh Chitlangia.

2018-11-15 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 ba75aeec2 -> 3e57adee0
  refs/heads/branch-3.2 8c9681d7f -> 351bfa1bc
  refs/heads/trunk 1e15c7e85 -> 914b0cf15


HADOOP-12558. distcp documentation is woefully out of date. Contributed by 
Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/914b0cf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/914b0cf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/914b0cf1

Branch: refs/heads/trunk
Commit: 914b0cf15f14ddff581b63f0902b7760e4d2bd56
Parents: 1e15c7e
Author: Arpit Agarwal 
Authored: Thu Nov 15 13:58:13 2018 -0800
Committer: Arpit Agarwal 
Committed: Thu Nov 15 13:58:13 2018 -0800

--
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/914b0cf1/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index f2b3dee..b855422 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -240,6 +240,7 @@ Flag  | Description  | 
Notes
 `-skipcrccheck` | Whether to skip CRC checks between source and target paths. |
 `-blocksperchunk ` | Number of blocks per chunk. When 
specified, split files into chunks to copy in parallel | If set to a positive 
value, files with more blocks than this value will be split into chunks of 
`` blocks to be transferred in parallel, and reassembled on the 
destination. By default, `` is 0 and the files will be 
transmitted in their entirety without splitting. This switch is only applicable 
when the source file system implements getBlockLocations method and the target 
file system implements concat method. |
 `-copybuffersize ` | Size of the copy buffer to use. By 
default, `` is set to 8192B |
+`-xtrack ` | Save information about missing source files to the 
specified path. | This option is only valid with `-update` option. This is an 
experimental property and it cannot be used with `-atomic` option.
 
 Architecture of DistCp
 --


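A hedged usage sketch for the new `-xtrack` flag documented above: it drives DistCp through ToolRunner, assuming DistCp's no-arg constructor that exists for ToolRunner-style invocation. The paths are placeholders; per the table, `-xtrack` is only valid together with `-update` and cannot be combined with `-atomic`.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.util.ToolRunner;

public final class DistCpXtrackSketch {
  public static void main(String[] args) throws Exception {
    String[] distcpArgs = {
        "-update",                         // -xtrack is only valid with -update
        "-xtrack", "/tmp/distcp-missing",  // where missing-source info is written
        "hdfs://source-nn:8020/data",
        "hdfs://dest-nn:8020/data"
    };
    // DistCp implements Tool, so ToolRunner parses generic options and runs it.
    System.exit(ToolRunner.run(new Configuration(), new DistCp(), distcpArgs));
  }
}
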
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HADOOP-12558. distcp documentation is woefully out of date. Contributed by Dinesh Chitlangia.

2018-11-15 Thread arp
HADOOP-12558. distcp documentation is woefully out of date. Contributed by 
Dinesh Chitlangia.

(cherry picked from commit 914b0cf15f14ddff581b63f0902b7760e4d2bd56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e57adee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e57adee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e57adee

Branch: refs/heads/branch-3.1
Commit: 3e57adee0a621c928ebe64d2050dc3e827d556d9
Parents: ba75aee
Author: Arpit Agarwal 
Authored: Thu Nov 15 13:58:13 2018 -0800
Committer: Arpit Agarwal 
Committed: Thu Nov 15 13:58:38 2018 -0800

--
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e57adee/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index f2b3dee..b855422 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -240,6 +240,7 @@ Flag  | Description  | 
Notes
 `-skipcrccheck` | Whether to skip CRC checks between source and target paths. |
 `-blocksperchunk ` | Number of blocks per chunk. When 
specified, split files into chunks to copy in parallel | If set to a positive 
value, files with more blocks than this value will be split into chunks of 
`` blocks to be transferred in parallel, and reassembled on the 
destination. By default, `` is 0 and the files will be 
transmitted in their entirety without splitting. This switch is only applicable 
when the source file system implements getBlockLocations method and the target 
file system implements concat method. |
 `-copybuffersize ` | Size of the copy buffer to use. By 
default, `` is set to 8192B |
+`-xtrack ` | Save information about missing source files to the 
specified path. | This option is only valid with `-update` option. This is an 
experimental property and it cannot be used with `-atomic` option.
 
 Architecture of DistCp
 --


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HADOOP-12558. distcp documentation is woefully out of date. Contributed by Dinesh Chitlangia.

2018-11-15 Thread arp
HADOOP-12558. distcp documentation is woefully out of date. Contributed by 
Dinesh Chitlangia.

(cherry picked from commit 914b0cf15f14ddff581b63f0902b7760e4d2bd56)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/351bfa1b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/351bfa1b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/351bfa1b

Branch: refs/heads/branch-3.2
Commit: 351bfa1bcfa300f3d4baf45d9f1b7a78a606e5ac
Parents: 8c9681d
Author: Arpit Agarwal 
Authored: Thu Nov 15 13:58:13 2018 -0800
Committer: Arpit Agarwal 
Committed: Thu Nov 15 13:58:29 2018 -0800

--
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/351bfa1b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index f2b3dee..b855422 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -240,6 +240,7 @@ Flag  | Description  | 
Notes
 `-skipcrccheck` | Whether to skip CRC checks between source and target paths. |
 `-blocksperchunk ` | Number of blocks per chunk. When 
specified, split files into chunks to copy in parallel | If set to a positive 
value, files with more blocks than this value will be split into chunks of 
`` blocks to be transferred in parallel, and reassembled on the 
destination. By default, `` is 0 and the files will be 
transmitted in their entirety without splitting. This switch is only applicable 
when the source file system implements getBlockLocations method and the target 
file system implements concat method. |
 `-copybuffersize ` | Size of the copy buffer to use. By 
default, `` is set to 8192B |
+`-xtrack ` | Save information about missing source files to the 
specified path. | This option is only valid with `-update` option. This is an 
experimental property and it cannot be used with `-atomic` option.
 
 Architecture of DistCp
 --


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HADOOP-15936. [JDK 11] MiniDFSClusterManager & MiniHadoopClusterManager compilation fails due to the usage of '_' as identifier. Contributed by Zsolt Venczel.

2018-11-15 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 460a94a10 -> 1e15c7e85


HADOOP-15936. [JDK 11] MiniDFSClusterManager & MiniHadoopClusterManager 
compilation fails due to the usage of '_' as identifier. Contributed by Zsolt 
Venczel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e15c7e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e15c7e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e15c7e8

Branch: refs/heads/trunk
Commit: 1e15c7e855f9eb37286dc58f572a71f1cf04cc3b
Parents: 460a94a
Author: Arpit Agarwal 
Authored: Thu Nov 15 12:42:31 2018 -0800
Committer: Arpit Agarwal 
Committed: Thu Nov 15 12:42:31 2018 -0800

--
 .../test/java/org/apache/hadoop/test/MiniDFSClusterManager.java| 2 +-
 .../java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e15c7e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
index 1d06616..b26c1d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
@@ -127,7 +127,7 @@ public class MiniDFSClusterManager {
   LOG.info("Cluster is no longer up, exiting");
   return;
 }
-  } catch (InterruptedException _) {
+  } catch (InterruptedException e) {
 // nothing
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e15c7e8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
index d29dd34..1d1c083 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
@@ -137,7 +137,7 @@ public class MiniHadoopClusterManager {
 while (true) {
   try {
 Thread.sleep(1000 * 60);
-  } catch (InterruptedException _) {
+  } catch (InterruptedException e) {
 // nothing
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-819. Match OzoneFileSystem behavior with S3AFileSystem. Contributed by Hanisha Koneru.

2018-11-14 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 21ec4bdae -> bac8807c8


HDDS-819. Match OzoneFileSystem behavior with S3AFileSystem. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bac8807c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bac8807c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bac8807c

Branch: refs/heads/trunk
Commit: bac8807c8b7abb4864aed921585f6e6fc5e9cd5c
Parents: 21ec4bd
Author: Arpit Agarwal 
Authored: Wed Nov 14 16:12:06 2018 -0800
Committer: Arpit Agarwal 
Committed: Wed Nov 14 16:12:06 2018 -0800

--
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java | 277 ---
 .../hadoop/fs/ozone/TestOzoneFileSystem.java| 174 +++-
 2 files changed, 411 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bac8807c/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
--
diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
index 1336382..78b6e5d 100644
--- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java
@@ -24,13 +24,17 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Iterator;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import com.google.common.base.Preconditions;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -199,17 +203,7 @@ public class OzoneFileSystem extends FileSystem {
 deleteObject(key);
   }
 } catch (FileNotFoundException ignored) {
-  // check if the parent directory needs to be created
-  Path parent = f.getParent();
-  try {
-// create all the directories for the parent
-FileStatus parentStatus = getFileStatus(parent);
-LOG.trace("parent key:{} status:{}", key, parentStatus);
-  } catch (FileNotFoundException e) {
-mkdirs(parent);
-  }
-  // This exception needs to ignored as this means that the file currently
-  // does not exists and a new file can thus be created.
+  // this means the file is not found
 }
 
 OzoneOutputStream ozoneOutputStream =
@@ -390,8 +384,14 @@ public class OzoneFileSystem extends FileSystem {
 }
   }
 
-  @Override
-  public boolean delete(Path f, boolean recursive) throws IOException {
+  /**
+   * Deletes the children of the input dir path by iterating through the
+   * DeleteIterator.
+   * @param f directory path to be deleted
+   * @return true if successfully deletes all required keys, false otherwise
+   * @throws IOException
+   */
+  private boolean innerDelete(Path f, boolean recursive) throws IOException {
 LOG.trace("delete() path:{} recursive:{}", f, recursive);
 try {
   DeleteIterator iterator = new DeleteIterator(f, recursive);
@@ -402,35 +402,185 @@ public class OzoneFileSystem extends FileSystem {
 }
   }
 
+  @Override
+  public boolean delete(Path f, boolean recursive) throws IOException {
+LOG.debug("Delete path {} - recursive {}", f, recursive);
+FileStatus status;
+try {
+  status = getFileStatus(f);
+} catch (FileNotFoundException ex) {
+  LOG.warn("delete: Path does not exist: {}", f);
+  return false;
+}
+
+String key = pathToKey(f);
+boolean result;
+
+if (status.isDirectory()) {
+  LOG.debug("delete: Path is a directory: {}", f);
+  key = addTrailingSlashIfNeeded(key);
+
+  if (key.equals("/")) {
+LOG.warn("Cannot delete root directory.");
+return false;
+  }
+
+  result = innerDelete(f, recursive);
+} else {
+  LOG.debug("delete: Path is a file: {}", f);
+  result = deleteObject(key);
+}
+
+if (result) {
+  // If this delete operation removes all files/directories from the
+  // parent directory, then an empty parent directory must be created.
+  Path parent = f.getParent();
+  if (parent != null && !parent.isRoot()) {
+createFakeDirectoryIfNecessary(parent);
+  }
+}
+
+return result;
+  }
+
+  /**
+   * Create a fake parent directory key if it does not already exist and no
+   * other child of this parent directory exists.
+   * @param f path to the fake parent directory

[3/4] hadoop git commit: HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume Failures. Contributed by Ayush Saxena.

2018-11-12 Thread arp
HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume 
Failures. Contributed by Ayush Saxena.

(cherry picked from commit b6d4e19f34f474ea8068ebb374f55e0db2f714da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6ef824e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6ef824e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6ef824e

Branch: refs/heads/branch-3.1
Commit: b6ef824e760b2bfd8156e993dbf46fe6244c3dac
Parents: 29752ed
Author: Arpit Agarwal 
Authored: Mon Nov 12 15:31:42 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 12 15:32:05 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6ef824e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 4495b99..36a9578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -427,7 +427,7 @@ No nodes are decommissioning
 {#helper_date_tostring value="{lastVolumeFailureDate}"/}
 {volfails}
 {estimatedCapacityLostTotal|fmt_bytes}
-{#failedStorageLocations}{.}{@sep}{/sep}{/failedStorageLocations}
+{#failedStorageIDs}{.}{@sep}{/sep}{/failedStorageIDs}
   
   {/LiveNodes}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/4] hadoop git commit: HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume Failures. Contributed by Ayush Saxena.

2018-11-12 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 50ce9fd4e -> f514c0890
  refs/heads/branch-3.1 29752ed58 -> b6ef824e7
  refs/heads/branch-3.2 d4afeac28 -> e6532f7eb
  refs/heads/trunk e269c3fb5 -> b6d4e19f3


HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume 
Failures. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6d4e19f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6d4e19f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6d4e19f

Branch: refs/heads/trunk
Commit: b6d4e19f34f474ea8068ebb374f55e0db2f714da
Parents: e269c3f
Author: Arpit Agarwal 
Authored: Mon Nov 12 15:31:42 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 12 15:31:42 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6d4e19f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index f993ae7f..1caa4e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -429,7 +429,7 @@ No nodes are decommissioning
 {#helper_date_tostring value="{lastVolumeFailureDate}"/}
 {volfails}
 {estimatedCapacityLostTotal|fmt_bytes}
-{#failedStorageLocations}{.}{@sep}{/sep}{/failedStorageLocations}
+{#failedStorageIDs}{.}{@sep}{/sep}{/failedStorageIDs}
   
   {/LiveNodes}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/4] hadoop git commit: HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume Failures. Contributed by Ayush Saxena.

2018-11-12 Thread arp
HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume 
Failures. Contributed by Ayush Saxena.

(cherry picked from commit b6d4e19f34f474ea8068ebb374f55e0db2f714da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f514c089
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f514c089
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f514c089

Branch: refs/heads/branch-3.0
Commit: f514c0890060a3d098b1bcd440d2634d84b9b959
Parents: 50ce9fd
Author: Arpit Agarwal 
Authored: Mon Nov 12 15:31:42 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 12 15:31:56 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f514c089/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index ec801e4..bbe6082 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -426,7 +426,7 @@ No nodes are decommissioning
 {#helper_date_tostring value="{lastVolumeFailureDate}"/}
 {volfails}
 {estimatedCapacityLostTotal|fmt_bytes}
-{#failedStorageLocations}{.}{@sep}{/sep}{/failedStorageLocations}
+{#failedStorageIDs}{.}{@sep}{/sep}{/failedStorageIDs}
   
   {/LiveNodes}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/4] hadoop git commit: HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume Failures. Contributed by Ayush Saxena.

2018-11-12 Thread arp
HDFS-14065. Failed Storage Locations shows nothing in the Datanode Volume 
Failures. Contributed by Ayush Saxena.

(cherry picked from commit b6d4e19f34f474ea8068ebb374f55e0db2f714da)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6532f7e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6532f7e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6532f7e

Branch: refs/heads/branch-3.2
Commit: e6532f7eb2bdd6d682b040fa068e1b6b51b2263f
Parents: d4afeac
Author: Arpit Agarwal 
Authored: Mon Nov 12 15:31:42 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 12 15:32:14 2018 -0800

--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6532f7e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index f993ae7f..1caa4e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -429,7 +429,7 @@ No nodes are decommissioning
 {#helper_date_tostring value="{lastVolumeFailureDate}"/}
 {volfails}
 {estimatedCapacityLostTotal|fmt_bytes}
-{#failedStorageLocations}{.}{@sep}{/sep}{/failedStorageLocations}
+{#failedStorageIDs}{.}{@sep}{/sep}{/failedStorageIDs}
   
   {/LiveNodes}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-794. Add configs to set StateMachineData write timeout in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-11-05 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 942693bdd -> 5ddefdd50


HDDS-794. Add configs to set StateMachineData write timeout in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ddefdd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ddefdd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ddefdd5

Branch: refs/heads/trunk
Commit: 5ddefdd50751ed316f2eb9046f294bbdcdfb2428
Parents: 942693b
Author: Arpit Agarwal 
Authored: Mon Nov 5 10:10:10 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 5 10:41:28 2018 -0800

--
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java |  6 ++
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java  |  9 +
 .../common/src/main/resources/ozone-default.xml   |  7 +++
 .../server/ratis/ContainerStateMachine.java   | 18 --
 .../server/ratis/XceiverServerRatis.java  | 14 ++
 .../container/keyvalue/helpers/ChunkUtils.java|  2 ++
 .../container/keyvalue/impl/ChunkManagerImpl.java |  3 ++-
 7 files changed, 56 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ddefdd5/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 56692af..38eec61 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -79,6 +79,12 @@ public final class ScmConfigKeys {
   "dfs.container.ratis.segment.preallocated.size";
   public static final int
   DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 
1024;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+  "dfs.container.ratis.statemachinedata.sync.timeout";
+  public static final TimeDuration
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+  TimeDuration.valueOf(10, TimeUnit.SECONDS);
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ddefdd5/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 3b4f017..54b1cf8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -229,6 +229,15 @@ public final class OzoneConfigKeys {
   = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
   public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
   = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
+
+  // config settings to enable stateMachineData write timeout
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
+  public static final TimeDuration
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
+
   public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
   = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ddefdd5/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index eb68662..5ff60eb 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -53,6 +53,13 @@
 
   
   
+dfs.container.ratis.statemachinedata.sync.timeout
+10s
+OZONE, DEBUG, CONTAINER, RATIS
+Timeout for StateMachine data writes by Ratis.
+
+  
+  
 dfs.container.ratis.datanode.storage.dir
 
 OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS

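For illustration, a minimal sketch of reading the new timeout on the server side. The method and class below are hypothetical; the config key constant and the 10 second default match the patch above.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

final class StateMachineDataTimeoutSketch {
  // Returns the configured StateMachineData sync timeout in milliseconds.
  static long readSyncTimeoutMillis(Configuration conf) {
    return conf.getTimeDuration(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT,
        TimeUnit.SECONDS.toMillis(10),    // 10s, the default set in ozone-default.xml
        TimeUnit.MILLISECONDS);
  }
}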

hadoop git commit: HDDS-794. Add configs to set StateMachineData write timeout in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-11-05 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 db90350c9 -> 53d4aefae


HDDS-794. Add configs to set StateMachineData write timeout in 
ContainerStateMachine. Contributed by Shashikant Banerjee.

(cherry picked from commit 408f59caa9321be8a55afe44b1811c5dacf23206)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53d4aefa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53d4aefa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53d4aefa

Branch: refs/heads/ozone-0.3
Commit: 53d4aefae8490acbd3e64dd791ffbe17afaf91c4
Parents: db90350
Author: Arpit Agarwal 
Authored: Mon Nov 5 10:10:10 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 5 10:10:17 2018 -0800

--
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java |  6 ++
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java  |  9 +
 .../common/src/main/resources/ozone-default.xml   |  7 +++
 .../server/ratis/ContainerStateMachine.java   | 18 --
 .../server/ratis/XceiverServerRatis.java  | 14 ++
 .../container/keyvalue/helpers/ChunkUtils.java|  2 ++
 .../container/keyvalue/impl/ChunkManagerImpl.java |  3 ++-
 7 files changed, 56 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d4aefa/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index f95b748..11e6a23 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -74,6 +74,12 @@ public final class ScmConfigKeys {
   "dfs.container.ratis.segment.preallocated.size";
   public static final int
   DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = 128 * 1024 * 
1024;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+  "dfs.container.ratis.statemachinedata.sync.timeout";
+  public static final TimeDuration
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+  TimeDuration.valueOf(10, TimeUnit.SECONDS);
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d4aefa/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index c931dcf..5e9fe08 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -232,6 +232,15 @@ public final class OzoneConfigKeys {
   = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY;
   public static final int DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT
   = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT;
+
+  // config settings to enable stateMachineData write timeout
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT;
+  public static final TimeDuration
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
+  ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
+
   public static final int DFS_CONTAINER_CHUNK_MAX_SIZE
   = ScmConfigKeys.OZONE_SCM_CHUNK_MAX_SIZE;
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d4aefa/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 237f8d8..2e250fa 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -53,6 +53,13 @@
 
   
   
+dfs.container.ratis.statemachinedata.sync.timeout
+10s
+OZONE, DEBUG, CONTAINER, RATIS
+Timeout for StateMachine data writes by Ratis.
+
+  
+  
 dfs.container.ratis.datanode.storage.dir
 
 OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS


hadoop git commit: HDDS-797. If DN is started before SCM, it does not register. Contributed by Hanisha Koneru.

2018-11-05 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 732df81a6 -> db90350c9


HDDS-797. If DN is started before SCM, it does not register. Contributed by 
Hanisha Koneru.

(cherry picked from commit c8ca1747c08d905cdefaa5566dd58d770a6b71bd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db90350c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db90350c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db90350c

Branch: refs/heads/ozone-0.3
Commit: db90350c97cfe1f3cffb2f1e6df53e353e1c25af
Parents: 732df81
Author: Arpit Agarwal 
Authored: Mon Nov 5 09:40:00 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 5 10:07:43 2018 -0800

--
 .../states/endpoint/VersionEndpointTask.java| 79 +++-
 .../hadoop/ozone/TestMiniOzoneCluster.java  | 52 -
 2 files changed, 94 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db90350c/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 79fa174..2d00da8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -64,50 +64,57 @@ public class VersionEndpointTask implements
   public EndpointStateMachine.EndPointStates call() throws Exception {
 rpcEndPoint.lock();
 try{
-  SCMVersionResponseProto versionResponse =
-  rpcEndPoint.getEndPoint().getVersion(null);
-  VersionResponse response = VersionResponse.getFromProtobuf(
-  versionResponse);
-  rpcEndPoint.setVersion(response);
+  if (rpcEndPoint.getState().equals(
+  EndpointStateMachine.EndPointStates.GETVERSION)) {
+SCMVersionResponseProto versionResponse =
+rpcEndPoint.getEndPoint().getVersion(null);
+VersionResponse response = VersionResponse.getFromProtobuf(
+versionResponse);
+rpcEndPoint.setVersion(response);
 
-  String scmId = response.getValue(OzoneConsts.SCM_ID);
-  String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
+String scmId = response.getValue(OzoneConsts.SCM_ID);
+String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
 
-  // Check volumes
-  VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-  volumeSet.writeLock();
-  try {
-Map volumeMap = volumeSet.getVolumeMap();
+// Check volumes
+VolumeSet volumeSet = ozoneContainer.getVolumeSet();
+volumeSet.writeLock();
+try {
+  Map volumeMap = volumeSet.getVolumeMap();
 
-Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
-"null");
-Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
-"cannot be null");
+  Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " 
+
+  "null");
+  Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
+  "cannot be null");
 
-// If version file does not exist create version file and also set 
scmId
-for (Map.Entry entry : volumeMap.entrySet()) {
-  HddsVolume hddsVolume = entry.getValue();
-  boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
-  clusterId, LOG);
-  if (!result) {
-volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+  // If version file does not exist create version file and also set 
scmId
+
+  for (Map.Entry entry : volumeMap.entrySet()) {
+HddsVolume hddsVolume = entry.getValue();
+boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
+clusterId, LOG);
+if (!result) {
+  volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+}
   }
+  if (volumeSet.getVolumesList().size() == 0) {
+// All volumes are in inconsistent state
+throw new DiskOutOfSpaceException("All configured Volumes are in " 
+
+"Inconsistent State");
+  }
+} finally {
+  volumeSet.writeUnlock();
 }
-if (volumeSet.getVolumesList().size() == 0) {
-  // All volumes are in inconsistent 

hadoop git commit: HDDS-797. If DN is started before SCM, it does not register. Contributed by Hanisha Koneru.

2018-11-05 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 15df2e7a7 -> c8ca1747c


HDDS-797. If DN is started before SCM, it does not register. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8ca1747
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8ca1747
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8ca1747

Branch: refs/heads/trunk
Commit: c8ca1747c08d905cdefaa5566dd58d770a6b71bd
Parents: 15df2e7
Author: Arpit Agarwal 
Authored: Mon Nov 5 09:40:00 2018 -0800
Committer: Arpit Agarwal 
Committed: Mon Nov 5 09:40:00 2018 -0800

--
 .../states/endpoint/VersionEndpointTask.java| 79 +++-
 .../hadoop/ozone/TestMiniOzoneCluster.java  | 52 -
 2 files changed, 94 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8ca1747/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
--
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 79fa174..2d00da8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -64,50 +64,57 @@ public class VersionEndpointTask implements
   public EndpointStateMachine.EndPointStates call() throws Exception {
 rpcEndPoint.lock();
 try{
-  SCMVersionResponseProto versionResponse =
-  rpcEndPoint.getEndPoint().getVersion(null);
-  VersionResponse response = VersionResponse.getFromProtobuf(
-  versionResponse);
-  rpcEndPoint.setVersion(response);
+  if (rpcEndPoint.getState().equals(
+  EndpointStateMachine.EndPointStates.GETVERSION)) {
+SCMVersionResponseProto versionResponse =
+rpcEndPoint.getEndPoint().getVersion(null);
+VersionResponse response = VersionResponse.getFromProtobuf(
+versionResponse);
+rpcEndPoint.setVersion(response);
 
-  String scmId = response.getValue(OzoneConsts.SCM_ID);
-  String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
+String scmId = response.getValue(OzoneConsts.SCM_ID);
+String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
 
-  // Check volumes
-  VolumeSet volumeSet = ozoneContainer.getVolumeSet();
-  volumeSet.writeLock();
-  try {
-Map volumeMap = volumeSet.getVolumeMap();
+// Check volumes
+VolumeSet volumeSet = ozoneContainer.getVolumeSet();
+volumeSet.writeLock();
+try {
+  Map volumeMap = volumeSet.getVolumeMap();
 
-Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
-"null");
-Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
-"cannot be null");
+  Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " 
+
+  "null");
+  Preconditions.checkNotNull(clusterId, "Reply from SCM: clusterId " +
+  "cannot be null");
 
-// If version file does not exist create version file and also set 
scmId
-for (Map.Entry entry : volumeMap.entrySet()) {
-  HddsVolume hddsVolume = entry.getValue();
-  boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
-  clusterId, LOG);
-  if (!result) {
-volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+  // If version file does not exist create version file and also set 
scmId
+
+  for (Map.Entry entry : volumeMap.entrySet()) {
+HddsVolume hddsVolume = entry.getValue();
+boolean result = HddsVolumeUtil.checkVolume(hddsVolume, scmId,
+clusterId, LOG);
+if (!result) {
+  volumeSet.failVolume(hddsVolume.getHddsRootDir().getPath());
+}
   }
+  if (volumeSet.getVolumesList().size() == 0) {
+// All volumes are in inconsistent state
+throw new DiskOutOfSpaceException("All configured Volumes are in " 
+
+"Inconsistent State");
+  }
+} finally {
+  volumeSet.writeUnlock();
 }
-if (volumeSet.getVolumesList().size() == 0) {
-  // All volumes are in inconsistent state
-  throw new DiskOutOfSpaceException("All configured Volumes are in 

hadoop git commit: HDDS-759. Create config settings for SCM and OM DB directories. Contributed by Arpit Agarwal.

2018-10-31 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/trunk 478b2cba0 -> 08bb0362e


HDDS-759. Create config settings for SCM and OM DB directories. Contributed by 
Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08bb0362
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08bb0362
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08bb0362

Branch: refs/heads/trunk
Commit: 08bb0362e0c57f562e2f2e366cba725649d1d9c8
Parents: 478b2cb
Author: Arpit Agarwal 
Authored: Wed Oct 31 11:23:15 2018 -0700
Committer: Arpit Agarwal 
Committed: Wed Oct 31 11:23:15 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  7 ++
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  2 +-
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  5 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  3 -
 .../common/src/main/resources/ozone-default.xml | 45 +++---
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  | 16 ++--
 .../ozone/container/common/SCMTestUtils.java|  3 +-
 .../common/TestDatanodeStateMachine.java|  3 +-
 .../container/ozoneimpl/TestOzoneContainer.java |  4 +-
 .../apache/hadoop/hdds/server/ServerUtils.java  | 49 +--
 .../hdds/scm/block/DeletedBlockLogImpl.java | 10 +--
 .../hdds/scm/container/SCMContainerManager.java |  4 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java   |  6 +-
 .../hadoop/hdds/scm/server/SCMStorage.java  |  4 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java| 50 +++
 .../hadoop/hdds/scm/block/TestBlockManager.java |  4 +-
 .../hdds/scm/block/TestDeletedBlockLog.java |  4 +-
 .../TestCloseContainerEventHandler.java |  4 +-
 .../container/TestContainerReportHandler.java   |  4 +-
 .../scm/container/TestSCMContainerManager.java  |  4 +-
 .../hdds/scm/node/TestContainerPlacement.java   |  4 +-
 .../hdds/scm/node/TestDeadNodeHandler.java  |  4 +-
 .../hadoop/hdds/scm/node/TestNodeManager.java   |  4 +-
 .../ozone/container/common/TestEndPoint.java|  3 +-
 .../java/org/apache/hadoop/ozone/OmUtils.java   | 42 +
 .../apache/hadoop/ozone/om/OMConfigKeys.java|  3 +
 .../org/apache/hadoop/ozone/TestOmUtils.java| 91 
 .../scm/pipeline/TestSCMPipelineManager.java|  4 +-
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  5 +-
 .../hadoop/ozone/TestMiniOzoneCluster.java  |  7 +-
 .../ozone/TestStorageContainerManager.java  |  9 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  3 +-
 .../org/apache/hadoop/ozone/om/OMStorage.java   |  5 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  5 +-
 .../apache/hadoop/ozone/om/TestOmSQLCli.java|  3 +-
 .../hadoop/ozone/scm/TestContainerSQLCli.java   |  3 +-
 36 files changed, 343 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bb0362/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 210b075..abacafe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -97,4 +97,11 @@ public final class HddsConfigKeys {
   "hdds.lock.max.concurrency";
   public static final int HDDS_LOCK_MAX_CONCURRENCY_DEFAULT = 100;
 
+  // This configuration setting is used as a fallback location by all
+  // Ozone/HDDS services for their metadata. It is useful as a single
+  // config point for test/PoC clusters.
+  //
+  // In any real cluster where performance matters, the SCM, OM and DN
+  // metadata locations must be configured explicitly.
+  public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08bb0362/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 09fc75b..89edfdd 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -305,7 +305,7 @@ public final class HddsUtils {
   public static String getDatanodeIdFilePath(Configuration conf) {
 String dataNodeIDPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID);
 if (dataNodeIDPath == null) {
-  String metaPath = conf.get(OzoneConfigKeys.OZONE_METADATA_DIRS);
+  String metaPath = 
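
For illustration, the lookup pattern this change standardises on, as a minimal sketch: prefer a service-specific directory and fall back to ozone.metadata.dirs. The method and the service-specific key parameter are hypothetical; the fallback key and its intent as a single test/PoC config point come from the patch above.

import org.apache.hadoop.conf.Configuration;

final class OzoneMetadataDirSketch {
  static String resolveDir(Configuration conf, String serviceSpecificKey) {
    String dir = conf.get(serviceSpecificKey);
    if (dir == null || dir.isEmpty()) {
      // Fallback location shared by SCM, OM and DN metadata; real clusters
      // should still configure the service-specific keys explicitly.
      dir = conf.get("ozone.metadata.dirs");
    }
    return dir;
  }
}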

[1/2] hadoop git commit: HDDS-620. ozone.scm.client.address should be an optional setting. Contributed by chencan and Arpit Agarwal.

2018-10-29 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/ozone-0.3 ad61bc8d9 -> da32f6537
  refs/heads/trunk 3655e573e -> 496f0ffe9


HDDS-620. ozone.scm.client.address should be an optional setting. Contributed 
by chencan and Arpit Agarwal.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/496f0ffe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/496f0ffe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/496f0ffe

Branch: refs/heads/trunk
Commit: 496f0ffe9017b11d0d7c071bad259d132687c656
Parents: 3655e57
Author: Arpit Agarwal 
Authored: Mon Oct 29 17:14:15 2018 -0700
Committer: Arpit Agarwal 
Committed: Mon Oct 29 17:14:18 2018 -0700

--
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |  40 -
 .../apache/hadoop/hdds/scm/HddsServerUtil.java  |  16 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java| 153 +++
 .../ozone/client/TestHddsClientUtils.java   | 137 +++--
 4 files changed, 325 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
--
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 7a42a10..09fc75b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -87,10 +87,22 @@ public final class HddsUtils {
* @return Target InetSocketAddress for the SCM client endpoint.
*/
   public static InetSocketAddress getScmAddressForClients(Configuration conf) {
-final Optional host = getHostNameFromConfigKeys(conf,
+Optional host = getHostNameFromConfigKeys(conf,
 ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
 
 if (!host.isPresent()) {
+  // Fallback to Ozone SCM names.
+  Collection scmAddresses = getSCMAddresses(conf);
+  if (scmAddresses.size() > 1) {
+throw new IllegalArgumentException(
+ScmConfigKeys.OZONE_SCM_NAMES +
+" must contain a single hostname. Multiple SCM hosts are " +
+"currently unsupported");
+  }
+  host = Optional.of(scmAddresses.iterator().next().getHostName());
+}
+
+if (!host.isPresent()) {
   throw new IllegalArgumentException(
   ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY + " must be defined. See"
   + " https://wiki.apache.org/hadoop/Ozone#Configuration for "
@@ -109,7 +121,8 @@ public final class HddsUtils {
* Retrieve the socket address that should be used by clients to connect
* to the SCM for block service. If
* {@link ScmConfigKeys#OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY} is not defined
-   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used.
+   * then {@link ScmConfigKeys#OZONE_SCM_CLIENT_ADDRESS_KEY} is used. If 
neither
+   * is defined then {@link ScmConfigKeys#OZONE_SCM_NAMES} is used.
*
* @param conf
* @return Target InetSocketAddress for the SCM block client endpoint.
@@ -123,13 +136,26 @@ public final class HddsUtils {
 if (!host.isPresent()) {
   host = getHostNameFromConfigKeys(conf,
   ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
-  if (!host.isPresent()) {
+}
+
+if (!host.isPresent()) {
+  // Fallback to Ozone SCM names.
+  Collection scmAddresses = getSCMAddresses(conf);
+  if (scmAddresses.size() > 1) {
 throw new IllegalArgumentException(
-ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
-+ " must be defined. See"
-+ " https://wiki.apache.org/hadoop/Ozone#Configuration;
-+ " for details on configuring Ozone.");
+ScmConfigKeys.OZONE_SCM_NAMES +
+" must contain a single hostname. Multiple SCM hosts are " +
+"currently unsupported");
   }
+  host = Optional.of(scmAddresses.iterator().next().getHostName());
+}
+
+if (!host.isPresent()) {
+  throw new IllegalArgumentException(
+  ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY
+  + " must be defined. See"
+  + " https://wiki.apache.org/hadoop/Ozone#Configuration;
+  + " for details on configuring Ozone.");
 }
 
 final Optional port = getPortNumberFromConfigKeys(conf,

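A hedged usage sketch of the fallback added above: with only ozone.scm.names set, and set to a single host, a client can resolve the SCM endpoint without defining ozone.scm.client.address. The host name is a placeholder.

import java.net.InetSocketAddress;
import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;

public final class ScmAddressFallbackSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "scm.example.com");  // exactly one SCM host
    // ozone.scm.client.address is deliberately left unset; per the patch, more
    // than one entry in ozone.scm.names makes the lookup throw.
    InetSocketAddress scm = HddsUtils.getScmAddressForClients(conf);
    System.out.println("SCM client endpoint: " + scm);
  }
}
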
http://git-wip-us.apache.org/repos/asf/hadoop/blob/496f0ffe/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/hdds/scm/HddsServerUtil.java
 
