hadoop git commit: HADOOP-13817. Add a finite shell command timeout to ShellBasedUnixGroupsMapping. (harsh)

2017-02-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b5a8c49cf -> c097d0343


HADOOP-13817. Add a finite shell command timeout to 
ShellBasedUnixGroupsMapping. (harsh)

(cherry picked from commit e8694deb6ad180449f8ce6c1c8b4f84873c0587a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c097d034
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c097d034
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c097d034

Branch: refs/heads/branch-2
Commit: c097d0343ca07b645fdb6fd52f75d8b46790c8ab
Parents: b5a8c49
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Nov 14 15:59:58 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Feb 24 21:34:31 2017 +0530

--
 .../fs/CommonConfigurationKeysPublic.java   |  15 +++
 .../security/ShellBasedUnixGroupsMapping.java   | 114 +---
 .../main/java/org/apache/hadoop/util/Shell.java |  19 ++-
 .../src/main/resources/core-default.xml |  13 ++
 .../hadoop/security/TestGroupsCaching.java  |  19 +--
 .../TestShellBasedUnixGroupsMapping.java| 135 ++-
 6 files changed, 277 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c097d034/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 84cdccf..4072fa6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -526,6 +526,21 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+  "hadoop.security.groups.shell.command.timeout";
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
+  public static final long
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+  0L;
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
 "hadoop.security.authentication";
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c097d034/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 9b80be9..4146e7b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,17 +18,25 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -37,11 +45,28 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public class ShellBasedUnixGroupsMapping
+public class ShellBasedUnixGroupsMapping extends Configured
   implements GroupMappingServiceProvider {
-  
-  private static final Log LOG =
-LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
+
+  @VisibleForTesting
 +  protected static final Logger LOG =
 +  LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);

hadoop git commit: HADOOP-13817. Add a finite shell command timeout to ShellBasedUnixGroupsMapping. (harsh)

2017-02-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 50decd361 -> e8694deb6


HADOOP-13817. Add a finite shell command timeout to 
ShellBasedUnixGroupsMapping. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8694deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8694deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8694deb

Branch: refs/heads/trunk
Commit: e8694deb6ad180449f8ce6c1c8b4f84873c0587a
Parents: 50decd3
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Nov 14 15:59:58 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Feb 24 21:34:00 2017 +0530

--
 .../fs/CommonConfigurationKeysPublic.java   |  15 +++
 .../security/ShellBasedUnixGroupsMapping.java   | 114 +---
 .../main/java/org/apache/hadoop/util/Shell.java |  19 ++-
 .../src/main/resources/core-default.xml |  13 ++
 .../hadoop/security/TestGroupsCaching.java  |  19 +--
 .../TestShellBasedUnixGroupsMapping.java| 135 ++-
 6 files changed, 277 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index f23dd51..e1feda1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -517,6 +517,21 @@ public class CommonConfigurationKeysPublic {
* 
* core-default.xml
*/
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+  "hadoop.security.groups.shell.command.timeout";
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
+  public static final long
+  HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+  0L;
+  /**
+   * @see
+   * 
+   * core-default.xml
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
 "hadoop.security.authentication";
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 9b80be9..4146e7b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,17 +18,25 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -37,11 +45,28 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public class ShellBasedUnixGroupsMapping
+public class ShellBasedUnixGroupsMapping extends Configured
   implements GroupMappingServiceProvider {
-  
-  private static final Log LOG =
-LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
+
+  @VisibleForTesting
+  protected static final Logger LOG =
+  LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
+
+  private long timeout = 0L;
+  private static final

hadoop git commit: HADOOP-1381. The distance between sync blocks in SequenceFiles should be configurable rather than hard coded to 2000 bytes. Contributed by Harsh J.

2016-11-25 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee3d437a3 -> 07825f2b4


HADOOP-1381. The distance between sync blocks in SequenceFiles should be 
configurable rather than hard coded to 2000 bytes. Contributed by Harsh J.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07825f2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07825f2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07825f2b

Branch: refs/heads/trunk
Commit: 07825f2b49384dbec92bfae87ea661cef9ffab49
Parents: ee3d437
Author: Harsh J <ha...@cloudera.com>
Authored: Wed Oct 26 20:04:33 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Nov 25 22:22:23 2016 +0530

--
 .../java/org/apache/hadoop/io/SequenceFile.java |  74 
 .../apache/hadoop/io/TestSequenceFileSync.java  | 113 +++
 2 files changed, 146 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07825f2b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
index 2ac1389..c510ff7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
@@ -24,6 +24,7 @@ import java.util.*;
 import java.rmi.server.UID;
 import java.security.MessageDigest;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
@@ -146,7 +147,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  *   
  * 
  * 
- * A sync-marker every few 100 bytes or so.
+ * A sync-marker every few 100 kilobytes or so.
  * 
  * 
  *
@@ -165,7 +166,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  *   
  * 
  * 
- * A sync-marker every few 100 bytes or so.
+ * A sync-marker every few 100 kilobytes or so.
  * 
  * 
  * 
@@ -217,8 +218,11 @@ public class SequenceFile {
   private static final int SYNC_HASH_SIZE = 16;   // number of bytes in hash 
   private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
 
-  /** The number of bytes between sync points.*/
-  public static final int SYNC_INTERVAL = 100*SYNC_SIZE; 
+  /**
+   * The number of bytes between sync points. 100 KB, default.
+   * Computed as 5 KB * 20 = 100 KB
+   */
+  public static final int SYNC_INTERVAL = 5 * 1024 * SYNC_SIZE; // 5KB*(16+4)
 
   /** 
* The compression type used to compress key/value pairs in the 
@@ -856,6 +860,9 @@ public class SequenceFile {
 // starts and ends by scanning for this value.
 long lastSyncPos; // position of last sync
 byte[] sync;  // 16 random bytes
+@VisibleForTesting
+int syncInterval;
+
 {
   try {   
 MessageDigest digester = MessageDigest.getInstance("MD5");
@@ -987,7 +994,16 @@ public class SequenceFile {
 private static Option filesystem(FileSystem fs) {
   return new SequenceFile.Writer.FileSystemOption(fs);
 }
-
+
+private static class SyncIntervalOption extends Options.IntegerOption
+implements Option {
+  SyncIntervalOption(int val) {
+// If a negative sync interval is provided,
+// fall back to the default sync interval.
+super(val < 0 ? SYNC_INTERVAL : val);
+  }
+}
+
 public static Option bufferSize(int value) {
   return new BufferSizeOption(value);
 }
@@ -1032,11 +1048,15 @@ public class SequenceFile {
 CompressionCodec codec) {
   return new CompressionOption(value, codec);
 }
-
+
+public static Option syncInterval(int value) {
+  return new SyncIntervalOption(value);
+}
+
 /**
  * Construct a uncompressed writer from a set of options.
  * @param conf the configuration to use
- * @param options the options used when creating the writer
+ * @param opts the options used when creating the writer
  * @throws IOException if it fails
  */
 Writer(Configuration conf, 
@@ -1062,6 +1082,8 @@ public class SequenceFile {
 Options.getOption(MetadataOption.class, opts);
   CompressionOption compressionTypeOption =
 Options.getOption(CompressionOption.class, opts);
+  SyncIntervalOption syncIntervalOption =
+  Options.getOption(SyncIntervalOption.class, opts);
   // 

hadoop git commit: MAPREDUCE-2398. MRBench: setting the baseDir parameter has no effect. Contributed by Michael Noll and Wilfred Spiegelenburg.

2016-04-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 75cf23835 -> 2d7e4a822


MAPREDUCE-2398. MRBench: setting the baseDir parameter has no effect. 
Contributed by Michael Noll and Wilfred Spiegelenburg.

(cherry picked from commit 7da5847cf10bc61f2131d578f16ee37cb32c4639)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d7e4a82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d7e4a82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d7e4a82

Branch: refs/heads/branch-2
Commit: 2d7e4a8228e6f42f5c20506f095fe43b2c8f53d8
Parents: 75cf238
Author: Harsh J <ha...@cloudera.com>
Authored: Thu Apr 21 17:33:22 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Thu Apr 21 17:54:24 2016 +0530

--
 .../java/org/apache/hadoop/mapred/MRBench.java| 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7e4a82/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
index bb287cf..0516445 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
@@ -41,10 +41,13 @@ import org.apache.hadoop.util.ToolRunner;
 public class MRBench extends Configured implements Tool{
   
   private static final Log LOG = LogFactory.getLog(MRBench.class);
+  private static final String DEFAULT_INPUT_SUB = "mr_input";
+  private static final String DEFAULT_OUTPUT_SUB = "mr_output";
+
   private static Path BASE_DIR =
 new Path(System.getProperty("test.build.data","/benchmarks/MRBench"));
-  private static Path INPUT_DIR = new Path(BASE_DIR, "mr_input");
-  private static Path OUTPUT_DIR = new Path(BASE_DIR, "mr_output");
+  private static Path INPUT_DIR = new Path(BASE_DIR, DEFAULT_INPUT_SUB);
+  private static Path OUTPUT_DIR = new Path(BASE_DIR, DEFAULT_OUTPUT_SUB);
   
   public static enum Order {RANDOM, ASCENDING, DESCENDING}; 
   
@@ -243,6 +246,8 @@ public class MRBench extends Configured implements Tool{
 numRuns = Integer.parseInt(args[++i]);
   } else if (args[i].equals("-baseDir")) {
 BASE_DIR = new Path(args[++i]);
+INPUT_DIR = new Path(BASE_DIR, DEFAULT_INPUT_SUB);
+OUTPUT_DIR = new Path(BASE_DIR, DEFAULT_OUTPUT_SUB);
   } else if (args[i].equals("-maps")) {
 numMaps = Integer.parseInt(args[++i]);
   } else if (args[i].equals("-reduces")) {
@@ -283,14 +288,15 @@ public class MRBench extends Configured implements Tool{
 Path inputFile = new Path(INPUT_DIR, "input_" + (new Random()).nextInt() + 
".txt");
 generateTextFile(fs, inputFile, inputLines, inputSortOrder);
 
-// setup test output directory
-fs.mkdirs(BASE_DIR); 
 ArrayList execTimes = new ArrayList();
 try {
   execTimes = runJobInSequence(jobConf, numRuns);
 } finally {
-  // delete output -- should we really do this?
-  fs.delete(BASE_DIR, true);
+  // delete all generated data -- should we really do this?
+  // we don't know how much of the path was created for the run but this
+  // cleans up as much as we can
+  fs.delete(OUTPUT_DIR, true);
+  fs.delete(INPUT_DIR, true);
 }
 
 if (verbose) {



hadoop git commit: MAPREDUCE-2398. MRBench: setting the baseDir parameter has no effect. Contributed by Michael Noll and Wilfred Spiegelenburg.

2016-04-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 170c4fd4c -> 7da5847cf


MAPREDUCE-2398. MRBench: setting the baseDir parameter has no effect. 
Contributed by Michael Noll and Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7da5847c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7da5847c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7da5847c

Branch: refs/heads/trunk
Commit: 7da5847cf10bc61f2131d578f16ee37cb32c4639
Parents: 170c4fd
Author: Harsh J <ha...@cloudera.com>
Authored: Thu Apr 21 17:33:22 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Thu Apr 21 17:53:51 2016 +0530

--
 .../java/org/apache/hadoop/mapred/MRBench.java| 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7da5847c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
index 20d27fb..079fad7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
@@ -41,10 +41,13 @@ import org.apache.hadoop.util.ToolRunner;
 public class MRBench extends Configured implements Tool{
   
   private static final Log LOG = LogFactory.getLog(MRBench.class);
+  private static final String DEFAULT_INPUT_SUB = "mr_input";
+  private static final String DEFAULT_OUTPUT_SUB = "mr_output";
+
   private static Path BASE_DIR =
 new Path(System.getProperty("test.build.data","/benchmarks/MRBench"));
-  private static Path INPUT_DIR = new Path(BASE_DIR, "mr_input");
-  private static Path OUTPUT_DIR = new Path(BASE_DIR, "mr_output");
+  private static Path INPUT_DIR = new Path(BASE_DIR, DEFAULT_INPUT_SUB);
+  private static Path OUTPUT_DIR = new Path(BASE_DIR, DEFAULT_OUTPUT_SUB);
   
   public static enum Order {RANDOM, ASCENDING, DESCENDING}; 
   
@@ -243,6 +246,8 @@ public class MRBench extends Configured implements Tool{
 numRuns = Integer.parseInt(args[++i]);
   } else if (args[i].equals("-baseDir")) {
 BASE_DIR = new Path(args[++i]);
+INPUT_DIR = new Path(BASE_DIR, DEFAULT_INPUT_SUB);
+OUTPUT_DIR = new Path(BASE_DIR, DEFAULT_OUTPUT_SUB);
   } else if (args[i].equals("-maps")) {
 numMaps = Integer.parseInt(args[++i]);
   } else if (args[i].equals("-reduces")) {
@@ -283,14 +288,15 @@ public class MRBench extends Configured implements Tool{
 Path inputFile = new Path(INPUT_DIR, "input_" + (new Random()).nextInt() + 
".txt");
 generateTextFile(fs, inputFile, inputLines, inputSortOrder);
 
-// setup test output directory
-fs.mkdirs(BASE_DIR); 
 ArrayList execTimes = new ArrayList();
 try {
   execTimes = runJobInSequence(jobConf, numRuns);
 } finally {
-  // delete output -- should we really do this?
-  fs.delete(BASE_DIR, true);
+  // delete all generated data -- should we really do this?
+  // we don't know how much of the path was created for the run but this
+  // cleans up as much as we can
+  fs.delete(OUTPUT_DIR, true);
+  fs.delete(INPUT_DIR, true);
 }
 
 if (verbose) {



hadoop git commit: HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. Contributed by Aaron Peterson and harsh.

2016-04-01 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 10d8f8a39 -> 7c5b55d4e


HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. 
Contributed by Aaron Peterson and harsh.

(cherry picked from commit 256c82fe2981748cd0befc5490d8118d139908f9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c5b55d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c5b55d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c5b55d4

Branch: refs/heads/branch-2
Commit: 7c5b55d4e5f4317abed0259909b89a32297836f8
Parents: 10d8f8a
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Apr 1 14:18:10 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Apr 1 14:35:58 2016 +0530

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 70 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  7 ++
 2 files changed, 76 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c5b55d4/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 4cda7cd..33db86e 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -1186,7 +1187,7 @@ public class S3AFileSystem extends FileSystem {
 }
 
 ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-final ObjectMetadata dstom = srcom.clone();
+ObjectMetadata dstom = cloneObjectMetadata(srcom);
 if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
   dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
 }
@@ -1293,6 +1294,73 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+// This approach may be too brittle, especially if
+// in future there are new attributes added to ObjectMetadata
+// that we do not explicitly call to set here
+ObjectMetadata ret = new ObjectMetadata();
+
+// Non null attributes
+ret.setContentLength(source.getContentLength());
+
+// Possibly null attributes
+// Allowing nulls to pass breaks it during later use
+if (source.getCacheControl() != null) {
+  ret.setCacheControl(source.getCacheControl());
+}
+if (source.getContentDisposition() != null) {
+  ret.setContentDisposition(source.getContentDisposition());
+}
+if (source.getContentEncoding() != null) {
+  ret.setContentEncoding(source.getContentEncoding());
+}
+if (source.getContentMD5() != null) {
+  ret.setContentMD5(source.getContentMD5());
+}
+if (source.getContentType() != null) {
+  ret.setContentType(source.getContentType());
+}
+if (source.getExpirationTime() != null) {
+  ret.setExpirationTime(source.getExpirationTime());
+}
+if (source.getExpirationTimeRuleId() != null) {
+  ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+}
+if (source.getHttpExpiresDate() != null) {
+  ret.setHttpExpiresDate(source.getHttpExpiresDate());
+}
+if (source.getLastModified() != null) {
+  ret.setLastModified(source.getLastModified());
+}
+if (source.getOngoingRestore() != null) {
+  ret.setOngoingRestore(source.getOngoingRestore());
+}
+if (source.getRestoreExpirationTime() != null) {
+  ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+}
+if (source.getSSEAlgorithm() != null) {
+  ret.setSSEAlgorithm(source.getSSEAlgorithm());
+}
+if (source.getSSECustomerAlgorithm() != null) {
+  ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm());
+}
+if (source.getSSECustomerKeyMd5() != null) {
+  ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5());
+}
+
 +for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
 +  ret.addUserMetadata(e.getKey(), e.getValue());

hadoop git commit: HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. Contributed by Aaron Peterson and harsh.

2016-04-01 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3488c4f2c -> 256c82fe2


HADOOP-11687. Ignore x-* and response headers when copying an Amazon S3 object. 
Contributed by Aaron Peterson and harsh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/256c82fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/256c82fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/256c82fe

Branch: refs/heads/trunk
Commit: 256c82fe2981748cd0befc5490d8118d139908f9
Parents: 3488c4f
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Apr 1 14:18:10 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Apr 1 14:18:10 2016 +0530

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 70 +++-
 .../src/site/markdown/tools/hadoop-aws/index.md |  7 ++
 2 files changed, 76 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/256c82fe/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 7ab6c79..6afb05d 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
@@ -1128,7 +1129,7 @@ public class S3AFileSystem extends FileSystem {
 }
 
 ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-final ObjectMetadata dstom = srcom.clone();
+ObjectMetadata dstom = cloneObjectMetadata(srcom);
 if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
   dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
 }
@@ -1235,6 +1236,73 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+// This approach may be too brittle, especially if
+// in future there are new attributes added to ObjectMetadata
+// that we do not explicitly call to set here
+ObjectMetadata ret = new ObjectMetadata();
+
+// Non null attributes
+ret.setContentLength(source.getContentLength());
+
+// Possibly null attributes
+// Allowing nulls to pass breaks it during later use
+if (source.getCacheControl() != null) {
+  ret.setCacheControl(source.getCacheControl());
+}
+if (source.getContentDisposition() != null) {
+  ret.setContentDisposition(source.getContentDisposition());
+}
+if (source.getContentEncoding() != null) {
+  ret.setContentEncoding(source.getContentEncoding());
+}
+if (source.getContentMD5() != null) {
+  ret.setContentMD5(source.getContentMD5());
+}
+if (source.getContentType() != null) {
+  ret.setContentType(source.getContentType());
+}
+if (source.getExpirationTime() != null) {
+  ret.setExpirationTime(source.getExpirationTime());
+}
+if (source.getExpirationTimeRuleId() != null) {
+  ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+}
+if (source.getHttpExpiresDate() != null) {
+  ret.setHttpExpiresDate(source.getHttpExpiresDate());
+}
+if (source.getLastModified() != null) {
+  ret.setLastModified(source.getLastModified());
+}
+if (source.getOngoingRestore() != null) {
+  ret.setOngoingRestore(source.getOngoingRestore());
+}
+if (source.getRestoreExpirationTime() != null) {
+  ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+}
+if (source.getSSEAlgorithm() != null) {
+  ret.setSSEAlgorithm(source.getSSEAlgorithm());
+}
+if (source.getSSECustomerAlgorithm() != null) {
+  ret.setSSECustomerAlgorithm(source.getSSECustomerAlgorithm());
+}
+if (source.getSSECustomerKeyMd5() != null) {
+  ret.setSSECustomerKeyMd5(source.getSSECustomerKeyMd5());
+}
+
+for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
+  ret.addUserMetadata(e.getKey(), e.getValue());
+}
+return ret;
+  }
+
+  /**
* Return the number of bytes that large input file

hadoop git commit: HADOOP-11404. Clarify the "expected client Kerberos principal is null" authorization message. Contributed by Stephen Chu

2016-03-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95d30a6ec -> 2e32aa547


HADOOP-11404. Clarify the "expected client Kerberos principal is null" 
authorization message. Contributed by Stephen Chu

(cherry picked from commit 318c9b68b059981796f2742b4b7ee604ccdc47e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e32aa54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e32aa54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e32aa54

Branch: refs/heads/branch-2
Commit: 2e32aa5475ebf0d6d0e001f8b7c9192dae036156
Parents: 95d30a6
Author: Harsh J <ha...@cloudera.com>
Authored: Thu Mar 10 17:05:09 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Thu Mar 10 17:09:56 2016 +0530

--
 .../security/authorize/ServiceAuthorizationManager.java | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e32aa54/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 5d29516..9da95dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -116,11 +116,13 @@ public class ServiceAuthorizationManager {
 }
 if((clientPrincipal != null && 
!clientPrincipal.equals(user.getUserName())) || 
acls.length != 2  || !acls[0].isUserAllowed(user) || 
acls[1].isUserAllowed(user)) {
-  AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
-  + ", expected client Kerberos principal is " + clientPrincipal);
-  throw new AuthorizationException("User " + user + 
-  " is not authorized for protocol " + protocol + 
-  ", expected client Kerberos principal is " + clientPrincipal);
+  String cause = clientPrincipal != null ?
+  ": this service is only accessible by " + clientPrincipal :
+  ": denied by configured ACL";
+  AUDITLOG.warn(AUTHZ_FAILED_FOR + user
+  + " for protocol=" + protocol + cause);
+  throw new AuthorizationException("User " + user +
+  " is not authorized for protocol " + protocol + cause);
 }
 if (addr != null) {
   String hostAddress = addr.getHostAddress();



hadoop git commit: HADOOP-11404. Clarify the "expected client Kerberos principal is null" authorization message. Contributed by Stephen Chu

2016-03-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2e040d31c -> 318c9b68b


HADOOP-11404. Clarify the "expected client Kerberos principal is null" 
authorization message. Contributed by Stephen Chu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/318c9b68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/318c9b68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/318c9b68

Branch: refs/heads/trunk
Commit: 318c9b68b059981796f2742b4b7ee604ccdc47e5
Parents: 2e040d3
Author: Harsh J <ha...@cloudera.com>
Authored: Thu Mar 10 17:05:09 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Thu Mar 10 17:05:09 2016 +0530

--
 .../security/authorize/ServiceAuthorizationManager.java | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/318c9b68/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 5d29516..9da95dc 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -116,11 +116,13 @@ public class ServiceAuthorizationManager {
 }
 if((clientPrincipal != null && 
!clientPrincipal.equals(user.getUserName())) || 
acls.length != 2  || !acls[0].isUserAllowed(user) || 
acls[1].isUserAllowed(user)) {
-  AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
-  + ", expected client Kerberos principal is " + clientPrincipal);
-  throw new AuthorizationException("User " + user + 
-  " is not authorized for protocol " + protocol + 
-  ", expected client Kerberos principal is " + clientPrincipal);
+  String cause = clientPrincipal != null ?
+  ": this service is only accessible by " + clientPrincipal :
+  ": denied by configured ACL";
+  AUDITLOG.warn(AUTHZ_FAILED_FOR + user
+  + " for protocol=" + protocol + cause);
+  throw new AuthorizationException("User " + user +
+  " is not authorized for protocol " + protocol + cause);
 }
 if (addr != null) {
   String hostAddress = addr.getHostAddress();



hadoop git commit: HDFS-9521. TransferFsImage.receiveFile should account and log separate times for image download and fsync to disk. Contributed by Wellington Chevreuil

2016-03-07 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 78919f8c3 -> ed421cb12


HDFS-9521. TransferFsImage.receiveFile should account and log separate times 
for image download and fsync to disk. Contributed by Wellington Chevreuil

(cherry picked from commit fd1c09be3e7c67c188a1dd7e4fccb3d92dcc5b5b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed421cb1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed421cb1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed421cb1

Branch: refs/heads/branch-2
Commit: ed421cb12880882d58b6b40d9c2461747a860ab5
Parents: 78919f8
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Mar 7 13:49:47 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Mar 7 17:23:11 2016 +0530

--
 .../hdfs/server/namenode/TransferFsImage.java   | 33 
 1 file changed, 27 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed421cb1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index e6e75b0..db5a81d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -30,6 +30,7 @@ import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -458,6 +459,9 @@ public class TransferFsImage {
   MD5Hash advertisedDigest, String fsImageName, InputStream stream,
   DataTransferThrottler throttler) throws IOException {
 long startTime = Time.monotonicNow();
+Map<FileOutputStream, File> streamPathMap = new HashMap<>();
+StringBuilder xferStats = new StringBuilder();
+double xferCombined = 0;
 if (localPaths != null) {
   // If the local paths refer to directories, use the server-provided 
header
   // as the filename within that directory
@@ -494,7 +498,9 @@ public class TransferFsImage {
   LOG.warn("Overwriting existing file " + f
   + " with file downloaded from " + url);
 }
-outputStreams.add(new FileOutputStream(f));
+FileOutputStream fos = new FileOutputStream(f);
+outputStreams.add(fos);
+streamPathMap.put(fos, f);
   } catch (IOException ioe) {
 LOG.warn("Unable to download file " + f, ioe);
 // This will be null if we're downloading the fsimage to a file
@@ -527,11 +533,26 @@ public class TransferFsImage {
 }
   }
   finishedReceiving = true;
+  double xferSec = Math.max(
+ ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
+  long xferKb = received / 1024;
+  xferCombined += xferSec;
+  xferStats.append(
+  String.format(" The fsimage download took %.2fs at %.2f KB/s.",
+  xferSec, xferKb / xferSec));
 } finally {
   stream.close();
   for (FileOutputStream fos : outputStreams) {
+long flushStartTime = Time.monotonicNow();
 fos.getChannel().force(true);
 fos.close();
+double writeSec = Math.max(((float)
+   (flushStartTime - Time.monotonicNow())) / 1000.0, 0.001);
+xferCombined += writeSec;
+xferStats.append(String
+.format(" Synchronous (fsync) write to disk of " +
+ streamPathMap.get(fos).getAbsolutePath() +
+" took %.2fs.", writeSec));
   }
 
   // Something went wrong and did not finish reading.
@@ -550,11 +571,11 @@ public class TransferFsImage {
   advertisedSize);
   }
 }
-double xferSec = Math.max(
-((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
-long xferKb = received / 1024;
-LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
-xferSec, xferKb / xferSec));
+xferStats.insert(
+0, String.format(
+"Combined time for fsimage download and fsync " +
+"to all disks took %.2fs.", xferCombined));
+LOG.info(xferStats.toString());
 
 if (digester != null) {
   MD5Hash computedDigest = new MD5Hash(digester.digest());



hadoop git commit: HDFS-9521. TransferFsImage.receiveFile should account and log separate times for image download and fsync to disk. Contributed by Wellington Chevreuil

2016-03-07 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8ed2e060e -> fd1c09be3


HDFS-9521. TransferFsImage.receiveFile should account and log separate times 
for image download and fsync to disk. Contributed by Wellington Chevreuil


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd1c09be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd1c09be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd1c09be

Branch: refs/heads/trunk
Commit: fd1c09be3e7c67c188a1dd7e4fccb3d92dcc5b5b
Parents: 8ed2e06
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Mar 7 13:49:47 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Mar 7 17:14:51 2016 +0530

--
 .../hdfs/server/namenode/TransferFsImage.java   | 33 
 1 file changed, 27 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd1c09be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index eda6303..0186d8b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -30,6 +30,7 @@ import java.net.URL;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -481,6 +482,9 @@ public class TransferFsImage {
   MD5Hash advertisedDigest, String fsImageName, InputStream stream,
   DataTransferThrottler throttler) throws IOException {
 long startTime = Time.monotonicNow();
+Map<FileOutputStream, File> streamPathMap = new HashMap<>();
+StringBuilder xferStats = new StringBuilder();
+double xferCombined = 0;
 if (localPaths != null) {
   // If the local paths refer to directories, use the server-provided 
header
   // as the filename within that directory
@@ -517,7 +521,9 @@ public class TransferFsImage {
   LOG.warn("Overwriting existing file " + f
   + " with file downloaded from " + url);
 }
-outputStreams.add(new FileOutputStream(f));
+FileOutputStream fos = new FileOutputStream(f);
+outputStreams.add(fos);
+streamPathMap.put(fos, f);
   } catch (IOException ioe) {
 LOG.warn("Unable to download file " + f, ioe);
 // This will be null if we're downloading the fsimage to a file
@@ -550,11 +556,26 @@ public class TransferFsImage {
 }
   }
   finishedReceiving = true;
+  double xferSec = Math.max(
+ ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
+  long xferKb = received / 1024;
+  xferCombined += xferSec;
+  xferStats.append(
+  String.format(" The fsimage download took %.2fs at %.2f KB/s.",
+  xferSec, xferKb / xferSec));
 } finally {
   stream.close();
   for (FileOutputStream fos : outputStreams) {
+long flushStartTime = Time.monotonicNow();
 fos.getChannel().force(true);
 fos.close();
+double writeSec = Math.max(((float)
+   (flushStartTime - Time.monotonicNow())) / 1000.0, 0.001);
+xferCombined += writeSec;
+xferStats.append(String
+.format(" Synchronous (fsync) write to disk of " +
+ streamPathMap.get(fos).getAbsolutePath() +
+" took %.2fs.", writeSec));
   }
 
   // Something went wrong and did not finish reading.
@@ -573,11 +594,11 @@ public class TransferFsImage {
   advertisedSize);
   }
 }
-double xferSec = Math.max(
-((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
-long xferKb = received / 1024;
-LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
-xferSec, xferKb / xferSec));
+xferStats.insert(
+0, String.format(
+"Combined time for fsimage download and fsync " +
+"to all disks took %.2fs.", xferCombined));
+LOG.info(xferStats.toString());
 
 if (digester != null) {
   MD5Hash computedDigest = new MD5Hash(digester.digest());



hadoop git commit: MAPREDUCE-6648. Add yarn.app.mapreduce.am.log.level to mapred-default.xml (harsh)

2016-03-06 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 1a99d5ab9 -> 5f059e03f


MAPREDUCE-6648. Add yarn.app.mapreduce.am.log.level to mapred-default.xml 
(harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f059e03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f059e03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f059e03

Branch: refs/heads/branch-2.8
Commit: 5f059e03f54f882c9fb5fa6595b7e55ff1e57c95
Parents: 1a99d5a
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Mar 5 14:26:06 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Mar 7 13:13:43 2016 +0530

--
 .../src/main/resources/mapred-default.xml | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f059e03/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 47ab99f..71f14ea 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -347,6 +347,16 @@
 
 
 
+  yarn.app.mapreduce.am.log.level
+  INFO
+  The logging level for the MR ApplicationMaster. The allowed
+  levels are: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overriden if "mapreduce.job.log4j-properties-file"
+  is set.
+  
+
+
+
   mapreduce.map.log.level
   INFO
   The logging level for the map task. The allowed levels are:



hadoop git commit: HDFS-9257. improve error message for "Absolute path required" in INode.java to contain the rejected path. Contributed by Marcell Szabo

2015-10-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk a554701fe -> 52ac73f34


HDFS-9257. improve error message for "Absolute path required" in INode.java to 
contain the rejected path. Contributed by Marcell Szabo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52ac73f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52ac73f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52ac73f3

Branch: refs/heads/trunk
Commit: 52ac73f344e822e41457582f82abb4f35eba9dec
Parents: a554701
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Oct 16 17:12:35 2015 +0200
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Oct 16 17:12:35 2015 +0200

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/hdfs/server/namenode/INode.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52ac73f3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 652b7a0..858325f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -849,6 +849,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-9257. improve error message for "Absolute path required" in INode.java
+to contain the rejected path (Marcell Szabo via harsh)
+
 HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
     (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/52ac73f3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 64442fd..9d04fbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -748,7 +748,8 @@ public abstract class INode implements INodeAttributes, 
Diff.Element<byte[]> {
*/
   public static String[] getPathNames(String path) {
 if (path == null || !path.startsWith(Path.SEPARATOR)) {
-  throw new AssertionError("Absolute path required");
+  throw new AssertionError("Absolute path required, but got '"
+  + path + "'");
 }
 return StringUtils.split(path, Path.SEPARATOR_CHAR);
   }



hadoop git commit: HDFS-9257. improve error message for "Absolute path required" in INode.java to contain the rejected path. Contributed by Marcell Szabo

2015-10-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2862057f1 -> 76cfd833f


HDFS-9257. improve error message for "Absolute path required" in INode.java to 
contain the rejected path. Contributed by Marcell Szabo

(cherry picked from commit 52ac73f344e822e41457582f82abb4f35eba9dec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76cfd833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76cfd833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76cfd833

Branch: refs/heads/branch-2
Commit: 76cfd833f3f6c809aa933975fbcb188f49c6b8d6
Parents: 2862057
Author: Harsh J <ha...@cloudera.com>
Authored: Fri Oct 16 17:12:35 2015 +0200
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Oct 16 17:13:05 2015 +0200

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/hdfs/server/namenode/INode.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76cfd833/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fcccbe1..28c96eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -16,6 +16,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-9257. improve error message for "Absolute path required" in INode.java
+to contain the rejected path (Marcell Szabo via harsh)
+
 HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
     (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76cfd833/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 64442fd..9d04fbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -748,7 +748,8 @@ public abstract class INode implements INodeAttributes, 
Diff.Element<byte[]> {
*/
   public static String[] getPathNames(String path) {
 if (path == null || !path.startsWith(Path.SEPARATOR)) {
-  throw new AssertionError("Absolute path required");
+  throw new AssertionError("Absolute path required, but got '"
+  + path + "'");
 }
 return StringUtils.split(path, Path.SEPARATOR_CHAR);
   }



hadoop git commit: HADOOP-12458. Retries is typoed to spell Retires in parts of hadoop-yarn and hadoop-common. Contributed by Neelesh Srinivas Salian.

2015-10-03 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 caa970974 -> 21b4ba48c


HADOOP-12458. Retries is typoed to spell Retires in parts of hadoop-yarn and 
hadoop-common. Contributed by Neelesh Srinivas Salian.

(cherry picked from commit c918f7be5e6aecaa3cb0c23a95819329802d32d4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21b4ba48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21b4ba48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21b4ba48

Branch: refs/heads/branch-2
Commit: 21b4ba48ce3f6e8d7e7895202ad22978c9be392d
Parents: caa9709
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Oct 3 18:37:58 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Sat Oct 3 18:39:22 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java  | 2 +-
 .../src/test/java/org/apache/hadoop/net/ServerSocketUtil.java| 2 +-
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 4 ++--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml   | 2 +-
 .../resourcemanager/amlauncher/ApplicationMasterLauncher.java| 4 ++--
 6 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21b4ba48/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b873ba6..3f933b2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -35,6 +35,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12458. Retries is typoed to spell Retires in parts of
+hadoop-yarn and hadoop-common
+(Neelesh Srinivas Salian via harsh)
+
 HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
 (Josh Elser via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21b4ba48/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 871a8ba..f978ae7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -371,7 +371,7 @@ public class RetryPolicies {
 /**
  * Parse the given string as a MultipleLinearRandomRetry object.
  * The format of the string is "t_1, n_1, t_2, n_2, ...",
- * where t_i and n_i are the i-th pair of sleep time and number of retires.
+ * where t_i and n_i are the i-th pair of sleep time and number of retries.
  * Note that the white spaces in the string are ignored.
  *
  * @return the parsed object, or null if the parsing fails.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21b4ba48/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index 1917287..3685162 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -34,7 +34,7 @@ public class ServerSocketUtil {
* Port scan & allocate is how most other apps find ports
* 
* @param port given port
-   * @param retries number of retires
+   * @param retries number of retries
* @return
* @throws IOException
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21b4ba48/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 848907f..3fbea1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn

hadoop git commit: HADOOP-12458. Retries is typoed to spell Retires in parts of hadoop-yarn and hadoop-common. Contributed by Neelesh Srinivas Salian.

2015-10-03 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed5f51faa -> c918f7be5


HADOOP-12458. Retries is typoed to spell Retires in parts of hadoop-yarn and 
hadoop-common. Contributed by Neelesh Srinivas Salian.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c918f7be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c918f7be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c918f7be

Branch: refs/heads/trunk
Commit: c918f7be5e6aecaa3cb0c23a95819329802d32d4
Parents: ed5f51f
Author: Harsh J <ha...@cloudera.com>
Authored: Sat Oct 3 18:37:58 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Sat Oct 3 18:37:58 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java  | 2 +-
 .../src/test/java/org/apache/hadoop/net/ServerSocketUtil.java| 2 +-
 .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 4 ++--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml   | 2 +-
 .../resourcemanager/amlauncher/ApplicationMasterLauncher.java| 4 ++--
 6 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c918f7be/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 544a345..a672962 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -546,6 +546,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12458. Retries is typoed to spell Retires in parts of
+hadoop-yarn and hadoop-common
+(Neelesh Srinivas Salian via harsh)
+
 HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
 (Josh Elser via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c918f7be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 871a8ba..f978ae7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -371,7 +371,7 @@ public class RetryPolicies {
 /**
  * Parse the given string as a MultipleLinearRandomRetry object.
  * The format of the string is "t_1, n_1, t_2, n_2, ...",
- * where t_i and n_i are the i-th pair of sleep time and number of retires.
+ * where t_i and n_i are the i-th pair of sleep time and number of retries.
  * Note that the white spaces in the string are ignored.
  *
  * @return the parsed object, or null if the parsing fails.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c918f7be/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
index 1917287..3685162 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
@@ -34,7 +34,7 @@ public class ServerSocketUtil {
* Port scan & allocate is how most other apps find ports
* 
* @param port given port
-   * @param retries number of retires
+   * @param retries number of retries
* @return
* @throws IOException
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c918f7be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d2106cd..d7bd678 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ 

hadoop git commit: MAPREDUCE-6471. Document distcp incremental copy. Contributed by Neelesh Srinivas Salian.

2015-09-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1c030c6e5 -> 66dad854c


MAPREDUCE-6471. Document distcp incremental copy. Contributed by Neelesh 
Srinivas Salian.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66dad854
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66dad854
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66dad854

Branch: refs/heads/trunk
Commit: 66dad854c0aea8c137017fcf198b165cc1bd8bdd
Parents: 1c030c6
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Sep 28 13:12:43 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Sep 28 13:12:43 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm | 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66dad854/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b7e9016..67adcbd 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -295,6 +295,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6471. Document distcp incremental copy
+(Neelesh Srinivas Salian via harsh)
+
 MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused
 (Neelesh Srinivas Salian via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66dad854/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 7424267..aacf4c7 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -189,7 +189,9 @@ $H3 Update and Overwrite
   because it doesn't exist at the target. `10` and `20` are overwritten since
   the contents don't match the source.
 
-  If `-update` is used, `1` is overwritten as well.
+  If `-update` is used, `1` is skipped because the file-length and contents 
match. `2` is copied because it doesn’t exist at the target. `10` and `20` 
are overwritten since the contents don’t match the source. However, if 
`-append` is additionally used, then only `10` is overwritten (source length 
less than destination) and `20` is appended with the change in file (if the 
files match up to the destination's original length).
+
+  If `-overwrite` is used, `1` is overwritten as well.
 
 $H3 raw Namespace Extended Attribute Preservation
 
@@ -222,6 +224,7 @@ Flag  | Description  | 
Notes
 `-m ` | Maximum number of simultaneous copies | Specify the number 
of maps to copy data. Note that more maps may not necessarily improve 
throughput.
 `-overwrite` | Overwrite destination | If a map fails and `-i` is not 
specified, all the files in the split, not only those that failed, will be 
recopied. As discussed in the Usage documentation, it also changes the 
semantics for generating destination paths, so users should use this carefully.
 `-update` | Overwrite if source and destination differ in size, blocksize, or 
checksum | As noted in the preceding, this is not a "sync" operation. The 
criteria examined are the source and destination file sizes, blocksizes, and 
checksums; if they differ, the source file replaces the destination file. As 
discussed in the Usage documentation, it also changes the semantics for 
generating destination paths, so users should use this carefully.
+`-append` | Incremental copy of file with same name but different length | If 
the source file is greater in length than the destination file, the checksum of 
the common length part is compared. If the checksum matches, only the 
difference is copied using read and append functionalities. The -append option 
only works with `-update` without `-skipcrccheck`
 `-f ` | Use list at \<urilist_uri\> as src list | This is 
equivalent to listing each source on the command line. The `urilist_uri` list 
should be a fully qualified URI.
 `-filelimit ` | Limit the total number of files to be <= n | 
**Deprecated!** Ignored in the new DistCp.
 `-sizelimit ` | Limit the total size to be <= n bytes | **Deprecated!** 
Ignored in the new DistCp.



hadoop git commit: MAPREDUCE-6471. Document distcp incremental copy. Contributed by Neelesh Srinivas Salian.

2015-09-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 504d6fd95 -> 03e370e30


MAPREDUCE-6471. Document distcp incremental copy. Contributed by Neelesh 
Srinivas Salian.

(cherry picked from commit 66dad854c0aea8c137017fcf198b165cc1bd8bdd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03e370e3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03e370e3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03e370e3

Branch: refs/heads/branch-2
Commit: 03e370e306196b1b0db54a2ce5902a2534491ace
Parents: 504d6fd
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Sep 28 13:12:43 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Mon Sep 28 13:14:20 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm | 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03e370e3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9e719a1..de39835 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -20,6 +20,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6471. Document distcp incremental copy
+(Neelesh Srinivas Salian via harsh)
+
 MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused
 (Neelesh Srinivas Salian via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/03e370e3/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
--
diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm 
b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
index 447e515..7fa1747 100644
--- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
+++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm
@@ -189,7 +189,9 @@ $H3 Update and Overwrite
   because it doesn't exist at the target. `10` and `20` are overwritten since
   the contents don't match the source.
 
-  If `-update` is used, `1` is overwritten as well.
+  If `-update` is used, `1` is skipped because the file-length and contents 
match. `2` is copied because it doesn’t exist at the target. `10` and `20` 
are overwritten since the contents don’t match the source. However, if 
`-append` is additionally used, then only `10` is overwritten (source length 
less than destination) and `20` is appended with the change in file (if the 
files match up to the destination's original length).
+
+  If `-overwrite` is used, `1` is overwritten as well.
 
 $H3 raw Namespace Extended Attribute Preservation
 
@@ -222,6 +224,7 @@ Flag  | Description  | 
Notes
 `-m ` | Maximum number of simultaneous copies | Specify the number 
of maps to copy data. Note that more maps may not necessarily improve 
throughput.
 `-overwrite` | Overwrite destination | If a map fails and `-i` is not 
specified, all the files in the split, not only those that failed, will be 
recopied. As discussed in the Usage documentation, it also changes the 
semantics for generating destination paths, so users should use this carefully.
 `-update` | Overwrite if source and destination differ in size, blocksize, or 
checksum | As noted in the preceding, this is not a "sync" operation. The 
criteria examined are the source and destination file sizes, blocksizes, and 
checksums; if they differ, the source file replaces the destination file. As 
discussed in the Usage documentation, it also changes the semantics for 
generating destination paths, so users should use this carefully.
+`-append` | Incremental copy of file with same name but different length | If 
the source file is greater in length than the destination file, the checksum of 
the common length part is compared. If the checksum matches, only the 
difference is copied using read and append functionalities. The -append option 
only works with `-update` without `-skipcrccheck`
 `-f ` | Use list at \<urilist_uri\> as src list | This is 
equivalent to listing each source on the command line. The `urilist_uri` list 
should be a fully qualified URI.
 `-filelimit ` | Limit the total number of files to be <= n | 
**Deprecated!** Ignored in the new DistCp.
 `-sizelimit ` | Limit the total size to be <= n bytes | **Deprecated!** 
Ignored in the new DistCp.



hadoop git commit: MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused. Contributed by Neelesh Srinivas Salian.

2015-09-22 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 57003fa97 -> cc2b47399


MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused. Contributed by 
Neelesh Srinivas Salian.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc2b4739
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc2b4739
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc2b4739

Branch: refs/heads/trunk
Commit: cc2b4739902df60254dce2ddb23ef8f6ff2a3495
Parents: 57003fa
Author: Harsh J <ha...@cloudera.com>
Authored: Tue Sep 22 21:37:41 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Tue Sep 22 21:38:06 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/test/java/org/apache/hadoop/streaming/UtilTest.java | 5 -
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc2b4739/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bcdac1f..c2fe31f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -295,6 +295,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused
+(Neelesh Srinivas Salian via harsh)
+
 MAPREDUCE-6291. Correct mapred queue usage command.
 (Brahma Reddu Battula via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc2b4739/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
index 2766969..31e4905 100644
--- 
a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
+++ 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
@@ -117,11 +117,6 @@ class UtilTest {
 return collate(vargs, " ");
   }
 
-  public static boolean isCygwin() {
-String OS = System.getProperty("os.name");
-return (OS.indexOf("Windows") > -1);
-  }
-
   /**
* Is perl supported on this machine ?
* @return true if perl is available and is working as expected



hadoop git commit: MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused. Contributed by Neelesh Srinivas Salian.

2015-09-22 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 96e3fbf40 -> 38420754f


MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused. Contributed by 
Neelesh Srinivas Salian.

(cherry picked from commit de1322aa76007832698af682a3b76042116043b9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38420754
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38420754
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38420754

Branch: refs/heads/branch-2
Commit: 38420754f5c55feb13d22e03ff27b0626b34ee46
Parents: 96e3fbf
Author: Harsh J <ha...@cloudera.com>
Authored: Tue Sep 22 21:37:41 2015 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Tue Sep 22 21:38:31 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/test/java/org/apache/hadoop/streaming/UtilTest.java | 5 -
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38420754/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ba83864..0c78be6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -20,6 +20,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5045. UtilTest#isCygwin method appears to be unused
+(Neelesh Srinivas Salian via harsh)
+
 MAPREDUCE-6291. Correct mapred queue usage command.
 (Brahma Reddu Battula via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38420754/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
index 2766969..31e4905 100644
--- 
a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
+++ 
b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/UtilTest.java
@@ -117,11 +117,6 @@ class UtilTest {
 return collate(vargs, " ");
   }
 
-  public static boolean isCygwin() {
-String OS = System.getProperty("os.name");
-return (OS.indexOf("Windows") > -1);
-  }
-
   /**
* Is perl supported on this machine ?
* @return true if perl is available and is working as expected



hadoop git commit: HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. Contributed by Gautam Gopalakrishnan.

2015-08-27 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk fdb56f74f -> 0bf285413


HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. 
Contributed by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0bf28541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0bf28541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0bf28541

Branch: refs/heads/trunk
Commit: 0bf285413f8fcaadbb2d5817fe8090f5fb0d37d9
Parents: fdb56f7
Author: Harsh J ha...@cloudera.com
Authored: Thu Aug 27 16:22:48 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Aug 27 16:22:48 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 5 +
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java | 7 +++
 3 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 42eed14..29ecf7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -359,6 +359,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
+(Gautam Gopalakrishnan via harsh)
+
 HDFS-8821. Explain message Operation category X is not supported
 in state standby (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 014637b..298d55e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -851,6 +851,11 @@ public class DFSAdmin extends FsShell {
   return exitCode;
 }
 
+    if (bandwidth < 0) {
+      System.err.println("Bandwidth should be a non-negative integer");
+      return exitCode;
+    }
+
 FileSystem fs = getFS();
 if (!(fs instanceof DistributedFileSystem)) {
      System.err.println("FileSystem is " + fs.getUri());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0bf28541/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 6859e43..a6c0924 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -193,6 +193,13 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testSetNegativeBalancerBandwidth() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "-10"});
+    assertEquals("Negative bandwidth value must fail the command", -1,
+        exitCode);
+  }
+
+  @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});



hadoop git commit: HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. Contributed by Gautam Gopalakrishnan.

2015-08-27 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d63122979 -> 574f3a98c


HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value. 
Contributed by Gautam Gopalakrishnan.

(cherry picked from commit 0bf285413f8fcaadbb2d5817fe8090f5fb0d37d9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574f3a98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574f3a98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574f3a98

Branch: refs/heads/branch-2
Commit: 574f3a98ce8b8c0a55dfabc668320dc57a26a087
Parents: d631229
Author: Harsh J ha...@cloudera.com
Authored: Thu Aug 27 16:22:48 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Aug 27 16:23:22 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 5 +
 .../java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java | 7 +++
 3 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f3a98/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5283294..5c18e2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -14,6 +14,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-2390. dfsadmin -setBalancerBandwidth does not validate -ve value
+(Gautam Gopalakrishnan via harsh)
+
 HDFS-8821. Explain message Operation category X is not supported
 in state standby (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f3a98/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 6f7d8bf..ad4af87 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -828,6 +828,11 @@ public class DFSAdmin extends FsShell {
   return exitCode;
 }
 
+    if (bandwidth < 0) {
+      System.err.println("Bandwidth should be a non-negative integer");
+      return exitCode;
+    }
+
     FileSystem fs = getFS();
     if (!(fs instanceof DistributedFileSystem)) {
       System.err.println("FileSystem is " + fs.getUri());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f3a98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
index 6859e43..a6c0924 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
@@ -193,6 +193,13 @@ public class TestDFSAdminWithHA {
   }
 
   @Test (timeout = 30000)
+  public void testSetNegativeBalancerBandwidth() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "-10"});
+    assertEquals("Negative bandwidth value must fail the command", -1,
+        exitCode);
+  }
+
+  @Test (timeout = 30000)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});



hadoop git commit: HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals before deleting. Contributed by Casey Brotherton.

2015-08-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 387076894 -> e99349830


HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals 
before deleting. Contributed by Casey Brotherton.

(cherry picked from commit af78767870b8296886c03f8be24cf13a4e2bd4b0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9934983
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9934983
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9934983

Branch: refs/heads/branch-2
Commit: e9934983027bc3cd7cd82e49c436c66f507b4cd2
Parents: 3870768
Author: Harsh J ha...@cloudera.com
Authored: Tue Aug 25 11:21:19 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Aug 25 11:22:08 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/fs/TrashPolicyDefault.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9934983/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6c27abe..15ec117 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
+before deleting (Casey Brotherton via harsh)
+
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9934983/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 88aeab5..1ed8a46 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
   @SuppressWarnings("deprecation")
   @Override
   public void createCheckpoint() throws IOException {
+createCheckpoint(new Date());
+  }
+
+  @SuppressWarnings("deprecation")
+  public void createCheckpoint(Date date) throws IOException {
+
 if (!fs.exists(current)) // no trash, no checkpoint
   return;
 
 Path checkpointBase;
 synchronized (CHECKPOINT) {
-  checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
+  checkpointBase = new Path(trash, CHECKPOINT.format(date));
+
 }
 Path checkpoint = checkpointBase;
 
@@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 TrashPolicyDefault trash = new TrashPolicyDefault(
 fs, home.getPath(), conf);
 trash.deleteCheckpoint();
-trash.createCheckpoint();
+trash.createCheckpoint(new Date(now));
   } catch (IOException e) {
  LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
   } 



hadoop git commit: HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals before deleting. Contributed by Casey Brotherton.

2015-08-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 57c7ae1af -> af7876787


HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals 
before deleting. Contributed by Casey Brotherton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af787678
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af787678
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af787678

Branch: refs/heads/trunk
Commit: af78767870b8296886c03f8be24cf13a4e2bd4b0
Parents: 57c7ae1
Author: Harsh J ha...@cloudera.com
Authored: Tue Aug 25 11:21:19 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Aug 25 11:21:19 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../java/org/apache/hadoop/fs/TrashPolicyDefault.java| 11 +--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4250fc3..0ec4ed6 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -796,6 +796,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
+before deleting (Casey Brotherton via harsh)
+
 HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
 is an I/O error during requestShortCircuitShm (cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af787678/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 88aeab5..1ed8a46 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -161,12 +161,19 @@ public class TrashPolicyDefault extends TrashPolicy {
   @SuppressWarnings("deprecation")
   @Override
   public void createCheckpoint() throws IOException {
+createCheckpoint(new Date());
+  }
+
+  @SuppressWarnings("deprecation")
+  public void createCheckpoint(Date date) throws IOException {
+
 if (!fs.exists(current)) // no trash, no checkpoint
   return;
 
 Path checkpointBase;
 synchronized (CHECKPOINT) {
-  checkpointBase = new Path(trash, CHECKPOINT.format(new Date()));
+  checkpointBase = new Path(trash, CHECKPOINT.format(date));
+
 }
 Path checkpoint = checkpointBase;
 
@@ -287,7 +294,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 TrashPolicyDefault trash = new TrashPolicyDefault(
 fs, home.getPath(), conf);
 trash.deleteCheckpoint();
-trash.createCheckpoint();
+trash.createCheckpoint(new Date(now));
   } catch (IOException e) {
  LOG.warn("Trash caught: "+e+". Skipping "+home.getPath()+".");
   } 



hadoop git commit: HADOOP-12271. Hadoop Jar Error Should Be More Explanatory. Contributed by Josh Elser.

2015-07-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 212b8b919 -> 68034bd6c


HADOOP-12271. Hadoop Jar Error Should Be More Explanatory. Contributed by Josh 
Elser.

(cherry picked from commit 2087eaf684d9fb14b5390e21bf17e93ac8fea7f8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68034bd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68034bd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68034bd6

Branch: refs/heads/branch-2
Commit: 68034bd6c2b3f048c12eacc7d3922ce36f071d23
Parents: 212b8b9
Author: Harsh J ha...@cloudera.com
Authored: Fri Jul 31 10:12:32 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Fri Jul 31 10:13:07 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/util/RunJar.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68034bd6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d329319..e383de3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -30,6 +30,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
+(Josh Elser via harsh)
+
 HADOOP-6842. hadoop fs -text does not give a useful text representation
 of MapWritable objects (Akira Ajisaka via bobby)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68034bd6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
index 4b26b76..ccb114b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
@@ -148,7 +148,8 @@ public class RunJar {
 String fileName = args[firstArg++];
 File file = new File(fileName);
 if (!file.exists() || !file.isFile()) {
-  System.err.println("Not a valid JAR: " + file.getCanonicalPath());
+  System.err.println("JAR does not exist or is not a normal file: " +
+  file.getCanonicalPath());
   System.exit(-1);
 }
 String mainClassName = null;



hadoop git commit: HDFS-8821. Explain message Operation category X is not supported in state standby. Contributed by Gautam Gopalakrishnan.

2015-07-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 88d8736dd -> c5caa25b8


HDFS-8821. Explain message Operation category X is not supported in state 
standby. Contributed by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5caa25b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5caa25b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5caa25b

Branch: refs/heads/trunk
Commit: c5caa25b8f2953e2b7a9d2c9dcbdbf1fed95c10b
Parents: 88d8736
Author: Harsh J ha...@cloudera.com
Authored: Fri Jul 31 08:58:22 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Fri Jul 31 08:58:22 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java   | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5caa25b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7f04125..69e4dd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -357,6 +357,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-8821. Explain message Operation category X is not supported
+in state standby (Gautam Gopalakrishnan via harsh)
+
 HDFS-3918. EditLogTailer shouldn't log WARN when other node
 is in standby mode (todd via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5caa25b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
index 60e8371..d782bdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyState.java
@@ -82,8 +82,9 @@ public class StandbyState extends HAState {
 (op == OperationCategory.READ && context.allowStaleReads())) {
   return;
 }
+String faq = ". Visit https://s.apache.org/sbnn-error";
 String msg = "Operation category " + op + " is not supported in state "
-+ context.getState();
++ context.getState() + faq;
 throw new StandbyException(msg);
   }
 



hadoop git commit: YARN-3462. Patches applied for YARN-2424 are inconsistent between trunk and branch-2. Contributed by Naganarasimha G R.

2015-04-14 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 38b031d6b -> 01af29106


YARN-3462. Patches applied for YARN-2424 are inconsistent between trunk and 
branch-2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01af2910
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01af2910
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01af2910

Branch: refs/heads/branch-2
Commit: 01af29106a1603eff9f4e622a6919d49cb0bfa65
Parents: 38b031d
Author: Harsh J ha...@cloudera.com
Authored: Wed Apr 15 09:41:43 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Apr 15 09:41:43 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../src/main/resources/yarn-default.xml | 21 ++--
 .../nodemanager/LinuxContainerExecutor.java | 12 +++
 3 files changed, 22 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01af2910/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f77aafd..b5e850e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -165,6 +165,9 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
+YARN-3462. Patches applied for YARN-2424 are inconsistent between
+trunk and branch-2. (Naganarasimha G R via harsh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01af2910/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a469cae..66400c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1036,21 +1036,22 @@
   /property
 
   property
-descriptionThis determines which of the two modes that LCE should use on 
a non-secure
-cluster.  If this value is set to true, then all containers will be 
launched as the user 
-specified in 
yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user.  If 
-this value is set to false, then containers will run as the user who 
submitted the 
-application.
-/description
+descriptionThis determines which of the two modes that LCE should use on
+  a non-secure cluster.  If this value is set to true, then all containers
+  will be launched as the user specified in
+  yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user.  If
+  this value is set to false, then containers will run as the user who
+  submitted the application./description
 
nameyarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users/name
 valuetrue/value
   /property
 
   property
-descriptionThe UNIX user that containers will run as when 
Linux-container-executor
-is used in nonsecure mode (a use case for this is using cgroups) if the
-yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is 
set 
-to true./description
+descriptionThe UNIX user that containers will run as when
+  Linux-container-executor is used in nonsecure mode (a use case for this
+  is using cgroups) if the
+  yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is
+  set to true./description
 
nameyarn.nodemanager.linux-container-executor.nonsecure-mode.local-user/name
 valuenobody/value
   /property

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01af2910/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 59b35ce..fac71d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn

hadoop git commit: YARN-3462. Patches applied for YARN-2424 are inconsistent between trunk and branch-2. Contributed by Naganarasimha G R.

2015-04-14 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 50c8d0631 - d4a462c02


YARN-3462. Patches applied for YARN-2424 are inconsistent between trunk and 
branch-2. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4a462c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4a462c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4a462c0

Branch: refs/heads/branch-2.7
Commit: d4a462c02e9be1e082ef4f9c62a47cf93a5c9b7c
Parents: 50c8d06
Author: Harsh J ha...@cloudera.com
Authored: Wed Apr 15 09:41:43 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Apr 15 09:42:20 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../src/main/resources/yarn-default.xml | 21 ++--
 .../nodemanager/LinuxContainerExecutor.java | 12 +++
 3 files changed, 22 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a462c0/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5e44bc3..9072edf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -12,6 +12,9 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
+YARN-3462. Patches applied for YARN-2424 are inconsistent between
+trunk and branch-2. (Naganarasimha G R via harsh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a462c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a469cae..66400c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1036,21 +1036,22 @@
   /property
 
   property
-descriptionThis determines which of the two modes that LCE should use on 
a non-secure
-cluster.  If this value is set to true, then all containers will be 
launched as the user 
-specified in 
yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user.  If 
-this value is set to false, then containers will run as the user who 
submitted the 
-application.
-/description
+descriptionThis determines which of the two modes that LCE should use on
+  a non-secure cluster.  If this value is set to true, then all containers
+  will be launched as the user specified in
+  yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user.  If
+  this value is set to false, then containers will run as the user who
+  submitted the application./description
 
nameyarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users/name
 valuetrue/value
   /property
 
   property
-descriptionThe UNIX user that containers will run as when 
Linux-container-executor
-is used in nonsecure mode (a use case for this is using cgroups) if the
-yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is 
set 
-to true./description
+descriptionThe UNIX user that containers will run as when
+  Linux-container-executor is used in nonsecure mode (a use case for this
+  is using cgroups) if the
+  yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is
+  set to true./description
 
nameyarn.nodemanager.linux-container-executor.nonsecure-mode.local-user/name
 valuenobody/value
   /property

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4a462c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 59b35ce..fac71d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn

hadoop git commit: HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.

2015-03-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1ed9fb766 - ae3e8c61f


HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae3e8c61
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae3e8c61
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae3e8c61

Branch: refs/heads/trunk
Commit: ae3e8c61ff4c926ef3e71c782433ed9764d21478
Parents: 1ed9fb7
Author: Harsh J ha...@cloudera.com
Authored: Mon Mar 30 15:21:18 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Mar 30 15:21:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae3e8c61/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9b1cc3e..f437ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC
+(Liang Xie via harsh)
+
 HDFS-7875. Improve log message when wrong value configured for
 dfs.datanode.failed.volumes.tolerated.
 (nijel via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae3e8c61/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index 85f77f1..4e256a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -167,6 +167,8 @@ public class DFSZKFailoverController extends 
ZKFailoverController {
 
   public static void main(String args[])
   throws Exception {
+StringUtils.startupShutdownMessage(DFSZKFailoverController.class,
+args, LOG);
 if (DFSUtil.parseHelpArgument(args, 
 ZKFailoverController.USAGE, System.out, true)) {
   System.exit(0);



hadoop git commit: HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.

2015-03-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9f49b3e93 - c58357939


HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC. Contributed by Liang Xie.

(cherry picked from commit ae3e8c61ff4c926ef3e71c782433ed9764d21478)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5835793
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5835793
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5835793

Branch: refs/heads/branch-2
Commit: c58357939fecf797d9556f70d434edba81681f6f
Parents: 9f49b3e
Author: Harsh J ha...@cloudera.com
Authored: Mon Mar 30 15:21:18 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Mar 30 15:22:57 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java | 2 ++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5835793/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 151f71b..abc3d9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC
+(Liang Xie via harsh)
+
 HDFS-7875. Improve log message when wrong value configured for
 dfs.datanode.failed.volumes.tolerated.
 (nijel via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5835793/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
index 85f77f1..4e256a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
@@ -167,6 +167,8 @@ public class DFSZKFailoverController extends 
ZKFailoverController {
 
   public static void main(String args[])
   throws Exception {
+StringUtils.startupShutdownMessage(DFSZKFailoverController.class,
+args, LOG);
 if (DFSUtil.parseHelpArgument(args, 
 ZKFailoverController.USAGE, System.out, true)) {
   System.exit(0);



hadoop git commit: HDFS-3918. EditLogTailer shouldn't log WARN when other node is in standby mode. Contributed by Todd Lipcon.

2015-03-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 abf3ad988 - 32766b656


HDFS-3918. EditLogTailer shouldn't log WARN when other node is in standby mode. 
Contributed by Todd Lipcon.

(cherry picked from commit cce66ba3c9ec293e8ba1afd0eb518c7ca0bbc7c9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32766b65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32766b65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32766b65

Branch: refs/heads/branch-2
Commit: 32766b6563e258e252a3652d4919dabb84f20391
Parents: abf3ad9
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 31 08:04:18 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 31 08:17:22 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop/hdfs/server/namenode/ha/EditLogTailer.java| 11 +++
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/32766b65/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb4ac29..636f62d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-3918. EditLogTailer shouldn't log WARN when other node
+is in standby mode (todd via harsh)
+
 HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC
 (Liang Xie via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32766b65/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 6b6c8d4..1897d8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -273,6 +275,15 @@ public class EditLogTailer {
   getActiveNodeProxy().rollEditLog();
   lastRollTriggerTxId = lastLoadedTxnId;
 } catch (IOException ioe) {
+  if (ioe instanceof RemoteException) {
+ioe = ((RemoteException)ioe).unwrapRemoteException();
+if (ioe instanceof StandbyException) {
+  LOG.info("Skipping log roll. Remote node is not in Active state: " +
+  ioe.getMessage().split("\n")[0]);
+  return;
+}
+  }
+
   LOG.warn("Unable to trigger a roll of the active NN", ioe);
 }
   }



hadoop git commit: HDFS-3918. EditLogTailer shouldn't log WARN when other node is in standby mode. Contributed by Todd Lipcon.

2015-03-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1a495fbb4 - cce66ba3c


HDFS-3918. EditLogTailer shouldn't log WARN when other node is in standby mode. 
Contributed by Todd Lipcon.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cce66ba3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cce66ba3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cce66ba3

Branch: refs/heads/trunk
Commit: cce66ba3c9ec293e8ba1afd0eb518c7ca0bbc7c9
Parents: 1a495fb
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 31 08:04:18 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 31 08:04:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hadoop/hdfs/server/namenode/ha/EditLogTailer.java| 11 +++
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce66ba3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 390d279..cba30b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -323,6 +323,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-3918. EditLogTailer shouldn't log WARN when other node
+is in standby mode (todd via harsh)
+
 HDFS-4396. Add START_MSG/SHUTDOWN_MSG for ZKFC
 (Liang Xie via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cce66ba3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index 6b6c8d4..1897d8d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -273,6 +275,15 @@ public class EditLogTailer {
   getActiveNodeProxy().rollEditLog();
   lastRollTriggerTxId = lastLoadedTxnId;
 } catch (IOException ioe) {
+  if (ioe instanceof RemoteException) {
+ioe = ((RemoteException)ioe).unwrapRemoteException();
+if (ioe instanceof StandbyException) {
+  LOG.info("Skipping log roll. Remote node is not in Active state: " +
+  ioe.getMessage().split("\n")[0]);
+  return;
+}
+  }
+
   LOG.warn("Unable to trigger a roll of the active NN", ioe);
 }
   }



hadoop git commit: HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed by Gautam Gopalakrishnan.

2015-03-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk e0ccea33c - 3d9132d43


HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed 
by Gautam Gopalakrishnan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d9132d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d9132d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d9132d4

Branch: refs/heads/trunk
Commit: 3d9132d434c39e9b6e142e5cf9fd7a8afa4190a6
Parents: e0ccea3
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 29 00:45:01 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 29 00:45:01 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 84 
 3 files changed, 88 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d9132d4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f7cc2bc..496db06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -351,6 +351,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
+(Gautam Gopalakrishnan via harsh)
+
 HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown()
 (Rakesh R via vinayakumarb)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d9132d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d0999b8..0e0f484 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4784,7 +4784,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Metric({"TransactionsSinceLastCheckpoint",
   "Number of transactions since last checkpoint"})
   public long getTransactionsSinceLastCheckpoint() {
-return getEditLog().getLastWrittenTxId() -
+return getFSImage().getLastAppliedOrWrittenTxId() -
 getFSImage().getStorage().getMostRecentCheckpointTxId();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d9132d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 011db3c..64ea1e4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -22,12 +22,16 @@ import static 
org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Random;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Files;
 
+import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -47,7 +52,9 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor

hadoop git commit: HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed by Gautam Gopalakrishnan.

2015-03-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b1b495145 - b679dc5a8


HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs. Contributed 
by Gautam Gopalakrishnan.

(cherry picked from commit 3d9132d434c39e9b6e142e5cf9fd7a8afa4190a6)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b679dc5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b679dc5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b679dc5a

Branch: refs/heads/branch-2
Commit: b679dc5a8ea4bc27ac2945e492417df8b2ba5124
Parents: b1b4951
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 29 00:45:01 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 29 00:46:22 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../namenode/metrics/TestNameNodeMetrics.java   | 84 
 3 files changed, 88 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b679dc5a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8feffcb..1391b72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -36,6 +36,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
+(Gautam Gopalakrishnan via harsh)
+
 HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown()
 (Rakesh R via vinayakumarb)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b679dc5a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 04d9d67..32dcd5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4784,7 +4784,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   @Metric({"TransactionsSinceLastCheckpoint",
   "Number of transactions since last checkpoint"})
   public long getTransactionsSinceLastCheckpoint() {
-return getEditLog().getLastWrittenTxId() -
+return getFSImage().getLastAppliedOrWrittenTxId() -
 getFSImage().getStorage().getMostRecentCheckpointTxId();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b679dc5a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 63ab395..2ba609d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -22,12 +22,16 @@ import static 
org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.Random;
+import com.google.common.collect.ImmutableList;
+import com.google.common.io.Files;
 
+import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -47,7 +52,9

hadoop git commit: MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy Battula.

2015-03-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d0bc2eff1 - 274db918c


MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy 
Battula.

(cherry picked from commit f5432cb47dfacaa57faed32a29d00bbeff8a5385)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/274db918
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/274db918
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/274db918

Branch: refs/heads/branch-2
Commit: 274db918c3129869b981436b47e30dbc11aaecb4
Parents: d0bc2ef
Author: Harsh J ha...@cloudera.com
Authored: Sat Mar 28 11:57:21 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sat Mar 28 11:58:44 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/JobQueueClient.java| 2 +-
 .../src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java   | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java  | 2 +-
 .../src/main/java/org/apache/hadoop/tools/HadoopArchives.java | 2 +-
 5 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/274db918/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d0dfd5f..3efe73a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6291. Correct mapred queue usage command.
+(Brahma Reddy Battula via harsh)
+
 MAPREDUCE-579. Streaming slowmatch documentation. (harsh)
 
 MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort

http://git-wip-us.apache.org/repos/asf/hadoop/blob/274db918/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
index 097e338..81f6140 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
@@ -224,7 +224,7 @@ class JobQueueClient extends Configured implements Tool {
   }
 
   private void displayUsage(String cmd) {
-String prefix = Usage: JobQueueClient ;
+String prefix = Usage: queue ;
 if (-queueinfo.equals(cmd)) {
   System.err.println(prefix + [ + cmd + job-queue-name [-showJobs]]);
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/274db918/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
index 8f4259e..4f5b6a1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
@@ -363,7 +363,7 @@ public class Submitter extends Configured implements Tool {
 void printUsage() {
   // The CLI package should do this for us, but I can't figure out how
   // to make it print something reasonable.
-  System.out.println(bin/hadoop pipes);
+  System.out.println(Usage: pipes );
   System.out.println(  [-input path] // Input directory);
   System.out.println(  [-output path] // Output directory);
   System.out.println(  [-jar jar file // jar filename);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/274db918/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop

hadoop git commit: MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy Battula.

2015-03-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 89fb0f57e -> 27d49e671


MAPREDUCE-6291. Correct mapred queue usage command. Contributed by Brahma Reddy 
Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27d49e67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27d49e67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27d49e67

Branch: refs/heads/trunk
Commit: 27d49e6714ad7fc6038bc001e70ff5be3755f1ef
Parents: 89fb0f5
Author: Harsh J ha...@cloudera.com
Authored: Sat Mar 28 11:57:21 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sat Mar 28 11:58:17 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/JobQueueClient.java| 2 +-
 .../src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java   | 2 +-
 .../src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java  | 2 +-
 .../src/main/java/org/apache/hadoop/tools/HadoopArchives.java | 2 +-
 5 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d49e67/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ce16510..b0367a7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6291. Correct mapred queue usage command.
+(Brahma Reddu Battula via harsh)
+
 MAPREDUCE-579. Streaming slowmatch documentation. (harsh)
 
 MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d49e67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
index 097e338..81f6140 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
@@ -224,7 +224,7 @@ class JobQueueClient extends Configured implements Tool {
   }
 
   private void displayUsage(String cmd) {
-String prefix = Usage: JobQueueClient ;
+String prefix = Usage: queue ;
 if (-queueinfo.equals(cmd)) {
   System.err.println(prefix + [ + cmd + job-queue-name [-showJobs]]);
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d49e67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
index 8f4259e..4f5b6a1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/pipes/Submitter.java
@@ -363,7 +363,7 @@ public class Submitter extends Configured implements Tool {
 void printUsage() {
   // The CLI package should do this for us, but I can't figure out how
   // to make it print something reasonable.
-  System.out.println(bin/hadoop pipes);
+  System.out.println(Usage: pipes );
   System.out.println(  [-input path] // Input directory);
   System.out.println(  [-output path] // Output directory);
   System.out.println(  [-jar jar file // jar filename);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27d49e67/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client

hadoop git commit: HADOOP-11719.[Fsshell] Remove bin/hadoop reference from GenericOptionsParser default help text. Contributed by Brahma Reddy Battula.

2015-03-26 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4528eb9fb -> b4b4fe905


HADOOP-11719.[Fsshell] Remove bin/hadoop reference from GenericOptionsParser 
default help text. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4b4fe90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4b4fe90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4b4fe90

Branch: refs/heads/trunk
Commit: b4b4fe90569a116c67bfc94fbfbab95b1a0b712a
Parents: 4528eb9
Author: Harsh J ha...@cloudera.com
Authored: Thu Mar 26 11:27:21 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Mar 26 11:29:54 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../main/java/org/apache/hadoop/util/GenericOptionsParser.java   | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4fe90/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 667a010..5f43236 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -447,6 +447,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-11719. [Fsshell] Remove bin/hadoop reference from
+GenericOptionsParser default help text.
+(Brahma Reddy Battula via harsh)
+
 HADOOP-11692. Improve authentication failure WARN message to avoid user
 confusion. (Yongjun Zhang)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4b4fe90/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 0a46a7a..925aad6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -513,7 +513,7 @@ public class GenericOptionsParser {
 specify comma separated archives to be unarchived +
  on the compute machines.\n);
 out.println(The general command line syntax is);
-out.println(bin/hadoop command [genericOptions] [commandOptions]\n);
+out.println(command [genericOptions] [commandOptions]\n);
   }
   
 }



hadoop git commit: MAPREDUCE-579. Streaming slowmatch documentation.

2015-03-25 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ee824cafe -> d85c14afb


MAPREDUCE-579. Streaming slowmatch documentation.

(cherry picked from commit a2e42d2deee715f6255d6fd2c95f34e80888dc5f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d85c14af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d85c14af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d85c14af

Branch: refs/heads/branch-2
Commit: d85c14afbfbf31028a7f253e0fa77c5ed3e88f7f
Parents: ee824ca
Author: Harsh J ha...@cloudera.com
Authored: Wed Mar 25 14:38:12 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Mar 25 14:39:46 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 2 ++
 .../hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm  | 7 +++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d85c14af/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a2fcfbe..d913fe5 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-579. Streaming slowmatch documentation. (harsh)
+
 MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort
 (Chao Zhang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d85c14af/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm 
b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index 179b1f0..a23d407 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -546,6 +546,13 @@ You can use the record reader StreamXmlRecordReader to 
process XML documents.
 
 Anything found between BEGIN\_STRING and END\_STRING would be treated as one 
record for map tasks.
 
+The name-value properties that StreamXmlRecordReader understands are:
+
+*   (strings) 'begin' - Characters marking beginning of record, and 'end' - 
Characters marking end of record.
+*   (boolean) 'slowmatch' - Toggle to look for begin and end characters, but 
within CDATA instead of regular tags. Defaults to false.
+*   (integer) 'lookahead' - Maximum lookahead bytes to sync CDATA when using 
'slowmatch', should be larger than 'maxrec'. Defaults to 2*'maxrec'.
+*   (integer) 'maxrec' - Maximum record size to read between each match during 
'slowmatch'. Defaults to 5 bytes.
+
 $H3 How do I update counters in streaming applications?
 
 A streaming process can use the stderr to emit counter information. 
`reporter:counter:group,counter,amount` should be sent to stderr to 
update the counter.



hadoop git commit: MAPREDUCE-579. Streaming slowmatch documentation.

2015-03-25 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 323945b33 -> b6dea9776


MAPREDUCE-579. Streaming slowmatch documentation.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6dea977
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6dea977
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6dea977

Branch: refs/heads/trunk
Commit: b6dea9776b92c46d2ca593f7ada0a3b5dfdc2e04
Parents: 323945b
Author: Harsh J ha...@cloudera.com
Authored: Wed Mar 25 14:38:12 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Mar 25 14:39:00 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 2 ++
 .../hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm  | 7 +++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6dea977/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2b16c30..f81a13f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-579. Streaming slowmatch documentation. (harsh)
+
 MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort
 (Chao Zhang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6dea977/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
--
diff --git 
a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm 
b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index b4c5e38..7f2412e 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -546,6 +546,13 @@ You can use the record reader StreamXmlRecordReader to 
process XML documents.
 
 Anything found between BEGIN\_STRING and END\_STRING would be treated as one 
record for map tasks.
 
+The name-value properties that StreamXmlRecordReader understands are:
+
+*   (strings) 'begin' - Characters marking beginning of record, and 'end' - 
Characters marking end of record.
+*   (boolean) 'slowmatch' - Toggle to look for begin and end characters, but 
within CDATA instead of regular tags. Defaults to false.
+*   (integer) 'lookahead' - Maximum lookahead bytes to sync CDATA when using 
'slowmatch', should be larger than 'maxrec'. Defaults to 2*'maxrec'.
+*   (integer) 'maxrec' - Maximum record size to read between each match during 
'slowmatch'. Defaults to 5 bytes.
+
 $H3 How do I update counters in streaming applications?
 
 A streaming process can use the stderr to emit counter information. 
`reporter:counter:group,counter,amount` should be sent to stderr to 
update the counter.



hadoop git commit: HADOOP-11719.[Fsshell] Remove bin/hadoop reference from GenericOptionsParser default help text. Contributed by Brahma Reddy Battula.

2015-03-25 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6e9202c80 -> c33ecd83e


HADOOP-11719.[Fsshell] Remove bin/hadoop reference from GenericOptionsParser 
default help text. Contributed by Brahma Reddy Battula.

(cherry picked from commit 1ede5e6e1e7f51cf13d0488bb1b0e126c865656c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c33ecd83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c33ecd83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c33ecd83

Branch: refs/heads/branch-2
Commit: c33ecd83e42c1f688875979761c6077c92c415b2
Parents: 6e9202c
Author: Harsh J ha...@cloudera.com
Authored: Thu Mar 26 11:27:21 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Mar 26 11:28:32 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 4 
 .../main/java/org/apache/hadoop/util/GenericOptionsParser.java   | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c33ecd83/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 04817af..e2dbe8d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -14,6 +14,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-11719. [Fsshell] Remove bin/hadoop reference from
+GenericOptionsParser default help text.
+(Brahma Reddy Battula via harsh)
+
 HADOOP-11692. Improve authentication failure WARN message to avoid user
 confusion. (Yongjun Zhang)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c33ecd83/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 0a46a7a..925aad6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -513,7 +513,7 @@ public class GenericOptionsParser {
 specify comma separated archives to be unarchived +
  on the compute machines.\n);
 out.println(The general command line syntax is);
-out.println(bin/hadoop command [genericOptions] [commandOptions]\n);
+out.println(command [genericOptions] [commandOptions]\n);
   }
   
 }



hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 51f1f4937 -> eda02540c


HDFS-7875. Improve log message when wrong value configured for 
dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eda02540
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eda02540
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eda02540

Branch: refs/heads/trunk
Commit: eda02540ce53732585b3f31411b2e65db569eb25
Parents: 51f1f49
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 23:06:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 4 
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 6 --
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4f3937a..3725a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7875. Improve log message when wrong value configured for
+dfs.datanode.failed.volumes.tolerated.
+(nijel via harsh)
+
 HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
 HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eda02540/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d42c00c..05c4871 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -276,8 +276,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
 if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-  throw new DiskErrorException(Invalid volume failure 
-  +  config value:  + volFailuresTolerated);
+  throw new DiskErrorException(Invalid value configured for 
+  + dfs.datanode.failed.volumes.tolerated -  + volFailuresTolerated
+  + . Value configured is either less than 0 or = 
+  + to the number of configured volumes ( + volsConfigured + ).);
 }
 if (volsFailed > volFailuresTolerated) {
   throw new DiskErrorException(Too many failed volumes - 



hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95bde8898 -> 608ad6c2c


HDFS-7875. Improve log message when wrong value configured for 
dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

(cherry picked from commit 2da3d2ed2ff2e9b48dbda7e029aa58261c729d35)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608ad6c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608ad6c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608ad6c2

Branch: refs/heads/branch-2
Commit: 608ad6c2cd18234ffb551e0784f260e0b3faf402
Parents: 95bde88
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 23:03:56 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 4 
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java | 6 --
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ad6c2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5f289dd..19c5529 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -8,6 +8,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7875. Improve log message when wrong value configured for
+dfs.datanode.failed.volumes.tolerated.
+(nijel via harsh)
+
 HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
 HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608ad6c2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6a15906..69a80c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -278,8 +278,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
 if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-  throw new DiskErrorException(Invalid volume failure 
-  +  config value:  + volFailuresTolerated);
+  throw new DiskErrorException(Invalid value configured for 
+  + dfs.datanode.failed.volumes.tolerated -  + volFailuresTolerated
+  + . Value configured is either less than 0 or = 
+  + to the number of configured volumes ( + volsConfigured + ).);
 }
 if (volsFailed > volFailuresTolerated) {
   throw new DiskErrorException(Too many failed volumes - 



hadoop git commit: YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.

2015-03-24 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 970ee3fc5 -> fbceb3b41


YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbceb3b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbceb3b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbceb3b4

Branch: refs/heads/trunk
Commit: fbceb3b41834d6899c4353fb24f12ba3ecf67faf
Parents: 970ee3f
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 24 11:57:28 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 24 11:57:58 2015 +0530

--
 hadoop-yarn-project/CHANGES.txt |  3 +++
 .../client/TestApplicationClientProtocolOnHA.java   | 16 ++--
 2 files changed, 13 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3d9f271..8a5e142 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-1880. Cleanup TestApplicationClientProtocolOnHA
+(ozawa via harsh)
+
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbceb3b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
index bfc6656..8e00554 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
@@ -93,7 +93,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetApplicationsOnHA() throws Exception {
 List<ApplicationReport> reports =
 client.getApplications();
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeAppReports(),
 reports);
   }
@@ -101,7 +102,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetClusterNodesOnHA() throws Exception {
 List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeNodeReports(),
 reports);
   }
@@ -117,8 +119,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetQueueUserAclsOnHA() throws Exception {
 List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo();
-Assert.assertTrue(queueUserAclsList != null
- && !queueUserAclsList.isEmpty());
+Assert.assertTrue(queueUserAclsList != null);
+Assert.assertFalse(queueUserAclsList.isEmpty());
 Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),
 queueUserAclsList);
   }
@@ -136,7 +138,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetApplicationAttemptsOnHA() throws Exception {
 List<ApplicationAttemptReport> reports =
 client.getApplicationAttempts(cluster.createFakeAppId());
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),
 reports);
   }
@@ -153,7 +156,8 @@ public class TestApplicationClientProtocolOnHA extends 
ProtocolHATestBase {
   public void testGetContainersOnHA() throws Exception {
 List<ContainerReport> reports =
 client.getContainers(cluster.createFakeApplicationAttemptId());
-Assert.assertTrue(reports != null && !reports.isEmpty());
+Assert.assertTrue(reports != null);
+Assert.assertFalse(reports.isEmpty());
 Assert.assertEquals(cluster.createFakeContainerReports

hadoop git commit: MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort. Contributed by Chao Zhang.

2015-03-22 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6e6e0e4a3 -> 2cd243128


MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort. 
Contributed by Chao Zhang.

(cherry picked from commit b375d1fc936913edf4a75212559f160c41043906)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cd24312
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cd24312
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cd24312

Branch: refs/heads/branch-2
Commit: 2cd24312893ea0b8518aa8c16dd2477f19ed2526
Parents: 6e6e0e4
Author: Harsh J ha...@cloudera.com
Authored: Mon Mar 23 03:48:36 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Mar 23 03:48:55 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/examples/Sort.java| 7 ---
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd24312/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5ebf835..e399d3e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort
+(Chao Zhang via harsh)
+
 MAPREDUCE-5190. Unnecessary condition test in RandomSampler.
 (Jingguo Yao via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd24312/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
index a90c02b..0382c09 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
@@ -24,7 +24,7 @@ import java.util.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
@@ -160,13 +160,14 @@ public class Sort<K,V> extends Configured implements Tool 
{
   System.out.println("Sampling input to effect total-order sort...");
   job.setPartitionerClass(TotalOrderPartitioner.class);
   Path inputDir = FileInputFormat.getInputPaths(job)[0];
-  inputDir = inputDir.makeQualified(inputDir.getFileSystem(conf));
+  FileSystem fs = inputDir.getFileSystem(conf);
+  inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
   Path partitionFile = new Path(inputDir, "_sortPartitioning");
   TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
   InputSampler.<K,V>writePartitionFile(job, sampler);
   URI partitionUri = new URI(partitionFile.toString() +
  "#" + "_sortPartitioning");
-  DistributedCache.addCacheFile(partitionUri, conf);
+  job.addCacheFile(partitionUri);
 }
 
 System.out.println(Running on  +



hadoop git commit: MAPREDUCE-5660. Log info about possible thrashing (when using memory-based scheduling in Capacity Scheduler) is not printed. Contributed by Adam Kawa.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 927f13b6c -> 3e3623818


MAPREDUCE-5660. Log info about possible thrashing (when using memory-based 
scheduling in Capacity Scheduler) is not printed. Contributed by Adam Kawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e362381
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e362381
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e362381

Branch: refs/heads/branch-1
Commit: 3e3623818142fdd9b643d2a72d2af66431b9122f
Parents: 927f13b
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:37:06 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:37:06 2015 +0530

--
 CHANGES.txt  |  4 
 src/mapred/org/apache/hadoop/mapred/TaskTracker.java | 10 ++
 2 files changed, 10 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e362381/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index e6bfac1..b71eb08 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -66,6 +66,10 @@ Release 1.3.0 - unreleased
 
   BUG FIXES
 
+MAPREDUCE-5660. Log info about possible thrashing (when using
+memory-based scheduling in Capacity Scheduler) is not printed
+(Adam Kawa via harsh)
+
 MAPREDUCE-5556. mapred docs have incorrect classpath (harsh)
 
 MAPREDUCE-5272. Minor error in javadoc of TestMRWithDistributedCache

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e362381/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
--
diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java 
b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
index a1ca352..3698eab 100644
--- a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
+++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
@@ -4460,11 +4460,13 @@ public class TaskTracker implements MRConstants, 
TaskUmbilicalProtocol,
 }
   }
 }
-if (totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT) {
-  LOG.info("totalMemoryAllottedForTasks > totalPhysicalMemoryOnTT."
+
+long totalMemoryAllottedForTasksInBytes = totalMemoryAllottedForTasks * 
1024 * 1024;
+if (totalMemoryAllottedForTasksInBytes > totalPhysicalMemoryOnTT) {
+  LOG.info("totalMemoryAllottedForTasksInBytes > totalPhysicalMemoryOnTT."
   + " Thrashing might happen.");
-} else if (totalMemoryAllottedForTasks > totalVirtualMemoryOnTT) {
-  LOG.info("totalMemoryAllottedForTasks > totalVirtualMemoryOnTT."
+} else if (totalMemoryAllottedForTasksInBytes > totalVirtualMemoryOnTT) {
+  LOG.info("totalMemoryAllottedForTasksInBytes > totalVirtualMemoryOnTT."
   + " Thrashing might happen.");
 }
 



hadoop git commit: MAPREDUCE-6239. Consolidate TestJobConf classes in hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core. Contributed by Varun Saxena.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk e1feb4ea1 -> 7a678db3a


MAPREDUCE-6239. Consolidate TestJobConf classes in 
hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core. Contributed 
by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a678db3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a678db3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a678db3

Branch: refs/heads/trunk
Commit: 7a678db3accf9480f3799dcf6fd7ffef09a311cc
Parents: e1feb4e
Author: Harsh J ha...@cloudera.com
Authored: Sat Mar 21 09:43:29 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sat Mar 21 09:43:29 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|   4 +
 .../org/apache/hadoop/mapred/TestJobConf.java   | 173 
 .../org/apache/hadoop/conf/TestJobConf.java | 199 ---
 3 files changed, 177 insertions(+), 199 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a678db3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 48eda8b..4f80411 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6239. Consolidate TestJobConf classes in
+hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core
+(Varun Saxena via harsh)
+
 MAPREDUCE-5807. Print usage by TeraSort job. (Rohith via harsh)
 
 MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a678db3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
index 3d924e1..0612ade 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
@@ -22,6 +22,7 @@ import java.util.regex.Pattern;
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -188,4 +189,176 @@ public class TestJobConf {
 Assert.assertEquals(2048, configuration.getLong(
 JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, -1));
   }
+
+
+  @Test
+  public void testProfileParamsDefaults() {
+JobConf configuration = new JobConf();
+String result = configuration.getProfileParams();
+Assert.assertNotNull(result);
+Assert.assertTrue(result.contains(file=%s));
+Assert.assertTrue(result.startsWith(-agentlib:hprof));
+  }
+
+  @Test
+  public void testProfileParamsSetter() {
+JobConf configuration = new JobConf();
+
+configuration.setProfileParams(test);
+Assert.assertEquals(test, 
configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
+  }
+
+  @Test
+  public void testProfileParamsGetter() {
+JobConf configuration = new JobConf();
+
+configuration.set(MRJobConfig.TASK_PROFILE_PARAMS, test);
+Assert.assertEquals(test, configuration.getProfileParams());
+  }
+
+  /**
+   * Testing mapred.task.maxvmem replacement with new values
+   *
+   */
+  @Test
+  public void testMemoryConfigForMapOrReduceTask(){
+JobConf configuration = new JobConf();
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
+Assert.assertEquals(configuration.getMemoryForMapTask(),300);
+Assert.assertEquals(configuration.getMemoryForReduceTask(),300);
+
+configuration.set(mapred.task.maxvmem , String.valueOf(2*1024 * 1024));
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
+Assert.assertEquals(configuration.getMemoryForMapTask(),2);
+Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
+
+configuration = new JobConf();
+configuration.set(mapred.task.maxvmem , -1);
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(400

hadoop git commit: MAPREDUCE-6213. NullPointerException caused by job history server addr not resolvable. Contributed by Peng Zhang.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7a678db3a -> e1e09052e


MAPREDUCE-6213. NullPointerException caused by job history server addr not 
resolvable. Contributed by Peng Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1e09052
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1e09052
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1e09052

Branch: refs/heads/trunk
Commit: e1e09052e861926112493d6041aae01ab594b547
Parents: 7a678db
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:44:36 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:44:36 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java | 7 ---
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1e09052/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4f80411..76180a3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6213. NullPointerException caused by job history server addr not
+resolvable. (Peng Zhang via harsh)
+
 MAPREDUCE-6281. Fix javadoc in Terasort. (Albert Chu via ozawa)
 
 Release 2.7.0 - UNRELEASED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1e09052/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index cac0119..d367060 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -137,8 +137,9 @@ public class MRWebAppUtil {
   hsAddress, getDefaultJHSWebappPort(),
   getDefaultJHSWebappURLWithoutScheme());
 StringBuffer sb = new StringBuffer();
-if (address.getAddress().isAnyLocalAddress() || 
-address.getAddress().isLoopbackAddress()) {
+if (address.getAddress() != null &&
+(address.getAddress().isAnyLocalAddress() ||
+ address.getAddress().isLoopbackAddress())) {
   sb.append(InetAddress.getLocalHost().getCanonicalHostName());
 } else {
   sb.append(address.getHostName());
@@ -171,4 +172,4 @@ public class MRWebAppUtil {
   public static String getAMWebappScheme(Configuration conf) {
 return "http://";
   }
-}
\ No newline at end of file
+}



hadoop git commit: MAPREDUCE-6239. Consolidate TestJobConf classes in hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core. Contributed by Varun Saxena.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9c494ceda -> 97da36777


MAPREDUCE-6239. Consolidate TestJobConf classes in 
hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core. Contributed 
by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97da3677
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97da3677
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97da3677

Branch: refs/heads/branch-2
Commit: 97da36777f2916904e44bfa011208f4f07fe5170
Parents: 9c494ce
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:20:11 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:20:11 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|   4 +
 .../org/apache/hadoop/mapred/TestJobConf.java   | 171 
 .../org/apache/hadoop/conf/TestJobConf.java | 198 ---
 3 files changed, 175 insertions(+), 198 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97da3677/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 0ffbc28..da43195 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6239. Consolidate TestJobConf classes in
+hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core
+(Varun Saxena via harsh)
+
 MAPREDUCE-5807. Print usage by TeraSort job. (Rohith via harsh)
 
 MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97da3677/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
index 3d924e1..a68ba4f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobConf.java
@@ -22,6 +22,7 @@ import java.util.regex.Pattern;
 import static org.junit.Assert.*;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -188,4 +189,174 @@ public class TestJobConf {
 Assert.assertEquals(2048, configuration.getLong(
 JobConf.MAPREDUCE_JOB_REDUCE_MEMORY_MB_PROPERTY, -1));
   }
+
+  @Test
+  public void testProfileParamsDefaults() {
+JobConf configuration = new JobConf();
+String result = configuration.getProfileParams();
+Assert.assertNotNull(result);
+Assert.assertTrue(result.contains(file=%s));
+Assert.assertTrue(result.startsWith(-agentlib:hprof));
+  }
+
+  @Test
+  public void testProfileParamsSetter() {
+JobConf configuration = new JobConf();
+
+configuration.setProfileParams(test);
+Assert.assertEquals(test, 
configuration.get(MRJobConfig.TASK_PROFILE_PARAMS));
+  }
+
+  @Test
+  public void testProfileParamsGetter() {
+JobConf configuration = new JobConf();
+
+configuration.set(MRJobConfig.TASK_PROFILE_PARAMS, test);
+Assert.assertEquals(test, configuration.getProfileParams());
+  }
+
+  /**
+   * Testing mapred.task.maxvmem replacement with new values
+   *
+   */
+  @Test
+  public void testMemoryConfigForMapOrReduceTask(){
+JobConf configuration = new JobConf();
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
+Assert.assertEquals(configuration.getMemoryForMapTask(),300);
+Assert.assertEquals(configuration.getMemoryForReduceTask(),300);
+
+configuration.set(mapred.task.maxvmem , String.valueOf(2*1024 * 1024));
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(300));
+Assert.assertEquals(configuration.getMemoryForMapTask(),2);
+Assert.assertEquals(configuration.getMemoryForReduceTask(),2);
+
+configuration = new JobConf();
+configuration.set(mapred.task.maxvmem , -1);
+configuration.set(MRJobConfig.MAP_MEMORY_MB,String.valueOf(300));
+configuration.set(MRJobConfig.REDUCE_MEMORY_MB,String.valueOf(400

hadoop git commit: MAPREDUCE-6213. NullPointerException caused by job history server addr not resolvable. Contributed by Peng Zhang.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 97da36777 -> 418d078bc


MAPREDUCE-6213. NullPointerException caused by job history server addr not 
resolvable. Contributed by Peng Zhang.

(cherry picked from commit e1e09052e861926112493d6041aae01ab594b547)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/418d078b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/418d078b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/418d078b

Branch: refs/heads/branch-2
Commit: 418d078bc5621bea415c3fd095f1c8a9e218878a
Parents: 97da367
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:44:36 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:45:01 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java | 7 ---
 2 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/418d078b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index da43195..5897bd1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -38,6 +38,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6213. NullPointerException caused by job history server addr not
+resolvable. (Peng Zhang via harsh)
+
 MAPREDUCE-6281. Fix javadoc in Terasort. (Albert Chu via ozawa)
 
 Release 2.7.0 - UNRELEASED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/418d078b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index cac0119..d367060 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -137,8 +137,9 @@ public class MRWebAppUtil {
   hsAddress, getDefaultJHSWebappPort(),
   getDefaultJHSWebappURLWithoutScheme());
 StringBuffer sb = new StringBuffer();
-if (address.getAddress().isAnyLocalAddress() || 
-address.getAddress().isLoopbackAddress()) {
+if (address.getAddress() != null &&
+(address.getAddress().isAnyLocalAddress() ||
+ address.getAddress().isLoopbackAddress())) {
   sb.append(InetAddress.getLocalHost().getCanonicalHostName());
 } else {
   sb.append(address.getHostName());
@@ -171,4 +172,4 @@ public class MRWebAppUtil {
   public static String getAMWebappScheme(Configuration conf) {
 return "http://";
   }
-}
\ No newline at end of file
+}



hadoop git commit: MAPREDUCE-5183. In, TaskTracker#reportProgress logging of 0.0-1.0 progress is followed by percent sign. Contributed by Niranjan Singh.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 3e3623818 -> 552bafde0


MAPREDUCE-5183. In, TaskTracker#reportProgress logging of 0.0-1.0 progress is 
followed by percent sign. Contributed by Niranjan Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/552bafde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/552bafde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/552bafde

Branch: refs/heads/branch-1
Commit: 552bafde0f117504b7ef41a1d83a0b593623a7ae
Parents: 3e36238
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 03:02:36 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 03:02:36 2015 +0530

--
 CHANGES.txt  | 3 +++
 src/mapred/org/apache/hadoop/mapred/TaskTracker.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/552bafde/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index b71eb08..6abf1f9 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -66,6 +66,9 @@ Release 1.3.0 - unreleased
 
   BUG FIXES
 
+MAPREDUCE-5183. In, TaskTracker#reportProgress logging of 0.0-1.0 progress
+is followed by percent sign (Niranjan Singh via harsh)
+
 MAPREDUCE-5660. Log info about possible thrashing (when using
 memory-based scheduling in Capacity Scheduler) is not printed
 (Adam Kawa via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/552bafde/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
--
diff --git a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java 
b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
index 3698eab..18c466c 100644
--- a/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
+++ b/src/mapred/org/apache/hadoop/mapred/TaskTracker.java
@@ -2936,8 +2936,8 @@ public class TaskTracker implements MRConstants, 
TaskUmbilicalProtocol,
  */
 public synchronized void reportProgress(TaskStatus taskStatus) 
 {
-  LOG.info(task.getTaskID() + " " + taskStatus.getProgress() + 
-  "% " + taskStatus.getStateString());
+  LOG.info(task.getTaskID() + " " + StringUtils.formatPercent(
+  taskStatus.getProgress(), 0) + " " + taskStatus.getStateString());
   // task will report its state as
   // COMMIT_PENDING when it is waiting for commit response and 
   // when it is committing.



hadoop git commit: MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk e1e09052e -> 433542904


MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes 
counter limits are not reset correctly. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43354290
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43354290
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43354290

Branch: refs/heads/trunk
Commit: 433542904aba5ddebf9bd9d299378647351eb13a
Parents: e1e0905
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:51:02 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:51:02 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java| 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43354290/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 76180a3..fc42941 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -286,6 +286,10 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6213. NullPointerException caused by job history server addr not
 resolvable. (Peng Zhang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43354290/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 43b2df2..f343d7c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -93,7 +93,7 @@ public class HistoryViewer {
   final Configuration jobConf = new Configuration(conf);
   try {
 jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
-Limits.reset(conf);
+Limits.reset(jobConf);
   } catch (FileNotFoundException fnf) {
 if (LOG.isWarnEnabled()) {
   LOG.warn("Missing job conf in history", fnf);



hadoop git commit: MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 418d078bc -> 7c72c7f52


MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes 
counter limits are not reset correctly. Contributed by Zhihai Xu.

(cherry picked from commit 433542904aba5ddebf9bd9d299378647351eb13a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c72c7f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c72c7f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c72c7f5

Branch: refs/heads/branch-2
Commit: 7c72c7f5290f0991f65102c2bd413f965a8ca409
Parents: 418d078
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 02:51:02 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 02:51:17 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java| 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c72c7f5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5897bd1..507c6f4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -38,6 +38,10 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6213. NullPointerException caused by job history server addr not
 resolvable. (Peng Zhang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c72c7f5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 43b2df2..f343d7c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -93,7 +93,7 @@ public class HistoryViewer {
   final Configuration jobConf = new Configuration(conf);
   try {
 jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
-Limits.reset(conf);
+Limits.reset(jobConf);
   } catch (FileNotFoundException fnf) {
 if (LOG.isWarnEnabled()) {
   LOG.warn("Missing job conf in history", fnf);



hadoop git commit: MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with invisible files/folders. Contributed by Maysam Yabandeh.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 433542904 -> b46c2bb51


MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with invisible 
files/folders. Contributed by Maysam Yabandeh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b46c2bb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b46c2bb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b46c2bb5

Branch: refs/heads/trunk
Commit: b46c2bb51ae524e6640756620f70e5925cda7592
Parents: 4335429
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 09:45:48 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 09:45:48 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/lib/output/MapFileOutputFormat.java   | 12 +++-
 .../mapreduce/lib/output/TestFileOutputCommitter.java   | 10 ++
 3 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46c2bb5/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index fc42941..2920811 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with hidden
+files/folders. (Maysam Yabandeh via harsh)
+
 MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
 causes counter limits are not reset correctly.
 (Zhihai Xu via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46c2bb5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
index b8cb997..da33770 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
@@ -24,6 +24,7 @@ import java.util.Arrays;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.PathFilter;
 
 import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.WritableComparable;
@@ -88,7 +89,16 @@ public class MapFileOutputFormat
   public static MapFile.Reader[] getReaders(Path dir,
   Configuration conf) throws IOException {
 FileSystem fs = dir.getFileSystem(conf);
-Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
+PathFilter filter = new PathFilter() {
+  @Override
+  public boolean accept(Path path) {
+String name = path.getName();
+if (name.startsWith("_") || name.startsWith("."))
+  return false;
+return true;
+  }
+};
+Path[] names = FileUtil.stat2Paths(fs.listStatus(dir, filter));
 
 // sort names, so that hash partitioning works
 Arrays.sort(names);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b46c2bb5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index 0d4ab98..5c4428b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -27,6 +27,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import

hadoop git commit: MAPREDUCE-5190. Unnecessary condition test in RandomSampler. Contributed by Jingguo Yao.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a7f1c1b99 -> d43c0da7d


MAPREDUCE-5190. Unnecessary condition test in RandomSampler. Contributed by 
Jingguo Yao.

(cherry picked from commit 1d5c796d654c8959972d15cc6742731a99380bfc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d43c0da7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d43c0da7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d43c0da7

Branch: refs/heads/branch-2
Commit: d43c0da7d7117afeb132b770bb4e3b076292d2a0
Parents: a7f1c1b
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:03:25 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:04:19 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/mapreduce/lib/partition/InputSampler.java| 6 ++
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d43c0da7/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 440c571..9235798 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5190. Unnecessary condition test in RandomSampler.
+(Jingguo Yao via harsh)
+
 MAPREDUCE-6239. Consolidate TestJobConf classes in
 hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core
 (Varun Saxena via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d43c0da7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
index 4668f49..cce9f37 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
@@ -230,10 +230,8 @@ public class InputSampler<K,V> extends Configured 
implements Tool  {
   // to reflect the possibility of existing elements being
   // pushed out
   int ind = r.nextInt(numSamples);
-  if (ind != numSamples) {
-samples.set(ind, ReflectionUtils.copy(job.getConfiguration(),
- reader.getCurrentKey(), null));
-  }
+  samples.set(ind, ReflectionUtils.copy(job.getConfiguration(),
+   reader.getCurrentKey(), null));
   freq *= (numSamples - 1) / (double) numSamples;
 }
   }



hadoop git commit: MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with invisible files/folders. Contributed by Maysam Yabandeh.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7c72c7f52 -> a7f1c1b99


MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with invisible 
files/folders. Contributed by Maysam Yabandeh.

(cherry picked from commit b46c2bb51ae524e6640756620f70e5925cda7592)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7f1c1b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7f1c1b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7f1c1b9

Branch: refs/heads/branch-2
Commit: a7f1c1b9938f3eeea6bd3dc1cc93fe140a61d478
Parents: 7c72c7f
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 09:45:48 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 09:46:54 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +++
 .../mapreduce/lib/output/MapFileOutputFormat.java   | 12 +++-
 .../mapreduce/lib/output/TestFileOutputCommitter.java   | 10 ++
 3 files changed, 24 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f1c1b9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 507c6f4..440c571 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -38,6 +38,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with hidden
+files/folders. (Maysam Yabandeh via harsh)
+
 MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
 causes counter limits are not reset correctly.
 (Zhihai Xu via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f1c1b9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
index b8cb997..da33770 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/MapFileOutputFormat.java
@@ -24,6 +24,7 @@ import java.util.Arrays;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.PathFilter;
 
 import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.io.WritableComparable;
@@ -88,7 +89,16 @@ public class MapFileOutputFormat
   public static MapFile.Reader[] getReaders(Path dir,
   Configuration conf) throws IOException {
 FileSystem fs = dir.getFileSystem(conf);
-Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
+PathFilter filter = new PathFilter() {
+  @Override
+  public boolean accept(Path path) {
+String name = path.getName();
+if (name.startsWith("_") || name.startsWith("."))
+  return false;
+return true;
+  }
+};
+Path[] names = FileUtil.stat2Paths(fs.listStatus(dir, filter));
 
 // sort names, so that hash partitioning works
 Arrays.sort(names);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7f1c1b9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index 0d4ab98..5c4428b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -27,6 +27,7 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
+import

hadoop git commit: MAPREDUCE-5190. Unnecessary condition test in RandomSampler. Contributed by Jingguo Yao.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk b46c2bb51 -> 1d5c796d6


MAPREDUCE-5190. Unnecessary condition test in RandomSampler. Contributed by 
Jingguo Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d5c796d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d5c796d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d5c796d

Branch: refs/heads/trunk
Commit: 1d5c796d654c8959972d15cc6742731a99380bfc
Parents: b46c2bb
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:03:25 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:03:25 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt   | 3 +++
 .../apache/hadoop/mapreduce/lib/partition/InputSampler.java| 6 ++
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d5c796d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2920811..e98aacd 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5190. Unnecessary condition test in RandomSampler.
+(Jingguo Yao via harsh)
+
 MAPREDUCE-6239. Consolidate TestJobConf classes in
 hadoop-mapreduce-client-jobclient and hadoop-mapreduce-client-core
 (Varun Saxena via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d5c796d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
index 4668f49..cce9f37 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
@@ -230,10 +230,8 @@ public class InputSampler<K,V> extends Configured 
implements Tool  {
   // to reflect the possibility of existing elements being
   // pushed out
   int ind = r.nextInt(numSamples);
-  if (ind != numSamples) {
-samples.set(ind, ReflectionUtils.copy(job.getConfiguration(),
- reader.getCurrentKey(), null));
-  }
+  samples.set(ind, ReflectionUtils.copy(job.getConfiguration(),
+   reader.getCurrentKey(), null));
   freq *= (numSamples - 1) / (double) numSamples;
 }
   }



hadoop git commit: MAPREDUCE-6286. Amend commit to CHANGES.txt for backport into 2.7.0.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d43c0da7d -> 40682a4aa


MAPREDUCE-6286. Amend commit to CHANGES.txt for backport into 2.7.0.

(cherry picked from commit 8770c82acc948bc5127afb1c59072718fd04630c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40682a4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40682a4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40682a4a

Branch: refs/heads/branch-2
Commit: 40682a4aad467fd52c228cc83de266df38d4a819
Parents: d43c0da
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:15:52 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:16:12 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40682a4a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9235798..5ebf835 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -44,10 +44,6 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with hidden
 files/folders. (Maysam Yabandeh via harsh)
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6213. NullPointerException caused by job history server addr not
 resolvable. (Peng Zhang via harsh)
 
@@ -150,6 +146,10 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 



hadoop git commit: MAPREDUCE-6286. Amend commit to CHANGES.txt for backport into 2.7.0.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1d5c796d6 -> 8770c82ac


MAPREDUCE-6286. Amend commit to CHANGES.txt for backport into 2.7.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8770c82a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8770c82a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8770c82a

Branch: refs/heads/trunk
Commit: 8770c82acc948bc5127afb1c59072718fd04630c
Parents: 1d5c796
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:15:52 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:15:52 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8770c82a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e98aacd..b75d8aa 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -292,10 +292,6 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5448. MapFileOutputFormat#getReaders bug with hidden
 files/folders. (Maysam Yabandeh via harsh)
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6213. NullPointerException caused by job history server addr not
 resolvable. (Peng Zhang via harsh)
 
@@ -398,6 +394,10 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 



hadoop git commit: MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes counter limits are not reset correctly. Contributed by Zhihai Xu.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 08f2f7ed3 -> 929b04ce3


MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which causes 
counter limits are not reset correctly. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/929b04ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/929b04ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/929b04ce

Branch: refs/heads/branch-2.7
Commit: 929b04ce3a4fe419dece49ed68d4f6228be214c1
Parents: 08f2f7e
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:18:32 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:18:32 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java| 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/929b04ce/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 323e0c4..6dc4cab 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -97,6 +97,10 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/929b04ce/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
index 43b2df2..f343d7c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryViewer.java
@@ -93,7 +93,7 @@ public class HistoryViewer {
   final Configuration jobConf = new Configuration(conf);
   try {
 jobConf.addResource(fs.open(jobConfPath), jobConfPath.toString());
-Limits.reset(conf);
+Limits.reset(jobConf);
   } catch (FileNotFoundException fnf) {
 if (LOG.isWarnEnabled()) {
   LOG.warn("Missing job conf in history", fnf);



hadoop git commit: MAPREDUCE-5448. Addendum fix to remove deprecation warning by junit.Assert import in TestFileOutputCommitter.

2015-03-21 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 40682a4aa -> 6e6e0e4a3


MAPREDUCE-5448. Addendum fix to remove deprecation warning by junit.Assert 
import in TestFileOutputCommitter.

(cherry picked from commit 4cd54d9a297435150ab61803284eb05603f114e2)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e6e0e4a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e6e0e4a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e6e0e4a

Branch: refs/heads/branch-2
Commit: 6e6e0e4a3763680370404501d3fe319ee1631b71
Parents: 40682a4
Author: Harsh J ha...@cloudera.com
Authored: Sun Mar 22 10:33:15 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Mar 22 10:33:29 2015 +0530

--
 .../hadoop/mapreduce/lib/output/TestFileOutputCommitter.java  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e6e0e4a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
index 5c4428b..7678f35 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java
@@ -27,7 +27,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
-import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
@@ -315,7 +314,7 @@ public class TestFileOutputCommitter extends TestCase {
 try {
   MapFileOutputFormat.getReaders(outDir, conf);
 } catch (Exception e) {
-  Assert.fail("Fail to read from MapFileOutputFormat: " + e);
+  fail("Fail to read from MapFileOutputFormat: " + e);
   e.printStackTrace();
 }
 



hadoop git commit: MAPREDUCE-5807. Print usage for TeraSort job. Contributed by Rohith.

2015-03-18 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 34117325b -> 9d72f9397


MAPREDUCE-5807. Print usage for TeraSort job. Contributed by Rohith.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d72f939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d72f939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d72f939

Branch: refs/heads/trunk
Commit: 9d72f939759f407796ecb4715c2dc2f0d36d5578
Parents: 3411732
Author: Harsh J ha...@cloudera.com
Authored: Wed Mar 18 15:34:44 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Mar 18 15:36:52 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  2 +
 .../hadoop/examples/terasort/TeraGen.java   |  6 +-
 .../examples/terasort/TeraInputFormat.java  | 16 ++--
 .../examples/terasort/TeraOutputFormat.java |  8 +-
 .../hadoop/examples/terasort/TeraScheduler.java |  1 -
 .../hadoop/examples/terasort/TeraSort.java  | 28 +--
 .../examples/terasort/TeraSortConfigKeys.java   | 77 
 .../hadoop/examples/terasort/TestTeraSort.java  |  5 ++
 8 files changed, 123 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d72f939/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3936c9b..4a9b4c7 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -253,6 +253,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5807. Print usage by TeraSort job. (Rohith via harsh)
+
 MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.
 (Amir Sanjar via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d72f939/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index e8b6503..d7d751a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -70,7 +70,6 @@ public class TeraGen extends Configured implements Tool {
 
   public static enum Counters {CHECKSUM}
 
-  public static final String NUM_ROWS = "mapreduce.terasort.num-rows";
   /**
* An input format that assigns ranges of longs to each mapper.
*/
@@ -189,11 +188,12 @@ public class TeraGen extends Configured implements Tool {
   }
   
   static long getNumberOfRows(JobContext job) {
-return job.getConfiguration().getLong(NUM_ROWS, 0);
+return job.getConfiguration().getLong(TeraSortConfigKeys.NUM_ROWS.key(),
+TeraSortConfigKeys.DEFAULT_NUM_ROWS);
   }
   
   static void setNumberOfRows(Job job, long numRows) {
-job.getConfiguration().setLong(NUM_ROWS, numRows);
+job.getConfiguration().setLong(TeraSortConfigKeys.NUM_ROWS.key(), numRows);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d72f939/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
index 88b12dd..20ce8ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
@@ -50,10 +50,6 @@ import org.apache.hadoop.util.StringUtils;
 public class TeraInputFormat extends FileInputFormat<Text,Text> {
 
   static final String PARTITION_FILENAME = "_partition.lst";
-  private static final String NUM_PARTITIONS = 
-"mapreduce.terasort.num.partitions";
-  private static final String SAMPLE_SIZE = 
-"mapreduce.terasort.partitions.sample";
   static final int KEY_LENGTH = 10;
   static final int VALUE_LENGTH = 90;
   static final int RECORD_LENGTH = KEY_LENGTH + VALUE_LENGTH;
@@ -123,11 +119,16 @@ public class TeraInputFormat extends 
FileInputFormat<Text,Text> {
 final TeraInputFormat inFormat = new TeraInputFormat();
 final

hadoop git commit: MAPREDUCE-5556. mapred docs have incorrect classpath. Contributed by harsh.

2015-03-18 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 2943ee5e3 -> 927f13b6c


MAPREDUCE-5556. mapred docs have incorrect classpath. Contributed by harsh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/927f13b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/927f13b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/927f13b6

Branch: refs/heads/branch-1
Commit: 927f13b6cb0c5e500d59aa68d952fb6e845b7c37
Parents: 2943ee5
Author: Harsh J ha...@cloudera.com
Authored: Wed Mar 18 17:49:02 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Mar 18 17:49:02 2015 +0530

--
 CHANGES.txt  | 2 ++
 src/docs/src/documentation/content/xdocs/mapred_tutorial.xml | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/927f13b6/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 33dccad..e6bfac1 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -66,6 +66,8 @@ Release 1.3.0 - unreleased
 
   BUG FIXES
 
+MAPREDUCE-5556. mapred docs have incorrect classpath (harsh)
+
 MAPREDUCE-5272. Minor error in javadoc of TestMRWithDistributedCache
 (Zhijie Shen via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/927f13b6/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
--
diff --git a/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml 
b/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
index 2f6e5c6..8f3efe1 100644
--- a/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
+++ b/src/docs/src/documentation/content/xdocs/mapred_tutorial.xml
@@ -550,7 +550,7 @@
 <p>
   <code>$ mkdir wordcount_classes</code><br/>
   <code>
-$ javac -classpath 
${HADOOP_HOME}/hadoop-${HADOOP_VERSION}-core.jar 
+$ javac -classpath ${HADOOP_HOME}/hadoop-core-${HADOOP_VERSION}.jar
   -d wordcount_classes WordCount.java
   </code><br/>
   <code>$ jar -cvf /usr/joe/wordcount.jar -C wordcount_classes/ 
./</code> 



hadoop git commit: MAPREDUCE-5807. Print usage for TeraSort job. Contributed by Rohith.

2015-03-18 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 18740f938 -> 4e80c4cd5


MAPREDUCE-5807. Print usage for TeraSort job. Contributed by Rohith.

(cherry picked from commit 9d72f939759f407796ecb4715c2dc2f0d36d5578)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e80c4cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e80c4cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e80c4cd

Branch: refs/heads/branch-2
Commit: 4e80c4cd5ec48c1325f1c8a4e7771cf7137682db
Parents: 18740f9
Author: Harsh J ha...@cloudera.com
Authored: Wed Mar 18 15:34:44 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Wed Mar 18 17:31:25 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  2 +
 .../hadoop/examples/terasort/TeraGen.java   |  6 +-
 .../examples/terasort/TeraInputFormat.java  | 16 ++--
 .../examples/terasort/TeraOutputFormat.java |  8 +-
 .../hadoop/examples/terasort/TeraScheduler.java |  1 -
 .../hadoop/examples/terasort/TeraSort.java  | 28 +--
 .../examples/terasort/TeraSortConfigKeys.java   | 77 
 .../hadoop/examples/terasort/TestTeraSort.java  |  5 ++
 8 files changed, 123 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e80c4cd/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index abcfe8c..5cd974c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5807. Print usage by TeraSort job. (Rohith via harsh)
+
 MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.
 (Amir Sanjar via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e80c4cd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
index e8b6503..d7d751a 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraGen.java
@@ -70,7 +70,6 @@ public class TeraGen extends Configured implements Tool {
 
   public static enum Counters {CHECKSUM}
 
-  public static final String NUM_ROWS = "mapreduce.terasort.num-rows";
   /**
* An input format that assigns ranges of longs to each mapper.
*/
@@ -189,11 +188,12 @@ public class TeraGen extends Configured implements Tool {
   }
   
   static long getNumberOfRows(JobContext job) {
-return job.getConfiguration().getLong(NUM_ROWS, 0);
+return job.getConfiguration().getLong(TeraSortConfigKeys.NUM_ROWS.key(),
+TeraSortConfigKeys.DEFAULT_NUM_ROWS);
   }
   
   static void setNumberOfRows(Job job, long numRows) {
-job.getConfiguration().setLong(NUM_ROWS, numRows);
+job.getConfiguration().setLong(TeraSortConfigKeys.NUM_ROWS.key(), numRows);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e80c4cd/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
index 88b12dd..20ce8ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java
@@ -50,10 +50,6 @@ import org.apache.hadoop.util.StringUtils;
 public class TeraInputFormat extends FileInputFormat<Text,Text> {
 
   static final String PARTITION_FILENAME = "_partition.lst";
-  private static final String NUM_PARTITIONS = 
-"mapreduce.terasort.num.partitions";
-  private static final String SAMPLE_SIZE = 
-"mapreduce.terasort.partitions.sample";
   static final int KEY_LENGTH = 10;
   static final int VALUE_LENGTH = 90;
   static final int RECORD_LENGTH = KEY_LENGTH + VALUE_LENGTH;
@@ -123,11 +119,16 @@ public class TeraInputFormat extends 
FileInputFormatText,Text

hadoop git commit: MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement. Contributed by Amir Sanjar.

2015-03-17 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 48c2db34e - e5370477c


MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement. Contributed 
by Amir Sanjar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5370477
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5370477
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5370477

Branch: refs/heads/trunk
Commit: e5370477c2d00745e695507ecfdf86de59c5f5b9
Parents: 48c2db3
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 14:01:15 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 14:11:54 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java | 2 --
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5370477/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b5baf51..3936c9b 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -253,6 +253,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.
+(Amir Sanjar via harsh)
+
 MAPREDUCE-6100. replace mapreduce.job.credentials.binary with
 MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability.
 (Zhihai Xu via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5370477/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
--
diff --git 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
index cd55483..4e85ce2 100644
--- 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
+++ 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
@@ -30,8 +30,6 @@ import java.util.Set;
 
 import org.junit.Test;
 
-import com.sun.tools.javac.code.Attribute.Array;
-
 public class TestRandomAlgorithm {
   private static final int[][] parameters = new int[][] {
 {5, 1, 1}, 



hadoop git commit: MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement. Contributed by Amir Sanjar.

2015-03-17 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 991ac04af - c58786794


MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement. Contributed 
by Amir Sanjar.

(cherry picked from commit 75e4670408a058efa95eaa768fedbe614008658f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5878679
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5878679
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5878679

Branch: refs/heads/branch-2
Commit: c58786794b2b3fab71f0100709c5851248223556
Parents: 991ac04
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 14:01:15 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 14:13:23 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java | 2 --
 2 files changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5878679/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index ec0e49d..abcfe8c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-4653. TestRandomAlgorithm has an unused import statement.
+(Amir Sanjar via harsh)
+
 MAPREDUCE-6100. replace mapreduce.job.credentials.binary with
 MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability.
 (Zhihai Xu via harsh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5878679/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
--
diff --git 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
index cd55483..4e85ce2 100644
--- 
a/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
+++ 
b/hadoop-tools/hadoop-gridmix/src/test/java/org/apache/hadoop/mapred/gridmix/TestRandomAlgorithm.java
@@ -30,8 +30,6 @@ import java.util.Set;
 
 import org.junit.Test;
 
-import com.sun.tools.javac.code.Attribute.Array;
-
 public class TestRandomAlgorithm {
   private static final int[][] parameters = new int[][] {
 {5, 1, 1}, 



hadoop git commit: MAPREDUCE-6105. Inconsistent configuration in property mapreduce.reduce.shuffle.merge.percent. Contributed by Ray Chiang.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce5de93a5 - 685dbafbe


MAPREDUCE-6105. Inconsistent configuration in property 
mapreduce.reduce.shuffle.merge.percent. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/685dbafb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/685dbafb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/685dbafb

Branch: refs/heads/trunk
Commit: 685dbafbe2154e5bf4b638da0668ce32d8c879b0
Parents: ce5de93
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 01:17:34 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 02:28:09 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java  | 1 +
 .../apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java   | 5 +++--
 3 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/685dbafb/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d02d725..52880f6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -253,6 +253,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6105. Inconsistent configuration in property
+mapreduce.reduce.shuffle.merge.percent. (Ray Chiang via harsh)
+
 MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration,
 for debug purposes. (Plamen Jeliazkov via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/685dbafb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 3aa304a..f0a6ddf 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -305,6 +305,7 @@ public interface MRJobConfig {
 = mapreduce.reduce.shuffle.memory.limit.percent;
 
   public static final String SHUFFLE_MERGE_PERCENT = 
mapreduce.reduce.shuffle.merge.percent;
+  public static final float DEFAULT_SHUFFLE_MERGE_PERCENT = 0.66f;
 
   public static final String REDUCE_FAILURES_MAXPERCENT = 
mapreduce.reduce.failures.maxpercent;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/685dbafb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index a4b1aa8..8bf17ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -191,8 +191,9 @@ public class MergeManagerImplK, V implements 
MergeManagerK, V {
 this.memToMemMergeOutputsThreshold = 
 jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, 
ioSortFactor);
 this.mergeThreshold = (long)(this.memoryLimit * 
-  jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 
-   0.90f));
+  jobConf.getFloat(
+MRJobConfig.SHUFFLE_MERGE_PERCENT,
+MRJobConfig.DEFAULT_SHUFFLE_MERGE_PERCENT));
 LOG.info(MergerManager: memoryLimit= + memoryLimit + ,  +
  maxSingleShuffleLimit= + maxSingleShuffleLimit + ,  +
  mergeThreshold= + mergeThreshold + ,  + 



hadoop git commit: MAPREDUCE-6100. replace mapreduce.job.credentials.binary with MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability. Contributed by Zhihai Xu.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 046521cd6 - f222bde27


MAPREDUCE-6100. replace mapreduce.job.credentials.binary with 
MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability. 
Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f222bde2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f222bde2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f222bde2

Branch: refs/heads/trunk
Commit: f222bde273cc10a38945dc31e85206a0c4f06a12
Parents: 046521c
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 11:06:35 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 11:06:35 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java  | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f222bde2/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 52880f6..ee21b70 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -253,6 +253,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6100. replace mapreduce.job.credentials.binary with
+MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6105. Inconsistent configuration in property
 mapreduce.reduce.shuffle.merge.percent. (Ray Chiang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f222bde2/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 30a87c7..023bd63 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -383,7 +383,7 @@ class JobSubmitter {
   throws IOException {
 // add tokens and secrets coming from a token storage file
 String binaryTokenFilename =
-  conf.get(mapreduce.job.credentials.binary);
+  conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
 if (binaryTokenFilename != null) {
   Credentials binary = Credentials.readTokenStorageFile(
   FileSystem.getLocal(conf).makeQualified(



hadoop git commit: HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 9f227ad69 - cbb885836


HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)

(cherry picked from commit 046521cd6511b7fc6d9478cb2bed90d8e75fca20)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cbb88583
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cbb88583
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cbb88583

Branch: refs/heads/branch-2
Commit: cbb885836741085757f5e5da3e7413af9cbceaf6
Parents: 9f227ad
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 00:59:50 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 10:28:52 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 9 -
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb88583/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d9f145..f788a9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -8,6 +8,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cbb88583/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 0a8720a..8655061 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -550,7 +551,13 @@ public class DFSOutputStream extends FSOutputSummer
 } catch (Throwable e) {
   // Log warning if there was a real error.
   if (restartingNodeIndex.get() == -1) {
-DFSClient.LOG.warn(DataStreamer Exception, e);
+// Since their messages are descriptive enough, do not always
+// log a verbose stack-trace WARN for quota exceptions.
+if (e instanceof QuotaExceededException) {
+  DFSClient.LOG.debug(DataStreamer Quota Exception, e);
+} else {
+  DFSClient.LOG.warn(DataStreamer Exception, e);
+}
   }
   if (e instanceof IOException) {
 setLastException((IOException)e);



hadoop git commit: HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 56085203c - 046521cd6


HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/046521cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/046521cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/046521cd

Branch: refs/heads/trunk
Commit: 046521cd6511b7fc6d9478cb2bed90d8e75fca20
Parents: 5608520
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 00:59:50 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 10:28:17 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSOutputStream.java   | 9 -
 2 files changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/046521cd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d313b6c..9339b97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/046521cd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 130bb6e..286ae7d 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -551,7 +552,13 @@ public class DFSOutputStream extends FSOutputSummer
 } catch (Throwable e) {
   // Log warning if there was a real error.
   if (restartingNodeIndex.get() == -1) {
-DFSClient.LOG.warn(DataStreamer Exception, e);
+// Since their messages are descriptive enough, do not always
+// log a verbose stack-trace WARN for quota exceptions.
+if (e instanceof QuotaExceededException) {
+  DFSClient.LOG.debug(DataStreamer Quota Exception, e);
+} else {
+  DFSClient.LOG.warn(DataStreamer Exception, e);
+}
   }
   if (e instanceof IOException) {
 setLastException((IOException)e);



hadoop git commit: MAPREDUCE-6100. replace mapreduce.job.credentials.binary with MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability. Contributed by Zhihai Xu.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cbb885836 - 5e403e8b8


MAPREDUCE-6100. replace mapreduce.job.credentials.binary with 
MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability. 
Contributed by Zhihai Xu.

(cherry picked from commit f222bde273cc10a38945dc31e85206a0c4f06a12)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e403e8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e403e8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e403e8b

Branch: refs/heads/branch-2
Commit: 5e403e8b8d16640699428ead8e5e9f343bc45c10
Parents: cbb8858
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 11:06:35 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 11:08:32 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt | 4 
 .../src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java  | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e403e8b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c0c6d57..c8aa10a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6100. replace mapreduce.job.credentials.binary with
+MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY for better readability.
+(Zhihai Xu via harsh)
+
 MAPREDUCE-6105. Inconsistent configuration in property
 mapreduce.reduce.shuffle.merge.percent. (Ray Chiang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e403e8b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 30a87c7..023bd63 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -383,7 +383,7 @@ class JobSubmitter {
   throws IOException {
 // add tokens and secrets coming from a token storage file
 String binaryTokenFilename =
-  conf.get(mapreduce.job.credentials.binary);
+  conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
 if (binaryTokenFilename != null) {
   Credentials binary = Credentials.readTokenStorageFile(
   FileSystem.getLocal(conf).makeQualified(



hadoop git commit: MAPREDUCE-5272. A Minor Error in Javadoc of TestMRWithDistributedCache in Branch-1. Contributed by Zhijie Shen.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-1 22543d34b - 2943ee5e3


MAPREDUCE-5272. A Minor Error in Javadoc of TestMRWithDistributedCache in 
Branch-1. Contributed by Zhijie Shen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2943ee5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2943ee5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2943ee5e

Branch: refs/heads/branch-1
Commit: 2943ee5e33887b464e37293ff649121b0f6a8fb9
Parents: 22543d3
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 10:43:19 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 10:43:19 2015 +0530

--
 CHANGES.txt   | 3 +++
 .../org/apache/hadoop/filecache/TestMRWithDistributedCache.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2943ee5e/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 786ab47..33dccad 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -66,6 +66,9 @@ Release 1.3.0 - unreleased
 
   BUG FIXES
 
+MAPREDUCE-5272. Minor error in javadoc of TestMRWithDistributedCache
+(Zhijie Shen via harsh)
+
 MAPREDUCE-4490. Fixed LinuxTaskController to re-initialize user log
 directory when JVM reuse option is enabled.  (Sam Liu via eyang)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2943ee5e/src/test/org/apache/hadoop/filecache/TestMRWithDistributedCache.java
--
diff --git 
a/src/test/org/apache/hadoop/filecache/TestMRWithDistributedCache.java 
b/src/test/org/apache/hadoop/filecache/TestMRWithDistributedCache.java
index 7736043..d277a98 100644
--- a/src/test/org/apache/hadoop/filecache/TestMRWithDistributedCache.java
+++ b/src/test/org/apache/hadoop/filecache/TestMRWithDistributedCache.java
@@ -48,7 +48,7 @@ import 
org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
 
 /**
  * Tests the use of the
- * {@link org.apache.hadoop.mapreduce.filecache.DistributedCache} within the
+ * {@link org.apache.hadoop.filecache.DistributedCache} within the
  * full MR flow as well as the LocalJobRunner. This ought to be part of the
  * filecache package, but that package is not currently in mapred, so cannot
  * depend on MR for testing.



hadoop git commit: MAPREDUCE-6105. Inconsistent configuration in property mapreduce.reduce.shuffle.merge.percent. Contributed by Ray Chiang.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4568acaa6 - 0e9f78dda


MAPREDUCE-6105. Inconsistent configuration in property 
mapreduce.reduce.shuffle.merge.percent. Contributed by Ray Chiang.

(cherry picked from commit 26a23b11598b3757ed4973781890e2cd2fc270df)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e9f78dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e9f78dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e9f78dd

Branch: refs/heads/branch-2
Commit: 0e9f78dda268e4a7e3edc25b7cfc535e3f89a691
Parents: 4568aca
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 01:17:34 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 02:27:33 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java  | 1 +
 .../apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java   | 5 +++--
 3 files changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e9f78dd/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5feb14d..c0c6d57 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6105. Inconsistent configuration in property
+mapreduce.reduce.shuffle.merge.percent. (Ray Chiang via harsh)
+
 MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration,
 for debug purposes. (Plamen Jeliazkov via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e9f78dd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index d445bb1..4cff68f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -300,6 +300,7 @@ public interface MRJobConfig {
 = mapreduce.reduce.shuffle.memory.limit.percent;
 
   public static final String SHUFFLE_MERGE_PERCENT = 
mapreduce.reduce.shuffle.merge.percent;
+  public static final float DEFAULT_SHUFFLE_MERGE_PERCENT = 0.66f;
 
   public static final String REDUCE_FAILURES_MAXPERCENT = 
mapreduce.reduce.failures.maxpercent;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e9f78dd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index a4b1aa8..8bf17ef 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -191,8 +191,9 @@ public class MergeManagerImplK, V implements 
MergeManagerK, V {
 this.memToMemMergeOutputsThreshold = 
 jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, 
ioSortFactor);
 this.mergeThreshold = (long)(this.memoryLimit * 
-  jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, 
-   0.90f));
+  jobConf.getFloat(
+MRJobConfig.SHUFFLE_MERGE_PERCENT,
+MRJobConfig.DEFAULT_SHUFFLE_MERGE_PERCENT));
 LOG.info(MergerManager: memoryLimit= + memoryLimit + ,  +
  maxSingleShuffleLimit= + maxSingleShuffleLimit + ,  +
  mergeThreshold= + mergeThreshold + ,  + 



hadoop git commit: MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration, for debug purposes. Contributed by Plamen Jeliazkov.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eb4eb63de - 2684b0b44


MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration, for debug 
purposes. Contributed by Plamen Jeliazkov.

(cherry picked from commit 571f75b468a4412be2178466cb5d3888b05cd076)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2684b0b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2684b0b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2684b0b4

Branch: refs/heads/branch-2
Commit: 2684b0b440f52212e8cf75fda4075590f21059d0
Parents: eb4eb63
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 01:01:06 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 01:02:21 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapred/JobConf.java | 5 +
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 5 +
 3 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2684b0b4/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2a79395..5feb14d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration,
+for debug purposes. (Plamen Jeliazkov via harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2684b0b4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 51b99be..c2bb1d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -2021,5 +2021,10 @@ public class JobConf extends Configuration {
   }
   
 
+  /* For debugging. Dump configurations to system output as XML format. */
+  public static void main(String[] args) throws Exception {
+new JobConf(new Configuration()).writeXml(System.out);
+  }
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2684b0b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f6ebb51..be5471d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1808,4 +1808,9 @@ public class YarnConfiguration extends Configuration {
 }
 return clusterId;
   }
+
+  /* For debugging. Dump configurations to system output as XML format. */
+  public static void main(String[] args) throws Exception {
+new YarnConfiguration(new Configuration()).writeXml(System.out);
+  }
 }



hadoop git commit: MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration, for debug purposes. Contributed by Plamen Jeliazkov.

2015-03-16 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf3275dba - 587d8be17


MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration, for debug 
purposes. Contributed by Plamen Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/587d8be1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/587d8be1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/587d8be1

Branch: refs/heads/trunk
Commit: 587d8be17bb9e71bad2881e24e7372d3e15125d3
Parents: bf3275d
Author: Harsh J ha...@cloudera.com
Authored: Tue Mar 17 01:01:06 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Mar 17 01:03:08 2015 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapred/JobConf.java | 5 +
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 5 +
 3 files changed, 13 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d8be1/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 28460d3..d02d725 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -253,6 +253,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-4414. Add main methods to JobConf and YarnConfiguration,
+for debug purposes. (Plamen Jeliazkov via harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d8be1/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index c388bda..9cac685 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -2140,5 +2140,10 @@ public class JobConf extends Configuration {
 }
   }
 
+  /* For debugging. Dump configurations to system output as XML format. */
+  public static void main(String[] args) throws Exception {
+new JobConf(new Configuration()).writeXml(System.out);
+  }
+
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/587d8be1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f40c999..a527af4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1808,4 +1808,9 @@ public class YarnConfiguration extends Configuration {
 }
 return clusterId;
   }
+
+  /* For debugging. Dump configurations to system output as XML format. */
+  public static void main(String[] args) throws Exception {
+new YarnConfiguration(new Configuration()).writeXml(System.out);
+  }
 }



hadoop git commit: HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. Contributed by Wellington Chev

2015-02-20 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eaeaf80d3 - 5c2c6b00d


HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and 
dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. 
Contributed by Wellington Chevreuil.

(cherry picked from commit b9a17909ba39898120a096cb6ae90104640690db)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c2c6b00
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c2c6b00
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c2c6b00

Branch: refs/heads/branch-2
Commit: 5c2c6b00dd35ce422dccfbbfff77a3933d93f33b
Parents: eaeaf80
Author: Harsh J ha...@cloudera.com
Authored: Fri Feb 20 19:20:41 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Fri Feb 20 19:21:34 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  5 +
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 15 +++
 2 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2c6b00/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 40196b3..363ddca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -39,6 +39,11 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7752. Improve description for
+dfs.namenode.num.extra.edits.retained
+and dfs.namenode.num.checkpoints.retained properties on
+hdfs-default.xml (Wellington Chevreuil via harsh)
+
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)
 
 HDFS-7186. Document the hadoop trace command. (Masatake Iwasaki via Colin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c2c6b00/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index bb28f01..2981db2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -852,9 +852,9 @@
 property
   namedfs.namenode.num.checkpoints.retained/name
   value2/value
-  descriptionThe number of image checkpoint files that will be retained by
+  descriptionThe number of image checkpoint files (fsimage_*) that will be 
retained by
   the NameNode and Secondary NameNode in their storage directories. All edit
-  logs necessary to recover an up-to-date namespace from the oldest retained
+  logs (stored on edits_* files) necessary to recover an up-to-date namespace 
from the oldest retained
   checkpoint will also be retained.
   /description
 /property
@@ -863,8 +863,15 @@
   namedfs.namenode.num.extra.edits.retained/name
   value100/value
   descriptionThe number of extra transactions which should be retained
-  beyond what is minimally necessary for a NN restart. This can be useful for
-  audit purposes or for an HA setup where a remote Standby Node may have
+  beyond what is minimally necessary for a NN restart.
+  It does not translate directly to file's age, or the number of files kept,
+  but to the number of transactions (here edits means transactions).
+  One edit file may contain several transactions (edits).
+  During checkpoint, NameNode will identify the total number of edits to 
retain as extra by
+  checking the latest checkpoint transaction value, subtracted by the value of 
this property.
+  Then, it scans edits files to identify the older ones that don't include the 
computed range of
+  retained transactions that are to be kept around, and purges them 
subsequently.
+  The retainment can be useful for audit purposes or for an HA setup where a 
remote Standby Node may have
   been offline for some time and need to have a longer backlog of retained
   edits in order to start again.
   Typically each edit is on the order of a few hundred bytes, so the default



hadoop git commit: HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. Contributed by Wellington Chev

2015-02-20 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk c0d9b9395 - b9a17909b


HDFS-7752. Improve description for dfs.namenode.num.extra.edits.retained and 
dfs.namenode.num.checkpoints.retained properties on hdfs-default.xml. 
Contributed by Wellington Chevreuil.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9a17909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9a17909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9a17909

Branch: refs/heads/trunk
Commit: b9a17909ba39898120a096cb6ae90104640690db
Parents: c0d9b93
Author: Harsh J ha...@cloudera.com
Authored: Fri Feb 20 19:20:41 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Fri Feb 20 19:20:41 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  5 +
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 15 +++
 2 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a17909/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80a086a..5f3cc02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -341,6 +341,11 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HDFS-7752. Improve description for
+dfs.namenode.num.extra.edits.retained
+and dfs.namenode.num.checkpoints.retained properties on
+hdfs-default.xml (Wellington Chevreuil via harsh)
+
 HDFS-7055. Add tracing to DFSInputStream (cmccabe)
 
 HDFS-7186. Document the hadoop trace command. (Masatake Iwasaki via Colin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9a17909/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9299ea3..85d2273 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -852,9 +852,9 @@
 property
   namedfs.namenode.num.checkpoints.retained/name
   value2/value
-  descriptionThe number of image checkpoint files that will be retained by
+  descriptionThe number of image checkpoint files (fsimage_*) that will be 
retained by
   the NameNode and Secondary NameNode in their storage directories. All edit
-  logs necessary to recover an up-to-date namespace from the oldest retained
+  logs (stored on edits_* files) necessary to recover an up-to-date namespace 
from the oldest retained
   checkpoint will also be retained.
   /description
 /property
@@ -863,8 +863,15 @@
   namedfs.namenode.num.extra.edits.retained/name
   value100/value
   descriptionThe number of extra transactions which should be retained
-  beyond what is minimally necessary for a NN restart. This can be useful for
-  audit purposes or for an HA setup where a remote Standby Node may have
+  beyond what is minimally necessary for a NN restart.
+  It does not translate directly to file's age, or the number of files kept,
+  but to the number of transactions (here edits means transactions).
+  One edit file may contain several transactions (edits).
+  During checkpoint, NameNode will identify the total number of edits to 
retain as extra by
+  checking the latest checkpoint transaction value, subtracted by the value of 
this property.
+  Then, it scans edits files to identify the older ones that don't include the 
computed range of
+  retained transactions that are to be kept around, and purges them 
subsequently.
+  The retainment can be useful for audit purposes or for an HA setup where a 
remote Standby Node may have
   been offline for some time and need to have a longer backlog of retained
   edits in order to start again.
   Typically each edit is on the order of a few hundred bytes, so the default



hadoop git commit: HADOOP-11512. Use getTrimmedStrings when reading serialization keys. Contributed by Ryan P.

2015-02-09 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2b722b904 - 442bc776d


HADOOP-11512. Use getTrimmedStrings when reading serialization keys. 
Contributed by Ryan P.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/442bc776
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/442bc776
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/442bc776

Branch: refs/heads/branch-2
Commit: 442bc776db7642eae02ee83231ecdbec78d3cb78
Parents: 2b722b9
Author: Harsh J ha...@cloudera.com
Authored: Mon Feb 9 11:10:45 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Feb 10 12:52:41 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../hadoop/io/serializer/SerializationFactory.java   |  2 +-
 .../hadoop/io/serializer/TestSerializationFactory.java   | 11 +++
 3 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a61c349..fec5613 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -202,6 +202,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11512. Use getTrimmedStrings when reading serialization keys
+(Ryan P via harsh)
+
 HADOOP-11488. Difference in default connection timeout for S3A FS
 (Daisuke Kobayashi via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
index 52a0a25..aa3c86a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
@@ -55,7 +55,7 @@ public class SerializationFactory extends Configured {
*/
   public SerializationFactory(Configuration conf) {
 super(conf);
-for (String serializerName : conf.getStrings(
+for (String serializerName : conf.getTrimmedStrings(
   CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
   new String[]{WritableSerialization.class.getName(),
 AvroSpecificSerialization.class.getName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/442bc776/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index 18c2637..b3c8bee 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.serializer;
 
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertNotNull;
@@ -41,4 +43,13 @@ public class TestSerializationFactory {
 assertNull(A null should be returned if there are no deserializers found,
 factory.getDeserializer(TestSerializationFactory.class));
   }
+
+  @Test
+  public void testSerializationKeyIsTrimmed() {
+Configuration conf = new Configuration();
+conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
" org.apache.hadoop.io.serializer.WritableSerialization ");
+SerializationFactory factory = new SerializationFactory(conf);
+assertNotNull(Valid class must be returned,
+  factory.getSerializer(LongWritable.class));
+   }
 }



hadoop git commit: HADOOP-11512. Use getTrimmedStrings when reading serialization keys. Contributed by Ryan P.

2015-02-09 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk b73956fda - e0ec0718d


HADOOP-11512. Use getTrimmedStrings when reading serialization keys. 
Contributed by Ryan P.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0ec0718
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0ec0718
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0ec0718

Branch: refs/heads/trunk
Commit: e0ec0718d033e84bda2ebeab7beb00b7dbd990c0
Parents: b73956f
Author: Harsh J ha...@cloudera.com
Authored: Mon Feb 9 10:41:25 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Feb 10 12:51:56 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   |  3 +++
 .../apache/hadoop/io/serializer/SerializationFactory.java |  2 +-
 .../hadoop/io/serializer/TestSerializationFactory.java| 10 ++
 3 files changed, 14 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index aa86360..8b80998 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -588,6 +588,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11512. Use getTrimmedStrings when reading serialization keys
+(Ryan P via harsh)
+
 HADOOP-11488. Difference in default connection timeout for S3A FS
 (Daisuke Kobayashi via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
index d6c6588..3f177f8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/serializer/SerializationFactory.java
@@ -60,7 +60,7 @@ public class SerializationFactory extends Configured {
   + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY
   +  properly to have serialization support (it is currently not 
set).);
 } else {
-  for (String serializerName : conf.getStrings(
+  for (String serializerName : conf.getTrimmedStrings(
   CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, new String[] {
   WritableSerialization.class.getName(),
   AvroSpecificSerialization.class.getName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0ec0718/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
index c5805be..6774155 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/serializer/TestSerializationFactory.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.serializer;
 
+import org.apache.hadoop.io.LongWritable;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import static org.junit.Assert.assertNull;
@@ -76,4 +77,13 @@ public class TestSerializationFactory {
 assertNull(A null should be returned if there are no deserializers found,
 factory.getDeserializer(TestSerializationFactory.class));
   }
+
+  @Test
+  public void testSerializationKeyIsTrimmed() {
+Configuration conf = new Configuration();
+conf.set(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
" org.apache.hadoop.io.serializer.WritableSerialization ");
+SerializationFactory factory = new SerializationFactory(conf);
+assertNotNull(Valid class must be returned,
+ factory.getSerializer(LongWritable.class));
+   }
 }



hadoop git commit: HADOOP-11488. Difference in default connection timeout for S3A FS. Contributed by Daisuke Kobayashi.

2015-01-31 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 151e147e6 - 648510e03


HADOOP-11488. Difference in default connection timeout for S3A FS. Contributed 
by Daisuke Kobayashi.

(cherry picked from commit ffc75d6ebe4912f20f4f4870d2a50abbe4557bfa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/648510e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/648510e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/648510e0

Branch: refs/heads/branch-2
Commit: 648510e031a1fd5f8ebe9b2f62549642ed029db0
Parents: 151e147
Author: Harsh J ha...@cloudera.com
Authored: Sun Feb 1 00:16:52 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Feb 1 00:17:24 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/648510e0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 32d42b3..7e74069 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -179,6 +179,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11488. Difference in default connection timeout for S3A FS
+(Daisuke Kobayashi via harsh)
+
 HADOOP-11256. Some site docs have inconsistent appearance (Masatake 
 Iwasaki via aw)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/648510e0/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 0a0bdfe..4070619 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -711,7 +711,7 @@ for ldap providers in the same way as above does.
 
 property
   namefs.s3a.connection.timeout/name
-  value5000/value
+  value50000/value
   descriptionSocket connection timeout in seconds./description
 /property
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/648510e0/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index d443389..375f82c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -172,7 +172,7 @@ If you do any of these: change your credentials immediately!
 
 property
   namefs.s3a.connection.timeout/name
-  value5000/value
+  value50000/value
   descriptionSocket connection timeout in seconds./description
 /property
 



hadoop git commit: HADOOP-11488. Difference in default connection timeout for S3A FS. Contributed by Daisuke Kobayashi.

2015-01-31 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 26c2de36e - ffc75d6eb


HADOOP-11488. Difference in default connection timeout for S3A FS. Contributed 
by Daisuke Kobayashi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffc75d6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffc75d6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffc75d6e

Branch: refs/heads/trunk
Commit: ffc75d6ebe4912f20f4f4870d2a50abbe4557bfa
Parents: 26c2de3
Author: Harsh J ha...@cloudera.com
Authored: Sun Feb 1 00:16:52 2015 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sun Feb 1 00:17:04 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../hadoop-common/src/main/resources/core-default.xml | 2 +-
 .../hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md| 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc75d6e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 37fcee7..f3647fb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -540,6 +540,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11488. Difference in default connection timeout for S3A FS
+(Daisuke Kobayashi via harsh)
+
 HADOOP-11256. Some site docs have inconsistent appearance (Masatake 
 Iwasaki via aw)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc75d6e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index c11669d..598ce6f 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -719,7 +719,7 @@ for ldap providers in the same way as above does.
 
 property
   namefs.s3a.connection.timeout/name
-  value5000/value
+  value50000/value
   descriptionSocket connection timeout in seconds./description
 /property
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffc75d6e/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
--
diff --git 
a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md 
b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index d443389..375f82c 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -172,7 +172,7 @@ If you do any of these: change your credentials immediately!
 
 property
   namefs.s3a.connection.timeout/name
-  value5000/value
+  value50000/value
   descriptionSocket connection timeout in seconds./description
 /property
 



hadoop git commit: MAPREDUCE-6149. Document override log4j.properties in MR job. Contributed by Junping Du.

2014-12-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e25b789b1 - 6d65e7467


MAPREDUCE-6149. Document override log4j.properties in MR job. Contributed by 
Junping Du.

(cherry picked from commit 6621c3598e22279cde11eca73cfb5619a8bc8dee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d65e746
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d65e746
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d65e746

Branch: refs/heads/branch-2
Commit: 6d65e746706e0c5eed0433a00cd0a9fa6c6b1219
Parents: e25b789
Author: Harsh J ha...@cloudera.com
Authored: Tue Dec 30 23:12:32 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Dec 30 23:13:19 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt |  3 +++
 .../src/main/resources/mapred-default.xml| 15 +++
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d65e746/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 6721e56..c5dd74f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6149. Document override log4j.properties in MR job.
+(Junping Du via harsh)
+
 MAPREDUCE-6194. Bubble up final exception in failures during creation
 of output collectors (Varun Saxena via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d65e746/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 3798da7..85aa0e5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -581,6 +581,8 @@
   valueINFO/value
   descriptionThe logging level for the map task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overridden if mapreduce.job.log4j-properties-file
+  is set.
   /description
 /property
 
@@ -589,6 +591,8 @@
   valueINFO/value
   descriptionThe logging level for the reduce task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overridden if mapreduce.job.log4j-properties-file
+  is set.
   /description
 /property
 
@@ -1598,6 +1602,17 @@
   /description
 /property
 
+  property
+namemapreduce.job.log4j-properties-file/name
+value/value
+descriptionUsed to override the default settings of log4j in 
container-log4j.properties
+for NodeManager. Like container-log4j.properties, it requires certain
+framework appenders properly defined in this overriden file. The file on 
the
+path will be added to distributed cache and classpath. If no-scheme is 
given
+in the path, it defaults to point to a log4j file on the local FS.
+/description
+  /property
+
 property
   namemapreduce.job.end-notification.max.retry.interval/name
   value5000/value



hadoop git commit: MAPREDUCE-6149. Document override log4j.properties in MR job. Contributed by Junping Du.

2014-12-30 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 249cc9050 - 6621c3598


MAPREDUCE-6149. Document override log4j.properties in MR job. Contributed by 
Junping Du.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6621c359
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6621c359
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6621c359

Branch: refs/heads/trunk
Commit: 6621c3598e22279cde11eca73cfb5619a8bc8dee
Parents: 249cc90
Author: Harsh J ha...@cloudera.com
Authored: Tue Dec 30 23:12:32 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Dec 30 23:12:51 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt |  3 +++
 .../src/main/resources/mapred-default.xml| 15 +++
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6621c359/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 0a9ee8d..82295de 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -238,6 +238,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6149. Document override log4j.properties in MR job.
+(Junping Du via harsh)
+
 MAPREDUCE-6194. Bubble up final exception in failures during creation
 of output collectors (Varun Saxena via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6621c359/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 6e0deaa..30e291b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -324,6 +324,8 @@
   valueINFO/value
   descriptionThe logging level for the map task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overridden if mapreduce.job.log4j-properties-file
+  is set.
   /description
 /property
 
@@ -332,6 +334,8 @@
   valueINFO/value
   descriptionThe logging level for the reduce task. The allowed levels are:
   OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+  The setting here could be overridden if mapreduce.job.log4j-properties-file
+  is set.
   /description
 /property
 
@@ -1163,6 +1167,17 @@
   /description
 /property
 
+  property
+namemapreduce.job.log4j-properties-file/name
+value/value
+descriptionUsed to override the default settings of log4j in 
container-log4j.properties
+for NodeManager. Like container-log4j.properties, it requires certain
+framework appenders properly defined in this overridden file. The file on 
the
+path will be added to distributed cache and classpath. If no scheme is 
given
+in the path, it defaults to point to a log4j file on the local FS.
+/description
+  /property
+
 property
   namemapreduce.job.end-notification.max.retry.interval/name
   value5000/value



hadoop git commit: MAPREDUCE-6194. Bubble up final exception in failures during creation of output collectors. Contributed by Varun Saxena.

2014-12-15 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 25a044023 - 298d09c9b


MAPREDUCE-6194. Bubble up final exception in failures during creation of output 
collectors. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/298d09c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/298d09c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/298d09c9

Branch: refs/heads/trunk
Commit: 298d09c9b583088f364038adcb1edf1eb1c2c196
Parents: 25a0440
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 15 14:26:22 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Dec 15 14:26:43 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapred/MapTask.java | 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d09c9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a6475b1..191526a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -235,6 +235,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6194. Bubble up final exception in failures during creation
+of output collectors (Varun Saxena via harsh)
+
 MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
 (James Carman via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/298d09c9/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 75b4141..1a4901b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -387,6 +387,7 @@ public class MapTask extends Task {
 Class?[] collectorClasses = job.getClasses(
   JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, MapOutputBuffer.class);
 int remainingCollectors = collectorClasses.length;
+Exception lastException = null;
 for (Class clazz : collectorClasses) {
   try {
 if (!MapOutputCollector.class.isAssignableFrom(clazz)) {
@@ -406,10 +407,12 @@ public class MapTask extends Task {
 if (--remainingCollectors  0) {
   msg +=  ( + remainingCollectors +  more collector(s) to try);
 }
+lastException = e;
 LOG.warn(msg, e);
   }
 }
-throw new IOException(Unable to initialize any output collector);
+throw new IOException(Initialization of all the collectors failed.  +
+  Error in last collector was : + lastException.getMessage(), 
lastException);
   }
 
   @SuppressWarnings(unchecked)



hadoop git commit: MAPREDUCE-6194. Bubble up final exception in failures during creation of output collectors. Contributed by Varun Saxena.

2014-12-15 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6465931c1 - a3fa1f076


MAPREDUCE-6194. Bubble up final exception in failures during creation of output 
collectors. Contributed by Varun Saxena.

(cherry picked from commit 298d09c9b583088f364038adcb1edf1eb1c2c196)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3fa1f07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3fa1f07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3fa1f07

Branch: refs/heads/branch-2
Commit: a3fa1f0761b901740a68cdb843b0025eed34bbb4
Parents: 6465931
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 15 14:26:22 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Dec 15 14:27:49 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt| 3 +++
 .../src/main/java/org/apache/hadoop/mapred/MapTask.java | 5 -
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fa1f07/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e470baf..22817f9 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-6194. Bubble up final exception in failures during creation
+of output collectors (Varun Saxena via harsh)
+
 MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
 (James Carman via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3fa1f07/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 75b4141..1a4901b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -387,6 +387,7 @@ public class MapTask extends Task {
 Class?[] collectorClasses = job.getClasses(
   JobContext.MAP_OUTPUT_COLLECTOR_CLASS_ATTR, MapOutputBuffer.class);
 int remainingCollectors = collectorClasses.length;
+Exception lastException = null;
 for (Class clazz : collectorClasses) {
   try {
 if (!MapOutputCollector.class.isAssignableFrom(clazz)) {
@@ -406,10 +407,12 @@ public class MapTask extends Task {
 if (--remainingCollectors  0) {
   msg +=  ( + remainingCollectors +  more collector(s) to try);
 }
+lastException = e;
 LOG.warn(msg, e);
   }
 }
-throw new IOException(Unable to initialize any output collector);
+throw new IOException(Initialization of all the collectors failed.  +
+  Error in last collector was : + lastException.getMessage(), 
lastException);
   }
 
   @SuppressWarnings(unchecked)



hadoop git commit: YARN-2950. Change message to mandate, not suggest JS requirement on UI. Contributed by Dustin Cote.

2014-12-12 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk fa7b9248e - 0e37bbc8e


YARN-2950. Change message to mandate, not suggest JS requirement on UI. 
Contributed by Dustin Cote.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e37bbc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e37bbc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e37bbc8

Branch: refs/heads/trunk
Commit: 0e37bbc8e3f8e96acd96522face2f4bb01584cb4
Parents: fa7b924
Author: Harsh J ha...@cloudera.com
Authored: Sat Dec 13 07:10:11 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Sat Dec 13 07:10:40 2014 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java| 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e37bbc8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cd0bf7c..af29b70 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -67,6 +67,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-2950. Change message to mandate, not suggest JS requirement on UI.
+(Dustin Cote via harsh)
+
 YARN-2891. Failed Container Executor does not provide a clear error
 message. (Dustin Cote via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e37bbc8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
index 7c311bc..6a64d1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/JQueryUI.java
@@ -91,7 +91,8 @@ public class JQueryUI extends HtmlBlock {
   public static void jsnotice(HTML html) {
 html.
   div(#jsnotice.ui-state-error).
-  _(This page works best with javascript enabled.)._();
+  _(This page will not function without javascript enabled.
++  Please enable javascript on your browser.)._();
 html.
   script().$type(text/javascript).
 _($('#jsnotice').hide();)._();



hadoop git commit: MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. Contributed by James Carman. (harsh)

2014-12-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9a44db48b - cb99f4330


MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. 
Contributed by James Carman. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb99f433
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb99f433
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb99f433

Branch: refs/heads/trunk
Commit: cb99f43305bd1577d4ba9527d237ac6cdb9ae730
Parents: 9a44db4
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 8 17:34:39 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Dec 11 09:45:49 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/MRJobConfig.java|  2 --
 .../hadoop/mapreduce/util/ConfigUtil.java   |  2 --
 .../src/main/resources/mapred-default.xml   | 12 
 .../resources/job_1329348432655_0001_conf.xml   |  1 -
 .../hadoop/mapred/TestMiniMRChildTask.java  | 31 +---
 6 files changed, 4 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c757d40..bbab097 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -235,6 +235,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
+(James Carman via harsh)
+
 MAPREDUCE-5932. Provide an option to use a dedicated reduce-side shuffle
 log (Gera Shegalov via jlowe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index 230361c..915353b 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -166,8 +166,6 @@ public interface MRJobConfig {
 
   public static final String PRESERVE_FILES_PATTERN = 
mapreduce.task.files.preserve.filepattern;
 
-  public static final String TASK_TEMP_DIR = mapreduce.task.tmp.dir;
-
   public static final String TASK_DEBUGOUT_LINES = 
mapreduce.task.debugout.lines;
 
   public static final String RECORDS_BEFORE_PROGRESS = 
mapreduce.task.merge.progress.records;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
index 8c7952b..b1756ce 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
@@ -274,8 +274,6 @@ public class ConfigUtil {
 MRJobConfig.PRESERVE_FAILED_TASK_FILES),
   new DeprecationDelta(keep.task.files.pattern,
 MRJobConfig.PRESERVE_FILES_PATTERN),
-  new DeprecationDelta(mapred.child.tmp,
-MRJobConfig.TASK_TEMP_DIR),
   new DeprecationDelta(mapred.debug.out.lines,
 MRJobConfig.TASK_DEBUGOUT_LINES),
   new DeprecationDelta(mapred.merge.recordsBeforeProgress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb99f433/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce

hadoop git commit: MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. Contributed by James Carman. (harsh)

2014-12-10 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d39809016 - 6e06a51e3


MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml. 
Contributed by James Carman. (harsh)

(cherry picked from commit cb99f43305bd1577d4ba9527d237ac6cdb9ae730)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e06a51e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e06a51e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e06a51e

Branch: refs/heads/branch-2
Commit: 6e06a51e30ffc0f02a4c5ea027ac9956b9dd1269
Parents: d398090
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 8 17:34:39 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Dec 11 09:46:08 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../apache/hadoop/mapreduce/MRJobConfig.java|  2 --
 .../hadoop/mapreduce/util/ConfigUtil.java   |  2 --
 .../src/main/resources/mapred-default.xml   | 12 
 .../resources/job_1329348432655_0001_conf.xml   |  1 -
 .../hadoop/mapred/TestMiniMRChildTask.java  | 31 +---
 6 files changed, 4 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e06a51e/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index bccb616..eaa37a1 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -8,6 +8,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+MAPREDUCE-5420. Remove mapreduce.task.tmp.dir from mapred-default.xml
+(James Carman via harsh)
+
 MAPREDUCE-5932. Provide an option to use a dedicated reduce-side shuffle
 log (Gera Shegalov via jlowe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e06a51e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index dd928d4..41e3fa4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -161,8 +161,6 @@ public interface MRJobConfig {
 
   public static final String PRESERVE_FILES_PATTERN = 
mapreduce.task.files.preserve.filepattern;
 
-  public static final String TASK_TEMP_DIR = mapreduce.task.tmp.dir;
-
   public static final String TASK_DEBUGOUT_LINES = 
mapreduce.task.debugout.lines;
 
   public static final String RECORDS_BEFORE_PROGRESS = 
mapreduce.task.merge.progress.records;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e06a51e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
index 8c7952b..b1756ce 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ConfigUtil.java
@@ -274,8 +274,6 @@ public class ConfigUtil {
 MRJobConfig.PRESERVE_FAILED_TASK_FILES),
   new DeprecationDelta(keep.task.files.pattern,
 MRJobConfig.PRESERVE_FILES_PATTERN),
-  new DeprecationDelta(mapred.child.tmp,
-MRJobConfig.TASK_TEMP_DIR),
   new DeprecationDelta(mapred.debug.out.lines,
 MRJobConfig.TASK_DEBUGOUT_LINES),
   new DeprecationDelta(mapred.merge.recordsBeforeProgress,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e06a51e/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml

hadoop git commit: MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about ssl-client.xml. Contributed by Yangping Wu. (harsh)

2014-12-08 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 120e1decd - 8963515b8


MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about 
ssl-client.xml. Contributed by Yangping Wu. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8963515b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8963515b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8963515b

Branch: refs/heads/trunk
Commit: 8963515b880b78068791f11abe4f5df332553be1
Parents: 120e1de
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 8 15:57:52 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Dec 8 15:57:52 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/site/apt/EncryptedShuffle.apt.vm  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8963515b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3f34acd..c757d40 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -246,6 +246,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about
+ssl-client.xml (Yangping Wu via harsh)
+
 MAPREDUCE-5918. LineRecordReader can return the same decompressor to
 CodecPool multiple times (Sergey Murylev via raviprak)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8963515b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
index da412df..68e569d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
@@ -202,7 +202,7 @@ Hadoop MapReduce Next Generation - Encrypted Shuffle
 
 ** ssl-client.xml (Reducer/Fetcher) Configuration:
 
-  The mapred user should own the ssl-server.xml file and it should have
+  The mapred user should own the ssl-client.xml file and it should have
   default permissions.
 
 
*-+-+-+



hadoop git commit: MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about ssl-client.xml. Contributed by Yangping Wu. (harsh)

2014-12-08 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d02cb9c51 - bb1fedfbc


MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about 
ssl-client.xml. Contributed by Yangping Wu. (harsh)

(cherry picked from commit 8963515b880b78068791f11abe4f5df332553be1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb1fedfb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb1fedfb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb1fedfb

Branch: refs/heads/branch-2
Commit: bb1fedfbc36411b1d3f63bcfac05028e1b6c2eb2
Parents: d02cb9c
Author: Harsh J ha...@cloudera.com
Authored: Mon Dec 8 15:57:52 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Dec 8 16:00:12 2014 +0530

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../src/site/apt/EncryptedShuffle.apt.vm  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1fedfb/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index e0969e4..bccb616 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -19,6 +19,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+MAPREDUCE-6177. Minor typo in the EncryptedShuffle document about
+ssl-client.xml (Yangping Wu via harsh)
+
 MAPREDUCE-5918. LineRecordReader can return the same decompressor to
 CodecPool multiple times (Sergey Murylev via raviprak)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1fedfb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
index da412df..68e569d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/EncryptedShuffle.apt.vm
@@ -202,7 +202,7 @@ Hadoop MapReduce Next Generation - Encrypted Shuffle
 
 ** ssl-client.xml (Reducer/Fetcher) Configuration:
 
-  The mapred user should own the ssl-server.xml file and it should have
+  The mapred user should own the ssl-client.xml file and it should have
   default permissions.
 
 
*-+-+-+



hadoop git commit: YARN-2891. Failed Container Executor does not provide a clear error message. Contributed by Dustin Cote. (harsh)

2014-12-03 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 25be97808 - ee25c0d89


YARN-2891. Failed Container Executor does not provide a clear error message. 
Contributed by Dustin Cote. (harsh)

(cherry picked from commit 4b13318dea7a1cbbbfc1f84207af829cbe2f720e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee25c0d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee25c0d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee25c0d8

Branch: refs/heads/branch-2
Commit: ee25c0d8906be457f69edae5a9ee940534af3a89
Parents: 25be978
Author: Harsh J ha...@cloudera.com
Authored: Thu Dec 4 03:16:08 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Thu Dec 4 03:19:01 2014 +0530

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../src/main/native/container-executor/impl/container-executor.c  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee25c0d8/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b67af7f..b8daae9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -29,6 +29,9 @@ Release 2.7.0 - UNRELEASED
 
   IMPROVEMENTS
 
+YARN-2891. Failed Container Executor does not provide a clear error
+message. (Dustin Cote via harsh)
+
 YARN-1979. TestDirectoryCollection fails when the umask is unusual.
 (Vinod Kumar Vavilapalli and Tsuyoshi OZAWA via junping_du)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee25c0d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 9af9161..4fc78b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -526,7 +526,7 @@ int check_dir(char* npath, mode_t st_mode, mode_t desired, 
int finalComponent) {
 int filePermInt = st_mode  (S_IRWXU | S_IRWXG | S_IRWXO);
 int desiredInt = desired  (S_IRWXU | S_IRWXG | S_IRWXO);
 if (filePermInt != desiredInt) {
-  fprintf(LOGFILE, Path %s does not have desired permission.\n, npath);
+  fprintf(LOGFILE, Path %s has permission %o but needs permission %o.\n, 
npath, filePermInt, desiredInt);
   return -1;
 }
   }



git commit: HDFS-6741. Improve permission denied message when FSPermissionChecker#checkOwner fails. Contributed by Stephen Chu and Harsh J. (harsh)

2014-10-28 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk e7859015b - 0398db19b


HDFS-6741. Improve permission denied message when 
FSPermissionChecker#checkOwner fails. Contributed by Stephen Chu and Harsh J. 
(harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0398db19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0398db19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0398db19

Branch: refs/heads/trunk
Commit: 0398db19b2c4558a9f08ac2700a27752748896fa
Parents: e785901
Author: Harsh J ha...@cloudera.com
Authored: Tue Oct 28 12:08:26 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Tue Oct 28 12:22:37 2014 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../hdfs/server/namenode/FSPermissionChecker.java |  4 +++-
 .../org/apache/hadoop/hdfs/TestDFSPermission.java | 18 +++---
 .../server/namenode/TestFSPermissionChecker.java  |  8 +++-
 4 files changed, 28 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0398db19/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 456f77b..e18b935 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -316,6 +316,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HDFS-6741. Improve permission denied message when
+FSPermissionChecker#checkOwner fails (Stephen Chu and harsh).
+
 HDFS-6538. Comment format error in ShortCircuitRegistry javadoc.
 (David Luo via harsh).
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0398db19/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 5b7804b..2c48051 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -198,7 +198,9 @@ class FSPermissionChecker {
 if (inode != null  user.equals(inode.getUserName(snapshotId))) {
   return;
 }
-throw new AccessControlException(Permission denied);
+throw new AccessControlException(
+Permission denied. user=
++ user +  is not the owner of inode= + inode);
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0398db19/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
index 68349a2..23ce916 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
@@ -443,7 +443,11 @@ public class TestDFSPermission {
   fs.access(p1, FsAction.WRITE);
   fail("The access call should have failed.");
 } catch (AccessControlException e) {
-  // expected
+  assertTrue("Permission denied messages must carry the username",
+  e.getMessage().contains(USER1_NAME));
+  assertTrue("Permission denied messages must carry the path parent",
+  e.getMessage().contains(
+  p1.getParent().toUri().getPath()));
 }
 
 Path badPath = new Path(/bad/bad);
@@ -473,7 +477,11 @@ public class TestDFSPermission {
   fs.access(p2, FsAction.EXECUTE);
   fail("The access call should have failed.");
 } catch (AccessControlException e) {
-  // expected
+  assertTrue("Permission denied messages must carry the username",
+  e.getMessage().contains(USER1_NAME));
+  assertTrue("Permission denied messages must carry the path parent",
+  e.getMessage().contains(
+  p2.getParent().toUri().getPath()));
 }
   }
 
@@ -494,7 +502,11 @@ public class TestDFSPermission {
   fs.access(p3, FsAction.READ_WRITE);
   fail("The access call should have failed.");
 } catch (AccessControlException e) {
-  // expected
+  assertTrue(Permission denied messages must carry

git commit: HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java. Contributed by Abhiraj Butala. (harsh)

2014-10-27 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cb3974524 - c88ac8a73


HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java. Contributed by 
Abhiraj Butala. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c88ac8a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c88ac8a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c88ac8a7

Branch: refs/heads/branch-2
Commit: c88ac8a738b329f5558341b75876aeaedd33feaa
Parents: cb39745
Author: Harsh J ha...@cloudera.com
Authored: Mon Oct 27 19:09:52 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Oct 27 19:13:50 2014 +0530

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 2 ++
 .../src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java   | 4 ++--
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c88ac8a7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ef4cc0e..1eebf77 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -23,6 +23,8 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java (Abhiraj Butala 
via harsh)
+
 HADOOP-11166. Remove ulimit from test-patch.sh. (wang)
 
 HDFS-7227. Fix findbugs warning about NP_DEREFERENCE_OF_READLINE_VALUE in

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c88ac8a7/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
index 3343c75..b782ae5 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java
@@ -65,8 +65,8 @@ public abstract class RpcProgram extends 
SimpleChannelUpstreamHandler {
* @param progNumber program number as defined in RFC 1050
* @param lowProgVersion lowest version of the specification supported
* @param highProgVersion highest version of the specification supported
-   * @param DatagramSocket registrationSocket if not null, use this socket to
-   *register with portmap daemon
+   * @param registrationSocket if not null, use this socket to register
+   *with portmap daemon
* @param allowInsecurePorts true to allow client connections from
*unprivileged ports, false otherwise
*/



git commit: HDFS-6538. Comment format error in ShortCircuitRegistry javadoc. Contributed by David Luo. (harsh)

2014-10-27 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2429b3165 - 0058eadbd


HDFS-6538. Comment format error in ShortCircuitRegistry javadoc. Contributed by 
David Luo. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0058eadb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0058eadb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0058eadb

Branch: refs/heads/trunk
Commit: 0058eadbd3149a5dee1ffc69c2d9f21caa916fb5
Parents: 2429b31
Author: Harsh J ha...@cloudera.com
Authored: Mon Oct 27 19:24:50 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Oct 27 19:24:50 2014 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 -
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java   | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0058eadb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f793981..1f18ab1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -309,7 +309,10 @@ Release 2.7.0 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
-
+
+HDFS-6538. Comment format error in ShortCircuitRegistry javadoc.
+(David Luo via harsh).
+
 HDFS-7194. Fix findbugs inefficient new String constructor warning in
 DFSClient#PATH (yzhang via cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0058eadb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index ddde22d..965b40a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -48,7 +48,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.HashMultimap;
 
-/*
+/**
  * Manages client short-circuit memory segments on the DataNode.
  *
  * DFSClients request shared memory segments from the DataNode.  The 



git commit: HDFS-6538. Comment format error in ShortCircuitRegistry javadoc. Contributed by David Luo. (harsh)

2014-10-27 Thread harsh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c88ac8a73 - 8d781aaad


HDFS-6538. Comment format error in ShortCircuitRegistry javadoc. Contributed by 
David Luo. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d781aaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d781aaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d781aaa

Branch: refs/heads/branch-2
Commit: 8d781aaad3130ac5f8836524d5fd19cbb0f5134f
Parents: c88ac8a
Author: Harsh J ha...@cloudera.com
Authored: Mon Oct 27 19:24:50 2014 +0530
Committer: Harsh J ha...@cloudera.com
Committed: Mon Oct 27 19:26:02 2014 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 5 -
 .../hadoop/hdfs/server/datanode/ShortCircuitRegistry.java   | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d781aaa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 79c9266..c2a87a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -53,7 +53,10 @@ Release 2.7.0 - UNRELEASED
   OPTIMIZATIONS
 
   BUG FIXES
-
+
+HDFS-6538. Comment format error in ShortCircuitRegistry javadoc.
+(David Luo via harsh).
+
 HDFS-7194. Fix findbugs inefficient new String constructor warning in
 DFSClient#PATH (yzhang via cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d781aaa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index ddde22d..965b40a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -48,7 +48,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.HashMultimap;
 
-/*
+/**
  * Manages client short-circuit memory segments on the DataNode.
  *
  * DFSClients request shared memory segments from the DataNode.  The 



svn commit: r1518302 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

2013-08-28 Thread harsh
Author: harsh
Date: Wed Aug 28 17:54:19 2013
New Revision: 1518302

URL: http://svn.apache.org/r1518302
Log:
Addendum to HADOOP-9910 for trunk. Removed bad characters from CHANGES.txt note 
that was causing odd issues.

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1518302r1=1518301r2=1518302view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Wed Aug 
28 17:54:19 2013
@@ -363,7 +363,7 @@ Release 2.1.1-beta - UNRELEASED
   IMPROVEMENTS
 
 HADOOP-9910. proxy server start and stop documentation wrong
-(André Kelpe via harsh)
+(Andre Kelpe via harsh)
 
 HADOOP-9446. Support Kerberos SPNEGO for IBM JDK. (Yu Gao via llu)
  




svn commit: r1513939 - in /hadoop/common/branches/branch-1: CHANGES.txt src/core/org/apache/hadoop/conf/Configuration.java src/core/org/apache/hadoop/util/StringUtils.java src/test/org/apache/hadoop/c

2013-08-14 Thread harsh
Author: harsh
Date: Wed Aug 14 15:50:48 2013
New Revision: 1513939

URL: http://svn.apache.org/r1513939
Log:
HADOOP-9855. Backport HADOOP-6578 to branch-1. Contributed by James Kinley. 
(harsh)

Modified:
hadoop/common/branches/branch-1/CHANGES.txt

hadoop/common/branches/branch-1/src/core/org/apache/hadoop/conf/Configuration.java

hadoop/common/branches/branch-1/src/core/org/apache/hadoop/util/StringUtils.java

hadoop/common/branches/branch-1/src/test/org/apache/hadoop/conf/TestConfiguration.java

Modified: hadoop/common/branches/branch-1/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/CHANGES.txt?rev=1513939r1=1513938r2=1513939view=diff
==
--- hadoop/common/branches/branch-1/CHANGES.txt (original)
+++ hadoop/common/branches/branch-1/CHANGES.txt Wed Aug 14 15:50:48 2013
@@ -34,6 +34,9 @@ Release 1.3.0 - unreleased
 HDFS-4963. Improve multihoming support in namenode. (Arpit Agarwal via
 cnauroth)
 
+HADOOP-9855. Backport HADOOP-6578 to branch-1.
+(James Kinley via harsh)
+
   BUG FIXES
 
 MAPREDUCE-5047. keep.failed.task.files=true causes job failure on 

Modified: 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/conf/Configuration.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-1/src/core/org/apache/hadoop/conf/Configuration.java?rev=1513939r1=1513938r2=1513939view=diff
==
--- 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/conf/Configuration.java
 (original)
+++ 
hadoop/common/branches/branch-1/src/core/org/apache/hadoop/conf/Configuration.java
 Wed Aug 14 15:50:48 2013
@@ -398,6 +398,29 @@ public class Configuration implements It
   }
 
   /**
+   * Get the value of the codename/code property as a trimmed 
codeString/code,
+   * codenull/code if no such property exists. 
+   * If the key is deprecated, it returns the value of
+   * the first key which replaces the deprecated key and is not null
+   *
+   * Values are processed for a href=#VariableExpansionvariable 
expansion/a
+   * before being returned.
+   *
+   * @param name the property name.
+   * @return the value of the codename/code or its replacing property, 
+   * or null if no such property exists.
+   */
+  public String getTrimmed(String name) {
+String value = get(name);
+
+if (null == value) {
+  return null;
+} else {
+  return value.trim();
+}
+  }
+
+  /**
* Get the value of the codename/code property, without doing
* a href=#VariableExpansionvariable expansion/a.
* 
@@ -472,7 +495,7 @@ public class Configuration implements It
* or codedefaultValue/code. 
*/
   public int getInt(String name, int defaultValue) {
-String valueString = get(name);
+String valueString = getTrimmed(name);
 if (valueString == null)
   return defaultValue;
 try {
@@ -508,7 +531,7 @@ public class Configuration implements It
* or codedefaultValue/code. 
*/
   public long getLong(String name, long defaultValue) {
-String valueString = get(name);
+String valueString = getTrimmed(name);
 if (valueString == null)
   return defaultValue;
 try {
@@ -561,7 +584,7 @@ public class Configuration implements It
* or codedefaultValue/code. 
*/
   public float getFloat(String name, float defaultValue) {
-String valueString = get(name);
+String valueString = getTrimmed(name);
 if (valueString == null)
   return defaultValue;
 try {
@@ -591,7 +614,7 @@ public class Configuration implements It
* or codedefaultValue/code. 
*/
   public boolean getBoolean(String name, boolean defaultValue) {
-String valueString = get(name);
+String valueString = getTrimmed(name);
 if ("true".equals(valueString))
   return true;
 else if ("false".equals(valueString))
@@ -740,6 +763,20 @@ public class Configuration implements It
 return new IntegerRanges(get(name, defaultValue));
   }
 
+  /**
+   * Get the comma delimited values of the codename/code property as
+   * an array of codeString/codes, trimmed of the leading and trailing 
whitespace.
+   * If no such property is specified then an empty array is returned.
+   *
+   * @param name property name.
+   * @return property value as an array of trimmed codeString/codes,
+   * or empty array.
+   */
+  public String[] getTrimmedStrings(String name) {
+String valueString = get(name);
+return StringUtils.getTrimmedStrings(valueString);
+  }
+
   /** 
* Get the comma delimited values of the codename/code property as 
* a collection of codeString/codes.  
@@ -823,7 +860,7 @@ public class Configuration implements It
* or codedefaultValue/code. 
*/
   public Class?[] getClasses(String name, Class? ... defaultValue) {
-String[] classnames = getStrings(name

svn commit: r1502954 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: ./ src/main/java/org/apache/hadoop/fs/ src/main/resources/ src/test/java/org/apache/hadoop/fs/

2013-07-14 Thread harsh
Author: harsh
Date: Sun Jul 14 10:46:37 2013
New Revision: 1502954

URL: http://svn.apache.org/r1502954
Log:
HADOOP-9241. DU refresh interval is not configurable. (harsh)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1502954r1=1502953r2=1502954view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Sun Jul 
14 10:46:37 2013
@@ -287,6 +287,8 @@ Release 2.3.0 - UNRELEASED
 
   IMPROVEMENTS
 
+HADOOP-9241. DU refresh interval is not configurable (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java?rev=1502954r1=1502953r2=1502954view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 Sun Jul 14 10:46:37 2013
@@ -54,6 +54,10 @@ public class CommonConfigurationKeysPubl
   public static final String  FS_DF_INTERVAL_KEY = fs.df.interval; 
   /** Default value for FS_DF_INTERVAL_KEY */
   public static final longFS_DF_INTERVAL_DEFAULT = 6;
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String  FS_DU_INTERVAL_KEY = "fs.du.interval";
+  /** Default value for FS_DU_INTERVAL_KEY */
+  public static final long    FS_DU_INTERVAL_DEFAULT = 600000;
 
 
   //Defaults are not specified for following keys

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java?rev=1502954r1=1502953r2=1502954view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
 Sun Jul 14 10:46:37 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 
 import java.io.BufferedReader;
@@ -64,8 +65,8 @@ public class DU extends Shell {
* @throws IOException if we fail to refresh the disk usage
*/
   public DU(File path, Configuration conf) throws IOException {
-this(path, 600000L);
-//10 minutes default refresh interval
+this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
+CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
   }
 
   /**

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1502954r1=1502953r2=1502954view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 Sun Jul 14 10:46:37 2013
@@ -465,6 +465,12 @@
 /property
 
 property
+  namefs.du.interval/name
  value600000/value
+  descriptionFile space usage statistics refresh interval in 
msec./description
+/property
+
+property
   namefs.s3.block.size/name
   value67108864/value
   descriptionBlock size to use when writing files to S3./description

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java
URL: 
http

svn commit: r1502955 - in /hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common: ./ src/main/java/org/apache/hadoop/fs/ src/main/resources/ src/test/java/org/apache/hadoop/fs/

2013-07-14 Thread harsh
Author: harsh
Date: Sun Jul 14 11:01:02 2013
New Revision: 1502955

URL: http://svn.apache.org/r1502955
Log:
HADOOP-9241. DU refresh interval is not configurable. (harsh)

Modified:

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDU.java

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1502955r1=1502954r2=1502955view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/CHANGES.txt 
Sun Jul 14 11:01:02 2013
@@ -14,6 +14,8 @@ Release 2.3.0 - UNRELEASED
 
 HADOOP-9432 Add support for markdown .md files in site documentation 
(stevel)
 
+HADOOP-9241. DU refresh interval is not configurable (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java?rev=1502955r1=1502954r2=1502955view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
 Sun Jul 14 11:01:02 2013
@@ -54,6 +54,10 @@ public class CommonConfigurationKeysPubl
   public static final String  FS_DF_INTERVAL_KEY = fs.df.interval; 
   /** Default value for FS_DF_INTERVAL_KEY */
   public static final longFS_DF_INTERVAL_DEFAULT = 6;
+  /** See a href={@docRoot}/../core-default.htmlcore-default.xml/a */
+  public static final String  FS_DU_INTERVAL_KEY = fs.du.interval;
+  /** Default value for FS_DU_INTERVAL_KEY */
+  public static final longFS_DU_INTERVAL_DEFAULT = 60;
 
 
   //Defaults are not specified for following keys

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java?rev=1502955r1=1502954r2=1502955view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
 Sun Jul 14 11:01:02 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 
 import java.io.BufferedReader;
@@ -64,8 +65,8 @@ public class DU extends Shell {
* @throws IOException if we fail to refresh the disk usage
*/
   public DU(File path, Configuration conf) throws IOException {
-this(path, 60L);
-//10 minutes default refresh interval
+this(path, conf.getLong(CommonConfigurationKeys.FS_DU_INTERVAL_KEY,
+CommonConfigurationKeys.FS_DU_INTERVAL_DEFAULT));
   }
 
   /**

Modified: 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1502955r1=1502954r2=1502955view=diff
==
--- 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 (original)
+++ 
hadoop/common/branches/branch-2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 Sun Jul 14 11:01:02 2013
@@ -457,6 +457,12 @@
 /property
 
 property
+  namefs.du.interval/name
+  value60/value
+  descriptionFile space usage

svn commit: r1500190 - /hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

2013-07-05 Thread harsh
Author: harsh
Date: Sat Jul  6 02:16:27 2013
New Revision: 1500190

URL: http://svn.apache.org/r1500190
Log:
HADOOP-8844. Add a plaintext fs -text test-case. Contributed by Akira AJISAKA. 
(harsh)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1500190r1=1500189r2=1500190view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Sat Jul 
 6 02:16:27 2013
@@ -102,6 +102,9 @@ Trunk (Unreleased)
 HADOOP-9540. Expose the InMemoryS3 and S3N FilesystemStores implementations
 for Unit testing. (Hari via stevel)
 
+HADOOP-8844. Add a plaintext fs -text test-case.
+(Akira AJISAKA via harsh)
+
   BUG FIXES
 
 HADOOP-9451. Fault single-layer config if node group topology is enabled.




svn commit: r1477458 - in /hadoop/common/trunk/hadoop-common-project/hadoop-common: CHANGES.txt src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java src/main/resources/core-default.xml

2013-04-29 Thread harsh
Author: harsh
Date: Tue Apr 30 03:06:06 2013
New Revision: 1477458

URL: http://svn.apache.org/r1477458
Log:
HADOOP-9322. LdapGroupsMapping doesn't seem to set a timeout for its directory 
search. Contributed by Harsh J. (harsh)

Modified:
hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

Modified: hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt?rev=1477458r1=1477457r2=1477458view=diff
==
--- hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt 
(original)
+++ hadoop/common/trunk/hadoop-common-project/hadoop-common/CHANGES.txt Tue Apr 
30 03:06:06 2013
@@ -577,6 +577,9 @@ Release 2.0.5-beta - UNRELEASED
 HADOOP-9503. Remove sleep between IPC client connect timeouts.
 (Varun Sharma via szetszwo)
 
+HADOOP-9322. LdapGroupsMapping doesn't seem to set a timeout for
+its directory search. (harsh)
+
   OPTIMIZATIONS
 
 HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java?rev=1477458r1=1477457r2=1477458view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 Tue Apr 30 03:06:06 2013
@@ -144,7 +144,15 @@ public class LdapGroupsMapping
*/
   public static final String GROUP_NAME_ATTR_KEY = LDAP_CONFIG_PREFIX + 
.search.attr.group.name;
   public static final String GROUP_NAME_ATTR_DEFAULT = cn;
-  
+
+  /*
+   * LDAP {@link SearchControls} attribute to set the time limit
+   * for an invoked directory search. Prevents infinite wait cases.
+   */
+  public static final String DIRECTORY_SEARCH_TIMEOUT =
+LDAP_CONFIG_PREFIX + ".directory.search.timeout";
+  public static final int DIRECTORY_SEARCH_TIMEOUT_DEFAULT = 10000; // 10s
+
   private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
 
   private static final SearchControls SEARCH_CONTROLS = new SearchControls();
@@ -326,6 +334,9 @@ public class LdapGroupsMapping
 groupNameAttr =
 conf.get(GROUP_NAME_ATTR_KEY, GROUP_NAME_ATTR_DEFAULT);
 
+int dirSearchTimeout = conf.getInt(DIRECTORY_SEARCH_TIMEOUT, 
DIRECTORY_SEARCH_TIMEOUT_DEFAULT);
+SEARCH_CONTROLS.setTimeLimit(dirSearchTimeout);
+
 this.conf = conf;
   }
   

Modified: 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml?rev=1477458r1=1477457r2=1477458view=diff
==
--- 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 (original)
+++ 
hadoop/common/trunk/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
 Tue Apr 30 03:06:06 2013
@@ -213,6 +213,17 @@
 /property
 
 property
+  namehadoop.security.group.mapping.ldap.directory.search.timeout/name
  value10000/value
+  description
+The attribute applied to the LDAP SearchControl properties to set a
+maximum time limit when searching and awaiting a result.
+Set to 0 if infinite wait period is desired.
+Default is 10 seconds. Units in milliseconds.
+  /description
+/property
+
+property
   namehadoop.security.service.user.name.key/name
   value/value
   description




  1   2   3   >