[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-09 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19cc05b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19cc05b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19cc05b5

Branch: refs/heads/HDFS-7285
Commit: 19cc05b5220f4a9246348f2eb623613d04065679
Parents: f3885a7
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:34:24 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Fri Apr 10 04:34:24 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19cc05b5/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)



hadoop git commit: Fix CHANGES.txt for branch-2

2015-04-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 53e0bf5c1 -> 6d1cb3422


Fix CHANGES.txt for branch-2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d1cb342
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d1cb342
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d1cb342

Branch: refs/heads/branch-2
Commit: 6d1cb34221cecf640fab5dcb8bbfe1cd899edec6
Parents: 53e0bf5
Author: Kihwal Lee kih...@apache.org
Authored: Thu Apr 9 10:05:43 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Thu Apr 9 10:05:43 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d1cb342/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 53b88dd..2c4a3bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -84,9 +84,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
 LocatedBlock when possible. (Zhe Zhang via wang)
 
-HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
-complete without blocks being replicated. (Ming Ma via wang)
-
 HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. (wheat9)
 
 HDFS-7979. Initialize block report IDs with a random number. (wang)
@@ -1120,6 +1117,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8072. Reserved RBW space is not released if client terminates while
 writing block. (Arpit Agarwal)
 
+HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
+complete without blocks being replicated. (Ming Ma via wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode



hadoop git commit: Adding release 2.7.1 to CHANGES.txt

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk a813db0b1 -> 623fd46c1


Adding release 2.7.1 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/623fd46c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/623fd46c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/623fd46c

Branch: refs/heads/trunk
Commit: 623fd46c1eac057ea9bc4b503a47ad1a831cdacf
Parents: a813db0
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:53:35 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 13:54:20 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/623fd46c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 397161d..f181a96 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -509,6 +509,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11800. Clean up some test methods in TestCodec.java.
 (Brahma Reddy Battula via aajisaka)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/623fd46c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 979534e..695dc36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -463,6 +463,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-8091: ACLStatus and XAttributes should be presented to
 INodeAttributesProvider before returning to client (asuresh)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/623fd46c/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b9a75e3..4a45386 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -327,6 +327,18 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/623fd46c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c29fdea..e6a1cf6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -165,6 +165,18 @@ Release 2.8.0 - UNRELEASED
 YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
 (Zhihai Xu via kasha)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[1/2] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng

2015-04-09 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 d022be287 -> de3621c0c


HDFS-8104 Make hard-coded values consistent with the system default schema 
first before remove them. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5635a44c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5635a44c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5635a44c

Branch: refs/heads/HDFS-7285
Commit: 5635a44cf84201aa5626685b93d54af4b36b68bb
Parents: d022be2
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Fri Apr 10 00:16:28 2015 +0800

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  12 +-
 .../hadoop/hdfs/TestPlanReadPortions.java   | 142 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---
 3 files changed, 151 insertions(+), 115 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5635a44c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a888aa4..11c5260 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -180,11 +180,17 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 
-  public static final byte NUM_DATA_BLOCKS = 3;
-  public static final byte NUM_PARITY_BLOCKS = 2;
+
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 
+  /*
+   * These values correspond to the values used by the system default schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
   // The chunk size for striped block which is used by erasure coding
-  public static final int BLOCK_STRIPED_CELL_SIZE = 128 * 1024;
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5635a44c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 000..cf84b30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSStripedInputStream.ReadPortion;
+import static org.junit.Assert.*;
+
+public class TestPlanReadPortions {
+
+  // We only support this as num of data blocks. It might be good enough for 
now
+  // for the purpose, even not flexible yet for any number in a schema.
+  private final short GROUP_SIZE = 3;
+  private final int CELLSIZE = 128 * 1024;
+
+  private void testPlanReadPortions(int startInBlk, int length,
+  int bufferOffset, int[] readLengths, int[] offsetsInBlock,
+  int[][] bufferOffsets, int[][] bufferLengths) {
+ReadPortion[] results = DFSStripedInputStream.planReadPortions(GROUP_SIZE,
+CELLSIZE, startInBlk, length, bufferOffset);
+assertEquals(GROUP_SIZE, results.length);
+
+for (int i = 0; i  GROUP_SIZE; i++) {
+  assertEquals(readLengths[i], 

hadoop git commit: MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL (rchiang via rkanter)

2015-04-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c846e84d5 -> 0fdd5c23c


MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL 
(rchiang via rkanter)

(cherry picked from commit c4986b2d00d327f18d0c7e2f8805b69a4e07a19b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0fdd5c23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0fdd5c23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0fdd5c23

Branch: refs/heads/branch-2
Commit: 0fdd5c23c34cfd3c212f880358652702b4dcd68e
Parents: c846e84
Author: Robert Kanter rkan...@apache.org
Authored: Thu Apr 9 13:48:14 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Thu Apr 9 13:49:03 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fdd5c23/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d75fda2..48a39a4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -79,6 +79,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-4844. Counters / AbstractCounters have constant references not
 declared final. (Brahma Reddy Battula via gera)
 
+MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
+(rchiang via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0fdd5c23/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index 6df8261..0f1f391 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -157,10 +157,10 @@ public class CompletedJob implements 
org.apache.hadoop.mapreduce.v2.app.job.Job
 String historyUrl = N/A;
 try {
   historyUrl =
-  MRWebAppUtil.getApplicationWebURLOnJHSWithoutScheme(conf,
+  MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(conf,
   jobId.getAppId());
 } catch (UnknownHostException e) {
-  //Ignore.
+LOG.error(Problem determining local host:  + e.getMessage());
 }
 report.setTrackingUrl(historyUrl);
 report.setAMInfos(getAMInfos());



hadoop git commit: HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even for failed transfers (Contributed by Vinayakumar B)

2015-04-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk b1e059089 -> 9d8952f97


HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even 
for failed transfers (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d8952f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d8952f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d8952f9

Branch: refs/heads/trunk
Commit: 9d8952f97f638ede27e4336b9601507d7bb1de7b
Parents: b1e0590
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 9 11:58:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu Apr 9 11:58:00 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/datanode/BPOfferService.java|  1 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 ++
 .../datanode/metrics/DataNodeMetrics.java   |  4 +--
 .../server/datanode/TestDataNodeMetrics.java| 37 +---
 5 files changed, 40 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d8952f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19f264a..74ed624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -451,6 +451,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7725. Incorrect nodes in service metrics caused all writes to fail.
 (Ming Ma via wang)
 
+HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
+even for failed transfers (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d8952f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index da9642a..1b42b19 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -656,7 +656,6 @@ class BPOfferService {
   // Send a copy of a block to another datanode
   dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(),
   bcmd.getTargets(), bcmd.getTargetStorageTypes());
-  dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length);
   break;
 case DatanodeProtocol.DNA_INVALIDATE:
   //

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d8952f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 50dccb8..8c08871 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2174,6 +2174,8 @@ public class DataNode extends ReconfigurableBase
   + Arrays.asList(targets));
 }
   }
+} else {
+  metrics.incrBlocksReplicated();
 }
   } catch (IOException ie) {
 LOG.warn(bpReg + :Failed to transfer  + b +  to  +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d8952f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 2e8eb22..2e62b3c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -210,8 +210,8 @@ public class DataNodeMetrics {
 cacheReports.add(latency);
   }
 
-  public void incrBlocksReplicated(int delta) {
-blocksReplicated.incr(delta);

[1/2] hadoop git commit: HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6994 af17a556c -> f0ea98f0c


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
deleted file mode 100644
index 1e4c9c5..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/src/common/StackPrinter.cc
+++ /dev/null
@@ -1,670 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include StackPrinter.h
-
-#include cassert
-#include cxxabi.h
-#include dlfcn.h
-#include execinfo.h
-#include sstream
-#include string
-#include vector
-
-namespace hdfs {
-namespace internal {
-
-static void ATTRIBUTE_NOINLINE GetStack(int skip, int maxDepth,
-std::vectorvoid *  stack) {
-std::ostringstream ss;
-++skip; //current frame.
-stack.resize(maxDepth + skip);
-int size;
-size = backtrace(stack[0], maxDepth + skip);
-size = size - skip;
-
-if (size  0) {
-stack.resize(0);
-return;
-}
-
-stack.erase(stack.begin(), stack.begin() + skip);
-stack.resize(size);
-}
-
-std::string DemangleSymbol(const char * symbol) {
-int status;
-std::string retval;
-char * name = abi::__cxa_demangle(symbol, 0, 0, status);
-
-switch (status) {
-case 0:
-retval = name;
-break;
-
-case -1:
-throw std::bad_alloc();
-break;
-
-case -2:
-retval = symbol;
-break;
-
-case -3:
-retval = symbol;
-break;
-}
-
-if (name) {
-free(name);
-}
-
-return retval;
-}
-
-#if defined(__ELF__)
-
-#include elf.h
-#include errno.h
-#include fcntl.h
-#include limits.h
-#include link.h  // For ElfW() macro.
-#include stdint.h
-#include stdio.h
-#include stdlib.h
-#include stddef.h
-#include string.h
-#include sys/stat.h
-#include sys/types.h
-#include unistd.h
-
-// Re-runs fn until it doesn't cause EINTR.
-#define NO_INTR(fn)   do {} while ((fn)  0  errno == EINTR)
-
-// Read up to count bytes from file descriptor fd into the buffer
-// starting at buf while handling short reads and EINTR.  On
-// success, return the number of bytes read.  Otherwise, return -1.
-static ssize_t ReadPersistent(const int fd, void * buf, const size_t count) {
-assert(fd = 0);
-char * buf0 = reinterpret_castchar *(buf);
-ssize_t num_bytes = 0;
-
-while (num_bytes  static_castssize_t(count)) {
-ssize_t len;
-NO_INTR(len = read(fd, buf0 + num_bytes, count - num_bytes));
-
-if (len  0) {  // There was an error other than EINTR.
-return -1;
-}
-
-if (len == 0) {  // Reached EOF.
-break;
-}
-
-num_bytes += len;
-}
-
-return num_bytes;
-}
-
-// Read up to count bytes from offset in the file pointed by file
-// descriptor fd into the buffer starting at buf.  On success,
-// return the number of bytes read.  Otherwise, return -1.
-static ssize_t ReadFromOffset(const int fd, void * buf,
-  const size_t count, const off_t offset) {
-off_t off = lseek(fd, offset, SEEK_SET);
-
-if (off == (off_t) - 1) {
-return -1;
-}
-
-return ReadPersistent(fd, buf, count);
-}
-
-// Try reading exactly count bytes from offset bytes in a file
-// pointed by fd into the buffer starting at buf while handling
-// short reads and EINTR.  On success, return true. Otherwise, return
-// false.
-static bool ReadFromOffsetExact(const int fd, void * buf,
-const size_t count, const off_t offset) {
-ssize_t len = ReadFromOffset(fd, buf, count, offset);
-return len == static_castssize_t(count);
-}
-
-// Returns elf_header.e_type if the file pointed by fd is an ELF binary.
-static int FileGetElfType(const int fd) {
-ElfW(Ehdr) elf_header;
-
-if (!ReadFromOffsetExact(fd, elf_header, sizeof(elf_header), 0)) {
-return -1;
-}
-
-if 

hadoop git commit: YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4d5b1fbde -> 4c6867ff0


YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token 
renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

(cherry picked from commit 9c5911294e0ba71aefe4763731b0e780cde9d0ca)

Conflicts:
hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c6867ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c6867ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c6867ff

Branch: refs/heads/branch-2.7
Commit: 4c6867ff01bf60ad7682184cd490924eb0ea81e5
Parents: 4d5b1fb
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:08:53 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 13:11:26 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../security/DelegationTokenRenewer.java| 137 ---
 .../security/TestDelegationTokenRenewer.java|  87 +++-
 3 files changed, 173 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6867ff/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 28b33ff..43793f0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -751,6 +751,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3430. Made headroom data available on app attempt page of RM WebUI.
 (Xuan Gong via zjshen)
 
+YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
+renewal of applications part of a bigger workflow. (Daryn Sharp via 
vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c6867ff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 2619971..d49ecfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -229,15 +230,16 @@ public class DelegationTokenRenewer extends 
AbstractService {
   @VisibleForTesting
   protected static class DelegationTokenToRenew {
 public final Token? token;
-public final ApplicationId applicationId;
+public final CollectionApplicationId referringAppIds;
 public final Configuration conf;
 public long expirationDate;
-public TimerTask timerTask;
+public RenewalTimerTask timerTask;
 public volatile boolean shouldCancelAtEnd;
 public long maxDate;
 public String user;
 
-public DelegationTokenToRenew(ApplicationId jId, Token? token,
+public DelegationTokenToRenew(CollectionApplicationId applicationIds,
+Token? token,
 Configuration conf, long expirationDate, boolean shouldCancelAtEnd,
 String user) {
   this.token = token;
@@ -251,20 +253,33 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throw new YarnRuntimeException(e);
 }
   }
-  this.applicationId = jId;
+  this.referringAppIds = Collections.synchronizedSet(
+  new HashSetApplicationId(applicationIds));
   this.conf = conf;
   this.expirationDate = expirationDate;
   this.timerTask = null;
   this.shouldCancelAtEnd = shouldCancelAtEnd;
 }
 
-public void setTimerTask(TimerTask tTask) {
+public void setTimerTask(RenewalTimerTask tTask) {
   timerTask = tTask;
 }
-
+
+@VisibleForTesting
+public void cancelTimer() {
+  if (timerTask != null) {
+timerTask.cancel();
+  }
+}
+
+

[1/2] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-04-09 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 de3621c0c -> 19cc05b52


HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3885a71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3885a71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3885a71

Branch: refs/heads/HDFS-7285
Commit: f3885a7131930802b71adc33b29ef7165a59962e
Parents: de3621c
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Fri Apr 10 04:31:48 2015 +0800

--
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 5 files changed, 60 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3885a71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = 
LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = 
LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
   private ListECSchema loadSchema(File schemaFile)
   throws ParserConfigurationException, IOException, SAXException {
 
-LOG.info(Loading predefined EC schema file  + schemaFile);
+LOG.info(Loading predefined EC schema file {}, schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
   ECSchema schema = loadSchema(element);
 schemas.add(schema);
 } else {
-  LOG.warn(Bad element in EC schema configuration file:  +
+  LOG.warn(Bad element in EC schema configuration file: {},
   element.getTagName());
 }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-LOG.warn(schemaFilePath + " not found on the classpath.");
+LOG.warn("{} not found on the classpath.", schemaFilePath);
 schemaFile = null;
   } else if (! url.getProtocol().equalsIgnoreCase("file")) {
 throw new RuntimeException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3885a71/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index e2c5051..fc664a5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -1,3 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in 

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-09 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3621c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3621c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3621c0

Branch: refs/heads/HDFS-7285
Commit: de3621c0cdee724217b636c13b35de6ef64cd89f
Parents: 5635a44
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 00:18:14 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Fri Apr 10 00:18:14 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3621c0/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
-HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file



hadoop git commit: MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL (rchiang via rkanter)

2015-04-09 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9c5911294 -> c4986b2d0


MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL 
(rchiang via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4986b2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4986b2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4986b2d

Branch: refs/heads/trunk
Commit: c4986b2d00d327f18d0c7e2f8805b69a4e07a19b
Parents: 9c59112
Author: Robert Kanter rkan...@apache.org
Authored: Thu Apr 9 13:48:14 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Thu Apr 9 13:48:14 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4986b2d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c1eb6c3..b9a75e3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -324,6 +324,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-4844. Counters / AbstractCounters have constant references not
 declared final. (Brahma Reddy Battula via gera)
 
+MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
+(rchiang via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4986b2d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index 6df8261..0f1f391 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -157,10 +157,10 @@ public class CompletedJob implements 
org.apache.hadoop.mapreduce.v2.app.job.Job
 String historyUrl = "N/A";
 try {
   historyUrl =
-  MRWebAppUtil.getApplicationWebURLOnJHSWithoutScheme(conf,
+  MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(conf,
   jobId.getAppId());
 } catch (UnknownHostException e) {
-  //Ignore.
+LOG.error("Problem determining local host: " + e.getMessage());
 }
 report.setTrackingUrl(historyUrl);
 report.setAMInfos(getAMInfos());



hadoop git commit: HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider before returning to client (asuresh)

2015-04-09 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1ff3fd33e -> c846e84d5


HDFS-8091: ACLStatus and XAttributes should be presented to 
INodeAttributesProvider before returning to client (asuresh)

(cherry picked from commit 922b7ed21d1f1460263ca42f709bb9f415d189c5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c846e84d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c846e84d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c846e84d

Branch: refs/heads/branch-2
Commit: c846e84d55d677d2f8294298fd69a9a43d2d9d85
Parents: 1ff3fd3
Author: Arun Suresh asur...@apache.org
Authored: Thu Apr 9 12:28:44 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Thu Apr 9 13:13:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/AclStorage.java | 11 ++
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  3 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  6 ++--
 .../namenode/TestINodeAttributeProvider.java| 36 
 6 files changed, 50 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c846e84d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2b1acf5..e3f6372 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -101,6 +101,9 @@ Release 2.8.0 - UNRELEASED
 
   BUG FIXES
 
+HDFS-8091: ACLStatus and XAttributes should be presented to
+INodeAttributesProvider before returning to client (asuresh)
+
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
 (Gautam Gopalakrishnan via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c846e84d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
index 4f6ce3a..abd3755 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
@@ -163,6 +163,17 @@ public final class AclStorage {
   }
 
   /**
+   * Reads the existing extended ACL entries of an INodeAttribute object.
+   *
+   * @param inodeAttr INode to read
+   * @return List<AclEntry> containing extended inode ACL entries
+   */
+  public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
+AclFeature f = inodeAttr.getAclFeature();
+return getEntriesFromAclFeature(f);
+  }
+
+  /**
* Build list of AclEntries from the AclFeature
* @param aclFeature AclFeature
* @return List of entries

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c846e84d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index dff1c2e..0c572b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -172,7 +172,8 @@ class FSDirAclOp {
   }
   INode inode = FSDirectory.resolveLastINode(iip);
   int snapshotId = iip.getPathSnapshotId();
-  List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
+  List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
+  inode.getLocalNameBytes(), inode, snapshotId));
   FsPermission fsPermission = inode.getFsPermission(snapshotId);
   return new AclStatus.Builder()
   .owner(inode.getUserName()).group(inode.getGroupName())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c846e84d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 

hadoop git commit: HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ca1208825 -> 8dac24592


HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via 
Colin P. McCabe)

(cherry picked from commit 61dc2ea3fee4085b19cd2d01de9eacdc4c42e21f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8dac2459
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8dac2459
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8dac2459

Branch: refs/heads/branch-2
Commit: 8dac245920f41709dd6343873682f172fdbeb0a1
Parents: ca12088
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 11:28:02 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:42:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/tracing/TestTracing.java  | 125 +++
 2 files changed, 45 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dac2459/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c38e0ed..2b1acf5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1126,6 +1126,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
 complete without blocks being replicated. (Ming Ma via wang)
 
+HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
+via Colin P. McCabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8dac2459/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 01361b5..f6fef5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -31,7 +31,7 @@ import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -56,27 +56,26 @@ public class TestTracing {
   private static SpanReceiverHost spanReceiverHost;
 
   @Test
-  public void testGetSpanReceiverHost() throws Exception {
-Configuration c = new Configuration();
+  public void testTracing() throws Exception {
 // getting instance already loaded.
-c.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, "");
-SpanReceiverHost s = SpanReceiverHost.getInstance(c);
-Assert.assertEquals(spanReceiverHost, s);
+Assert.assertEquals(spanReceiverHost,
+SpanReceiverHost.getInstance(new Configuration()));
+
+// write and read without tracing started
+String fileName = "testTracingDisabled.dat";
+writeTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+readTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+
+writeWithTracing();
+readWithTracing();
   }
 
-  @Test
-  public void testWriteTraceHooks() throws Exception {
+  public void writeWithTracing() throws Exception {
 long startTime = System.currentTimeMillis();
 TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
-Path file = new Path("traceWriteTest.dat");
-FSDataOutputStream stream = dfs.create(file);
-
-for (int i = 0; i < 10; i++) {
-  byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-  stream.write(data);
-}
-stream.hflush();
-stream.close();
+writeTestFile("testWriteTraceHooks.dat");
 long endTime = System.currentTimeMillis();
 ts.close();
 
@@ -125,55 +124,17 @@ public class TestTracing {
 Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
   }
 }
+SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @Test
-  public void testWriteWithoutTraceHooks() throws Exception {
-Path file = new Path("withoutTraceWriteTest.dat");
-FSDataOutputStream stream = dfs.create(file);
-for (int i = 0; i < 10; i++) {
-  byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-  stream.write(data);
-}
-stream.hflush();
-

hadoop git commit: HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 d665d11f9 -> 4d5b1fbde


HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via 
Colin P. McCabe)

(cherry picked from commit 61dc2ea3fee4085b19cd2d01de9eacdc4c42e21f)
(cherry picked from commit 8dac245920f41709dd6343873682f172fdbeb0a1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d5b1fbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d5b1fbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d5b1fbd

Branch: refs/heads/branch-2.7
Commit: 4d5b1fbdec9c6ad16f2068940df178362f27393e
Parents: d665d11
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 11:28:02 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:42:29 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/tracing/TestTracing.java  | 125 +++
 2 files changed, 45 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d5b1fbd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2abc8a8..bc24dd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -974,6 +974,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
 complete without blocks being replicated. (Ming Ma via wang)
 
+HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
+via Colin P. McCabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d5b1fbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 3720abe..dc71514 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -31,7 +31,7 @@ import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -56,27 +56,26 @@ public class TestTracing {
   private static SpanReceiverHost spanReceiverHost;
 
   @Test
-  public void testGetSpanReceiverHost() throws Exception {
-Configuration c = new Configuration();
+  public void testTracing() throws Exception {
 // getting instance already loaded.
-c.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, "");
-SpanReceiverHost s = SpanReceiverHost.getInstance(c);
-Assert.assertEquals(spanReceiverHost, s);
+Assert.assertEquals(spanReceiverHost,
+SpanReceiverHost.getInstance(new Configuration()));
+
+// write and read without tracing started
+String fileName = "testTracingDisabled.dat";
+writeTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+readTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+
+writeWithTracing();
+readWithTracing();
   }
 
-  @Test
-  public void testWriteTraceHooks() throws Exception {
+  public void writeWithTracing() throws Exception {
 long startTime = System.currentTimeMillis();
 TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
-Path file = new Path("traceWriteTest.dat");
-FSDataOutputStream stream = dfs.create(file);
-
-for (int i = 0; i < 10; i++) {
-  byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-  stream.write(data);
-}
-stream.hflush();
-stream.close();
+writeTestFile("testWriteTraceHooks.dat");
 long endTime = System.currentTimeMillis();
 ts.close();
 
@@ -125,55 +124,17 @@ public class TestTracing {
 Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
   }
 }
+SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @Test
-  public void testWriteWithoutTraceHooks() throws Exception {
-Path file = new Path("withoutTraceWriteTest.dat");
-FSDataOutputStream stream = dfs.create(file);
-for (int i = 0; i < 10; i++) {
-  byte[] data = 

hadoop git commit: YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk 922b7ed21 -> 9c5911294


YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token 
renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c591129
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c591129
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c591129

Branch: refs/heads/trunk
Commit: 9c5911294e0ba71aefe4763731b0e780cde9d0ca
Parents: 922b7ed
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:08:53 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 13:08:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../security/DelegationTokenRenewer.java| 137 ---
 .../security/TestDelegationTokenRenewer.java|  87 +++-
 3 files changed, 173 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c591129/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c6c56d3..c29fdea 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -912,6 +912,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
 and node-label column (Jason Lowe via wangda)
 
+YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
+renewal of applications part of a bigger workflow. (Daryn Sharp via 
vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c591129/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 2619971..d49ecfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -229,15 +230,16 @@ public class DelegationTokenRenewer extends 
AbstractService {
   @VisibleForTesting
   protected static class DelegationTokenToRenew {
 public final Token<?> token;
-public final ApplicationId applicationId;
+public final Collection<ApplicationId> referringAppIds;
 public final Configuration conf;
 public long expirationDate;
-public TimerTask timerTask;
+public RenewalTimerTask timerTask;
 public volatile boolean shouldCancelAtEnd;
 public long maxDate;
 public String user;
 
-public DelegationTokenToRenew(ApplicationId jId, Token<?> token,
+public DelegationTokenToRenew(Collection<ApplicationId> applicationIds,
+Token<?> token,
 Configuration conf, long expirationDate, boolean shouldCancelAtEnd,
 String user) {
   this.token = token;
@@ -251,20 +253,33 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throw new YarnRuntimeException(e);
 }
   }
-  this.applicationId = jId;
+  this.referringAppIds = Collections.synchronizedSet(
+  new HashSet<ApplicationId>(applicationIds));
   this.conf = conf;
   this.expirationDate = expirationDate;
   this.timerTask = null;
   this.shouldCancelAtEnd = shouldCancelAtEnd;
 }
 
-public void setTimerTask(TimerTask tTask) {
+public void setTimerTask(RenewalTimerTask tTask) {
   timerTask = tTask;
 }
-
+
+@VisibleForTesting
+public void cancelTimer() {
+  if (timerTask != null) {
+timerTask.cancel();
+  }
+}
+
+@VisibleForTesting
+public boolean isTimerCancelled() {
+  return (timerTask != null)  

hadoop git commit: YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8dac24592 -> 1ff3fd33e


YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token 
renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

(cherry picked from commit 9c5911294e0ba71aefe4763731b0e780cde9d0ca)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ff3fd33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ff3fd33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ff3fd33

Branch: refs/heads/branch-2
Commit: 1ff3fd33ed6f2ac09c774cc42b0107c5dbd9c19d
Parents: 8dac245
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:08:53 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 13:10:06 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../security/DelegationTokenRenewer.java| 137 ---
 .../security/TestDelegationTokenRenewer.java|  87 +++-
 3 files changed, 173 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff3fd33/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6a3999f..048557d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -867,6 +867,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
 and node-label column (Jason Lowe via wangda)
 
+YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
+renewal of applications part of a bigger workflow. (Daryn Sharp via 
vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ff3fd33/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 2619971..d49ecfc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -229,15 +230,16 @@ public class DelegationTokenRenewer extends 
AbstractService {
   @VisibleForTesting
   protected static class DelegationTokenToRenew {
 public final Token<?> token;
-public final ApplicationId applicationId;
+public final Collection<ApplicationId> referringAppIds;
 public final Configuration conf;
 public long expirationDate;
-public TimerTask timerTask;
+public RenewalTimerTask timerTask;
 public volatile boolean shouldCancelAtEnd;
 public long maxDate;
 public String user;
 
-public DelegationTokenToRenew(ApplicationId jId, Token<?> token,
+public DelegationTokenToRenew(Collection<ApplicationId> applicationIds,
+Token<?> token,
 Configuration conf, long expirationDate, boolean shouldCancelAtEnd,
 String user) {
   this.token = token;
@@ -251,20 +253,33 @@ public class DelegationTokenRenewer extends 
AbstractService {
   throw new YarnRuntimeException(e);
 }
   }
-  this.applicationId = jId;
+  this.referringAppIds = Collections.synchronizedSet(
+  new HashSet<ApplicationId>(applicationIds));
   this.conf = conf;
   this.expirationDate = expirationDate;
   this.timerTask = null;
   this.shouldCancelAtEnd = shouldCancelAtEnd;
 }
 
-public void setTimerTask(TimerTask tTask) {
+public void setTimerTask(RenewalTimerTask tTask) {
   timerTask = tTask;
 }
-
+
+@VisibleForTesting
+public void cancelTimer() {
+  if (timerTask != null) {
+timerTask.cancel();
+  }
+}
+
+@VisibleForTesting
+public boolean 

hadoop git commit: YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and node-label column. (Jason Lowe via wangda)

2015-04-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fe61e0bb -> 1885141e9


YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and 
node-label column. (Jason Lowe via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1885141e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1885141e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1885141e

Branch: refs/heads/trunk
Commit: 1885141e90837252934192040a40047c7adbc1b5
Parents: 3fe61e0
Author: Wangda Tan wan...@apache.org
Authored: Thu Apr 9 10:35:12 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Thu Apr 9 10:35:12 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../server/resourcemanager/webapp/NodesPage.java  | 18 +++---
 2 files changed, 14 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1885141e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 914bba0..c6c56d3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -909,6 +909,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3430. Made headroom data available on app attempt page of RM WebUI.
 (Xuan Gong via zjshen)
 
+YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
+and node-label column (Jason Lowe via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1885141e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 13e0835..a2bab0c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -65,14 +65,18 @@ class NodesPage extends RmView {
   String type = $(NODE_STATE);
   String labelFilter = $(NODE_LABEL, CommonNodeLabelsManager.ANY).trim();
   TBODYTABLEHamlet tbody =
-  html.table(#nodes).thead().tr().th(.nodelabels, Node Labels)
-  .th(.rack, Rack).th(.state, Node State)
+  html.table(#nodes).thead().tr()
+  .th(.nodelabels, Node Labels)
+  .th(.rack, Rack)
+  .th(.state, Node State)
   .th(.nodeaddress, Node Address)
   .th(.nodehttpaddress, Node HTTP Address)
   .th(.lastHealthUpdate, Last health-update)
   .th(.healthReport, Health-report)
-  .th(.containers, Containers).th(.mem, Mem Used)
-  .th(.mem, Mem Avail).th(.vcores, VCores Used)
+  .th(.containers, Containers)
+  .th(.mem, Mem Used)
+  .th(.mem, Mem Avail)
+  .th(.vcores, VCores Used)
   .th(.vcores, VCores Avail)
   .th(.nodeManagerVersion, Version)._()._().tbody();
   NodeState stateFilter = null;
@@ -168,10 +172,10 @@ class NodesPage extends RmView {
 
   private String nodesTableInit() {
 StringBuilder b = tableInit().append(, aoColumnDefs: [);
-b.append({'bSearchable': false, 'aTargets': [ 6 ]});
+b.append({'bSearchable': false, 'aTargets': [ 7 ]});
 b.append(, {'sType': 'title-numeric', 'bSearchable': false, 
-+ 'aTargets': [ 7, 8 ] });
-b.append(, {'sType': 'title-numeric', 'aTargets': [ 4 ]});
++ 'aTargets': [ 8, 9 ] });
+b.append(, {'sType': 'title-numeric', 'aTargets': [ 5 ]});
 b.append(]});
 return b.toString();
   }



hadoop git commit: Adding release 2.7.1 to CHANGES.txt

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 4c6867ff0 -> 2b44fcaa5


Adding release 2.7.1 to CHANGES.txt

(cherry picked from commit 623fd46c1eac057ea9bc4b503a47ad1a831cdacf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b44fcaa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b44fcaa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b44fcaa

Branch: refs/heads/branch-2.7
Commit: 2b44fcaa53846087e34544ea355c364d59dff408
Parents: 4c6867f
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:53:35 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 14:01:23 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b44fcaa/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 616febc..1f3e40d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop Change Log
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b44fcaa/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bc24dd5..7b540cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop HDFS Change Log
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b44fcaa/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 3c75585..ffbe4ef 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop MapReduce Change Log
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b44fcaa/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 43793f0..ebeb3dd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,5 +1,17 @@
 Hadoop YARN Change Log
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. Contributed by Rohith.

2015-04-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 73d9e4c77 -> f753e2043


HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. 
Contributed by Rohith.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f753e204
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f753e204
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f753e204

Branch: refs/heads/branch-2
Commit: f753e2043bd1fc7bfa17b40196eed14ffe075cff
Parents: 73d9e4c
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 9 10:58:12 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Apr 9 10:58:20 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 5 -
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f753e204/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 70a1840..b0bbef9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -799,7 +799,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
 (Xiaoyu Yao via cnauroth)
-
+
+HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it
+stops. (Rohith via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f753e204/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 39d14d8..11ab23b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -140,6 +140,7 @@ public final class HttpServer2 implements FilterContainer {
   protected final ListString filterNames = new ArrayList();
   static final String STATE_DESCRIPTION_ALIVE =  - alive;
   static final String STATE_DESCRIPTION_NOT_LIVE =  - not live;
+  private final SignerSecretProvider secretProvider;
 
   /**
* Class to construct instances of HTTP server with specific options.
@@ -335,7 +336,7 @@ public final class HttpServer2 implements FilterContainer {
 this.adminsAcl = b.adminsAcl;
 this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, 
appDir);
 try {
-  SignerSecretProvider secretProvider =
+  this.secretProvider =
   constructSecretProvider(b, webAppContext.getServletContext());
   this.webAppContext.getServletContext().setAttribute
   (AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
@@ -945,6 +946,8 @@ public final class HttpServer2 implements FilterContainer {
 }
 
 try {
+  // explicitly destroy the secrete provider
+  secretProvider.destroy();
   // clear  stop webAppContext attributes to avoid memory leaks.
   webAppContext.clearAttributes();
   webAppContext.stop();



hadoop git commit: HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. Contributed by Rohith.

2015-04-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 9a111fcd1 -> d665d11f9


HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. 
Contributed by Rohith.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d665d11f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d665d11f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d665d11f

Branch: refs/heads/branch-2.7
Commit: d665d11f9937383d1d703cad38671c912252f338
Parents: 9a111fc
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 9 10:58:12 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Apr 9 10:58:31 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 5 -
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d665d11f/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a4605ac..616febc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -729,7 +729,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
 (Xiaoyu Yao via cnauroth)
-
+
+HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it
+stops. (Rohith via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d665d11f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 39d14d8..11ab23b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -140,6 +140,7 @@ public final class HttpServer2 implements FilterContainer {
   protected final ListString filterNames = new ArrayList();
   static final String STATE_DESCRIPTION_ALIVE =  - alive;
   static final String STATE_DESCRIPTION_NOT_LIVE =  - not live;
+  private final SignerSecretProvider secretProvider;
 
   /**
* Class to construct instances of HTTP server with specific options.
@@ -335,7 +336,7 @@ public final class HttpServer2 implements FilterContainer {
 this.adminsAcl = b.adminsAcl;
 this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, 
appDir);
 try {
-  SignerSecretProvider secretProvider =
+  this.secretProvider =
   constructSecretProvider(b, webAppContext.getServletContext());
   this.webAppContext.getServletContext().setAttribute
   (AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
@@ -945,6 +946,8 @@ public final class HttpServer2 implements FilterContainer {
 }
 
 try {
+  // explicitly destroy the secrete provider
+  secretProvider.destroy();
   // clear  stop webAppContext attributes to avoid memory leaks.
   webAppContext.clearAttributes();
   webAppContext.stop();



hadoop git commit: Preparing for 2.7.1 development: mvn versions:set

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2b44fcaa5 -> a7fa46c2e


Preparing for 2.7.1 development: mvn versions:set


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7fa46c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7fa46c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7fa46c2

Branch: refs/heads/branch-2.7
Commit: a7fa46c2e0f212e63253a5c28a34e9628005a6c2
Parents: 2b44fca
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 14:13:30 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 14:13:30 2015 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-client/pom.xml| 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml| 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-ant/pom.xml  | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 hadoop-tools/hadoop-tools-dist/pom.xml   | 4 ++--
 hadoop-tools/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml  | 4 ++--
 .../hadoop-yarn-applications-distributedshell/pom.xml| 4 ++--
 .../hadoop-yarn-applications-unmanaged-am-launcher/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/pom.xml | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml   | 4 ++--
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/pom.xml | 4 ++--
 .../hadoop-yarn-server-applicationhistoryservice/pom.xml | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml | 4 ++--
 .../hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml| 4 ++--
 .../hadoop-yarn-server-resourcemanager/pom.xml   | 4 ++--
 

hadoop git commit: HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 30acb7372 -> 61dc2ea3f


HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via 
Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61dc2ea3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61dc2ea3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61dc2ea3

Branch: refs/heads/trunk
Commit: 61dc2ea3fee4085b19cd2d01de9eacdc4c42e21f
Parents: 30acb73
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 11:28:02 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:28:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/tracing/TestTracing.java  | 125 +++
 2 files changed, 45 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61dc2ea3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59cab03..4b22fa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1427,6 +1427,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8072. Reserved RBW space is not released if client terminates while
 writing block. (Arpit Agarwal)
 
+HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
+via Colin P. McCabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61dc2ea3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 01361b5..f6fef5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -31,7 +31,7 @@ import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -56,27 +56,26 @@ public class TestTracing {
   private static SpanReceiverHost spanReceiverHost;
 
   @Test
-  public void testGetSpanReceiverHost() throws Exception {
-Configuration c = new Configuration();
+  public void testTracing() throws Exception {
 // getting instance already loaded.
-c.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, );
-SpanReceiverHost s = SpanReceiverHost.getInstance(c);
-Assert.assertEquals(spanReceiverHost, s);
+Assert.assertEquals(spanReceiverHost,
+SpanReceiverHost.getInstance(new Configuration()));
+
+// write and read without tracing started
+String fileName = testTracingDisabled.dat;
+writeTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+readTestFile(fileName);
+Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+
+writeWithTracing();
+readWithTracing();
   }
 
-  @Test
-  public void testWriteTraceHooks() throws Exception {
+  public void writeWithTracing() throws Exception {
 long startTime = System.currentTimeMillis();
 TraceScope ts = Trace.startSpan(testWriteTraceHooks, Sampler.ALWAYS);
-Path file = new Path(traceWriteTest.dat);
-FSDataOutputStream stream = dfs.create(file);
-
-for (int i = 0; i  10; i++) {
-  byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-  stream.write(data);
-}
-stream.hflush();
-stream.close();
+writeTestFile(testWriteTraceHooks.dat);
 long endTime = System.currentTimeMillis();
 ts.close();
 
@@ -125,55 +124,17 @@ public class TestTracing {
 Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
   }
 }
+SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @Test
-  public void testWriteWithoutTraceHooks() throws Exception {
-Path file = new Path(withoutTraceWriteTest.dat);
-FSDataOutputStream stream = dfs.create(file);
-for (int i = 0; i  10; i++) {
-  byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-  stream.write(data);
-}
-stream.hflush();
-stream.close();
-Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
-  }
-
-  @Test
-  public void 

hadoop git commit: YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu via kasha)

2015-04-09 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4e099c113 -> 53e0bf5c1


YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu 
via kasha)

(cherry picked from commit 6495940eae09418a939882a8955845f9241a6485)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53e0bf5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53e0bf5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53e0bf5c

Branch: refs/heads/branch-2
Commit: 53e0bf5c172c396780b96fda8dd31ad799a25fed
Parents: 4e099c1
Author: Karthik Kambatla ka...@apache.org
Authored: Thu Apr 9 00:07:49 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Thu Apr 9 00:10:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../nodemanager/containermanager/container/ContainerImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e0bf5c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 09fd819..7674f5d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -114,6 +114,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2890. MiniYarnCluster should turn on timeline service if
 configured to do so. (Mit Desai via hitesh)
 
+YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+(Zhihai Xu via kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53e0bf5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cf3d8e7..131d439 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -638,7 +639,7 @@ public class ContainerImpl implements Container {
   return ContainerState.LOCALIZATION_FAILED;
 }
 MapLocalResourceVisibility, CollectionLocalResourceRequest req =
-new HashMapLocalResourceVisibility, 
+new LinkedHashMapLocalResourceVisibility,
 CollectionLocalResourceRequest();
 if (!container.publicRsrcs.isEmpty()) {
   req.put(LocalResourceVisibility.PUBLIC, container.publicRsrcs);



hadoop git commit: YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and node-label column. (Jason Lowe via wangda)

2015-04-09 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 edf2f52d6 -> 73d9e4c77


YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and 
node-label column. (Jason Lowe via wangda)

(cherry picked from commit 1885141e90837252934192040a40047c7adbc1b5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73d9e4c7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73d9e4c7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73d9e4c7

Branch: refs/heads/branch-2
Commit: 73d9e4c77b2aa4734845376d20fb76e6b91b6db0
Parents: edf2f52
Author: Wangda Tan wan...@apache.org
Authored: Thu Apr 9 10:35:12 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Thu Apr 9 10:36:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   |  3 +++
 .../server/resourcemanager/webapp/NodesPage.java  | 18 +++---
 2 files changed, 14 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73d9e4c7/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7674f5d..6a3999f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -864,6 +864,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3430. Made headroom data available on app attempt page of RM WebUI.
 (Xuan Gong via zjshen)
 
+YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
+and node-label column (Jason Lowe via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73d9e4c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 13e0835..a2bab0c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -65,14 +65,18 @@ class NodesPage extends RmView {
   String type = $(NODE_STATE);
   String labelFilter = $(NODE_LABEL, CommonNodeLabelsManager.ANY).trim();
   TBODYTABLEHamlet tbody =
-  html.table(#nodes).thead().tr().th(.nodelabels, Node Labels)
-  .th(.rack, Rack).th(.state, Node State)
+  html.table(#nodes).thead().tr()
+  .th(.nodelabels, Node Labels)
+  .th(.rack, Rack)
+  .th(.state, Node State)
   .th(.nodeaddress, Node Address)
   .th(.nodehttpaddress, Node HTTP Address)
   .th(.lastHealthUpdate, Last health-update)
   .th(.healthReport, Health-report)
-  .th(.containers, Containers).th(.mem, Mem Used)
-  .th(.mem, Mem Avail).th(.vcores, VCores Used)
+  .th(.containers, Containers)
+  .th(.mem, Mem Used)
+  .th(.mem, Mem Avail)
+  .th(.vcores, VCores Used)
   .th(.vcores, VCores Avail)
   .th(.nodeManagerVersion, Version)._()._().tbody();
   NodeState stateFilter = null;
@@ -168,10 +172,10 @@ class NodesPage extends RmView {
 
   private String nodesTableInit() {
 StringBuilder b = tableInit().append(, aoColumnDefs: [);
-b.append({'bSearchable': false, 'aTargets': [ 6 ]});
+b.append({'bSearchable': false, 'aTargets': [ 7 ]});
 b.append(, {'sType': 'title-numeric', 'bSearchable': false, 
-+ 'aTargets': [ 7, 8 ] });
-b.append(, {'sType': 'title-numeric', 'aTargets': [ 4 ]});
++ 'aTargets': [ 8, 9 ] });
+b.append(, {'sType': 'title-numeric', 'aTargets': [ 5 ]});
 b.append(]});
 return b.toString();
   }



hadoop git commit: HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider before returning to client (asuresh)

2015-04-09 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 61dc2ea3f -> 922b7ed21


HDFS-8091: ACLStatus and XAttributes should be presented to 
INodeAttributesProvider before returning to client (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/922b7ed2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/922b7ed2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/922b7ed2

Branch: refs/heads/trunk
Commit: 922b7ed21d1f1460263ca42f709bb9f415d189c5
Parents: 61dc2ea
Author: Arun Suresh asur...@apache.org
Authored: Thu Apr 9 12:28:44 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Thu Apr 9 12:38:33 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/AclStorage.java | 11 ++
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  3 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  3 +-
 .../hdfs/server/namenode/XAttrStorage.java  |  6 ++--
 .../namenode/TestINodeAttributeProvider.java| 36 
 6 files changed, 50 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/922b7ed2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4b22fa4..df6d90a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -153,6 +153,9 @@ Trunk (Unreleased)
 
   BUG FIXES
  
+HDFS-8091: ACLStatus and XAttributes should be presented to 
INodeAttributesProvider
+   before returning to client (asuresh)
+
 HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik 
Kumar
 via cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/922b7ed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
index 4f6ce3a..abd3755 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
@@ -163,6 +163,17 @@ public final class AclStorage {
   }
 
   /**
+   * Reads the existing extended ACL entries of an INodeAttribute object.
+   *
+   * @param inodeAttr INode to read
+   * @return ListAclEntry containing extended inode ACL entries
+   */
+  public static ListAclEntry readINodeAcl(INodeAttributes inodeAttr) {
+AclFeature f = inodeAttr.getAclFeature();
+return getEntriesFromAclFeature(f);
+  }
+
+  /**
* Build list of AclEntries from the AclFeature
* @param aclFeature AclFeature
* @return List of entries

http://git-wip-us.apache.org/repos/asf/hadoop/blob/922b7ed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index dff1c2e..0c572b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -172,7 +172,8 @@ class FSDirAclOp {
   }
   INode inode = FSDirectory.resolveLastINode(iip);
   int snapshotId = iip.getPathSnapshotId();
-  List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
+  List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
+  inode.getLocalNameBytes(), inode, snapshotId));
   FsPermission fsPermission = inode.getFsPermission(snapshotId);
   return new AclStatus.Builder()
   .owner(inode.getUserName()).group(inode.getGroupName())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/922b7ed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index d5c9124..53b9b41 100644
--- 

Git Push Summary

2015-04-09 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 [created] 9d8952f97


hadoop git commit: Fix CHANGES.txt for HDFS-8091

2015-04-09 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk c4986b2d0 - a813db0b1


Fix CHANGES.txt for HDFS-8091


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a813db0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a813db0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a813db0b

Branch: refs/heads/trunk
Commit: a813db0b1bed36dc846705640db9a8f9e2cc33de
Parents: c4986b2
Author: Arun Suresh asur...@apache.org
Authored: Thu Apr 9 13:51:00 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Thu Apr 9 13:51:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a813db0b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index df6d90a..979534e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -153,9 +153,6 @@ Trunk (Unreleased)
 
   BUG FIXES
  
-HDFS-8091: ACLStatus and XAttributes should be presented to 
INodeAttributesProvider
-   before returning to client (asuresh)
-
 HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik 
Kumar
 via cmccabe)
 
@@ -463,6 +460,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
 even for failed transfers (vinayakumarb)
 
+HDFS-8091: ACLStatus and XAttributes should be presented to
+INodeAttributesProvider before returning to client (asuresh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



[2/2] hadoop git commit: HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P. McCabe)

2015-04-09 Thread cmccabe
HDFS-7188. support build libhdfs3 on windows (Thanh Do via Colin P.  McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f0ea98f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f0ea98f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f0ea98f0

Branch: refs/heads/HDFS-6994
Commit: f0ea98f0c873847f6ad8782d889eaff89e978b7c
Parents: af17a55
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 11:24:55 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:39:21 2015 -0700

--
 .../src/contrib/libhdfs3/CMake/Options.cmake|  31 +-
 .../src/contrib/libhdfs3/os/posix/Platform.cc   | 344 ++
 .../contrib/libhdfs3/os/posix/StackPrinter.cc   | 670 +++
 .../src/contrib/libhdfs3/os/posix/Thread.cc |  47 ++
 .../src/contrib/libhdfs3/os/windows/Platform.cc | 303 +
 .../contrib/libhdfs3/os/windows/StackPrinter.cc |  62 ++
 .../src/contrib/libhdfs3/os/windows/Thread.cc   |  41 ++
 .../src/contrib/libhdfs3/os/windows/platform.h  |  35 +
 .../src/contrib/libhdfs3/os/windows/sys/mman.c  | 204 ++
 .../src/contrib/libhdfs3/os/windows/sys/mman.h  |  62 +-
 .../src/contrib/libhdfs3/os/windows/sys/time.h  |  22 +-
 .../src/contrib/libhdfs3/os/windows/uuid/uuid.h |   9 +-
 .../src/contrib/libhdfs3/src/CMakeLists.txt |  16 +-
 .../contrib/libhdfs3/src/client/BlockLocation.h |   1 +
 .../libhdfs3/src/client/InputStreamImpl.cc  |  77 +--
 .../libhdfs3/src/client/InputStreamImpl.h   |   2 +
 .../contrib/libhdfs3/src/client/KerberosName.cc |  68 --
 .../contrib/libhdfs3/src/client/Permission.h|   1 +
 .../src/contrib/libhdfs3/src/client/UserInfo.cc |  34 -
 .../src/contrib/libhdfs3/src/common/Atoi.cc |  12 +-
 .../src/contrib/libhdfs3/src/common/BigEndian.h |   1 +
 .../contrib/libhdfs3/src/common/CFileWrapper.cc |   4 +-
 .../libhdfs3/src/common/ExceptionInternal.h |   1 -
 .../libhdfs3/src/common/MappedFileWrapper.cc|   4 +-
 .../src/contrib/libhdfs3/src/common/SharedPtr.h |   2 +-
 .../contrib/libhdfs3/src/common/StackPrinter.cc | 670 ---
 .../src/contrib/libhdfs3/src/common/Thread.cc   |  47 --
 .../contrib/libhdfs3/src/common/UnorderedMap.h  |  17 +-
 .../libhdfs3/src/common/WritableUtils.cc|   5 +-
 .../contrib/libhdfs3/src/common/WritableUtils.h |   1 +
 .../contrib/libhdfs3/src/common/WriteBuffer.h   |   1 +
 .../src/contrib/libhdfs3/src/network/Syscall.h  |   8 +-
 .../contrib/libhdfs3/src/network/TcpSocket.cc   |  24 +-
 .../src/contrib/libhdfs3/src/rpc/RpcClient.h|   2 +-
 .../libhdfs3/src/server/NamenodeProxy.cc|  90 ---
 .../contrib/libhdfs3/src/server/NamenodeProxy.h |   4 +
 36 files changed, 1884 insertions(+), 1038 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f0ea98f0/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake 
b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
index b957c40..738e404 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/libhdfs3/CMake/Options.cmake
@@ -63,6 +63,7 @@ IF(ENABLE_SSE STREQUAL ON)
 # In Visual Studio 2013, this option will use SS4.2 instructions
 # if available. Not sure about the behaviour in Visual Studio 2010.
 SET(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} /arch:SSE2)
+ADD_DEFINITIONS(-D__SSE4_2__)
 ELSE(MSVC)
 SET(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} -msse4.2)
 ENDIF(MSVC)
@@ -80,25 +81,29 @@ IF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
 SET(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} -Wl,-bind_at_load)
 ENDIF(OS_MACOSX AND CMAKE_COMPILER_IS_GNUCXX)
 
-
 IF(OS_LINUX)
 SET(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS} -Wl,--export-dynamic)
 ENDIF(OS_LINUX)
 
+IF(MSVC)
+  # Always enable boost for windows as VC does not support some C++11 features,
+  # such as nested exception.
+  IF(ENABLE_BOOST STREQUAL OFF)
+ADD_DEFINITIONS(-DNEED_BOOST)
+  ENDIF(ENABLE_BOOST STREQUAL OFF)
+  # Find boost libraries with flavor: mt-sgd (multi-thread, static, and debug)
+  SET(Boost_USE_STATIC_LIBS ON)
+  SET(Boost_USE_MULTITHREADED ON)
+  SET(Boost_USE_STATIC_RUNTIME ON)
+  FIND_PACKAGE(Boost 1.53 COMPONENTS thread chrono system atomic iostreams 
REQUIRED)
+  INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS})
+  LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
+ENDIF(MSVC)
+
 SET(BOOST_ROOT ${CMAKE_PREFIX_PATH})
 IF(ENABLE_BOOST STREQUAL ON)
MESSAGE(STATUS "using boost instead of native compiler c++0x support.")
-IF(MSVC)
-# Find boost libraries with flavor: mt-sgd (multi-thread, 

hadoop git commit: HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. Contributed by Haohui Mai.

2015-04-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 aec1c5ec7 - e34afb48d


HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e34afb48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e34afb48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e34afb48

Branch: refs/heads/branch-2
Commit: e34afb48d48872df60c8fb5bd0ff7eb25ad4ee9e
Parents: aec1c5e
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 9 14:36:27 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Apr 9 14:36:35 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 17 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 50 +++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 25 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  2 +-
 5 files changed, 71 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e34afb48/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 604d60e..7316e3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -60,4 +60,21 @@ public interface HdfsClientConfigKeys {
 public static final int WINDOW_BASE_DEFAULT
 = 3000;
   }
+
+  // WebHDFS retry configuration policy
+  interface WebHdfsRetry {
+String  PREFIX = HdfsClientConfigKeys.PREFIX + http.client.;
+String  RETRY_POLICY_ENABLED_KEY = PREFIX + 
dfs.http.client.retry.policy.enabled;
+boolean RETRY_POLICY_ENABLED_DEFAULT = false;
+String  RETRY_POLICY_SPEC_KEY = PREFIX + 
dfs.http.client.retry.policy.spec;
+String  RETRY_POLICY_SPEC_DEFAULT = 1,6,6,10; //t1,n1,t2,n2,...
+String  FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + 
dfs.http.client.failover.max.attempts;
+int FAILOVER_MAX_ATTEMPTS_DEFAULT =  15;
+String  RETRY_MAX_ATTEMPTS_KEY = PREFIX + 
dfs.http.client.retry.max.attempts;
+int RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+String  FAILOVER_SLEEPTIME_BASE_KEY = PREFIX + 
dfs.http.client.failover.sleep.base.millis;
+int FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+String  FAILOVER_SLEEPTIME_MAX_KEY = PREFIX + 
dfs.http.client.failover.sleep.max.millis;
+int FAILOVER_SLEEPTIME_MAX_DEFAULT =  15000;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e34afb48/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6914934..b53b2c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -94,6 +94,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8099. Change DFSInputStream has been closed already message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e34afb48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d8b1692..777bf76 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -607,19 +607,43 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final long   DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT 
= 6;
 
   // WebHDFS retry policy
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = 
dfs.http.client.retry.policy.enabled;
-  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = 
false;
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = 

hadoop git commit: YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu via kasha)

2015-04-09 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 9d8952f97 - 6495940ea


YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu 
via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6495940e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6495940e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6495940e

Branch: refs/heads/trunk
Commit: 6495940eae09418a939882a8955845f9241a6485
Parents: 9d8952f
Author: Karthik Kambatla ka...@apache.org
Authored: Thu Apr 9 00:07:49 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Thu Apr 9 00:07:49 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../nodemanager/containermanager/container/ContainerImpl.java | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6495940e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4714f4e..914bba0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -162,6 +162,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2890. MiniYarnCluster should turn on timeline service if
 configured to do so. (Mit Desai via hitesh)
 
+YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+(Zhihai Xu via kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6495940e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cf3d8e7..131d439 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -638,7 +639,7 @@ public class ContainerImpl implements Container {
   return ContainerState.LOCALIZATION_FAILED;
 }
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
-new HashMap<LocalResourceVisibility,
+new LinkedHashMap<LocalResourceVisibility,
    Collection<LocalResourceRequest>>();
 if (!container.publicRsrcs.isEmpty()) {
   req.put(LocalResourceVisibility.PUBLIC, container.publicRsrcs);



hadoop git commit: Adding release 2.7.1 to CHANGES.txt

2015-04-09 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0fdd5c23c - aec1c5ec7


Adding release 2.7.1 to CHANGES.txt

(cherry picked from commit 623fd46c1eac057ea9bc4b503a47ad1a831cdacf)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aec1c5ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aec1c5ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aec1c5ec

Branch: refs/heads/branch-2
Commit: aec1c5ec7de2a9ba38dd3cf4b752514e4a720bfa
Parents: 0fdd5c2
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 9 13:53:35 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Thu Apr 9 13:56:14 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 12 
 hadoop-mapreduce-project/CHANGES.txt| 12 
 hadoop-yarn-project/CHANGES.txt | 12 
 4 files changed, 48 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aec1c5ec/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index b0bbef9..dc60e67 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -70,6 +70,18 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11800. Clean up some test methods in TestCodec.java.
 (Brahma Reddy Battula via aajisaka)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aec1c5ec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e3f6372..6914934 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -145,6 +145,18 @@ Release 2.8.0 - UNRELEASED
 HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
 even for failed transfers (vinayakumarb)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aec1c5ec/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 48a39a4..b388f57 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -82,6 +82,18 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aec1c5ec/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 048557d..eebbba7 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -117,6 +117,18 @@ Release 2.8.0 - UNRELEASED
 YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
 (Zhihai Xu via kasha)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-8099. Change DFSInputStream has been closed already message to debug log level (Charles Lamb via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f753e2043 - ca1208825


HDFS-8099. Change DFSInputStream has been closed already message to debug log 
level (Charles Lamb via Colin P. McCabe)

(cherry picked from commit 30acb7372ab97adf9bc86ead529c96cfe36e2396)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca120882
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca120882
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca120882

Branch: refs/heads/branch-2
Commit: ca1208825390b4121cc2fdaacad258b28ac26f3d
Parents: f753e20
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 10:50:44 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:23:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca120882/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 132daa9..c38e0ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -91,6 +91,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
 classes at runtime. (Sean Busbey via atm)
 
+HDFS-8099. Change DFSInputStream has been closed already message to
+debug log level (Charles Lamb via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca120882/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a9f2746..41b9d50 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -666,7 +666,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   @Override
   public synchronized void close() throws IOException {
 if (!closed.compareAndSet(false, true)) {
-  DFSClient.LOG.warn("DFSInputStream has been closed already");
+  DFSClient.LOG.debug("DFSInputStream has been closed already");
   return;
 }
 dfsClient.checkOpen();



hadoop git commit: HDFS-8099. Change DFSInputStream has been closed already message to debug log level (Charles Lamb via Colin P. McCabe)

2015-04-09 Thread cmccabe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 63c659ddd - 30acb7372


HDFS-8099. Change DFSInputStream has been closed already message to debug log 
level (Charles Lamb via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30acb737
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30acb737
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30acb737

Branch: refs/heads/trunk
Commit: 30acb7372ab97adf9bc86ead529c96cfe36e2396
Parents: 63c659d
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Thu Apr 9 10:50:44 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Thu Apr 9 11:22:39 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30acb737/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 727bec7..59cab03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
 classes at runtime. (Sean Busbey via atm)
 
+HDFS-8099. Change DFSInputStream has been closed already message to
+debug log level (Charles Lamb via Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30acb737/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a9f2746..41b9d50 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -666,7 +666,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   @Override
   public synchronized void close() throws IOException {
 if (!closed.compareAndSet(false, true)) {
-  DFSClient.LOG.warn("DFSInputStream has been closed already");
+  DFSClient.LOG.debug("DFSInputStream has been closed already");
   return;
 }
 dfsClient.checkOpen();



hadoop git commit: HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even for failed transfers (Contributed by Vinayakumar B)

2015-04-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 351fac25a - 4e099c113


HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even 
for failed transfers (Contributed by Vinayakumar B)

(cherry picked from commit 9d8952f97f638ede27e4336b9601507d7bb1de7b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e099c11
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e099c11
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e099c11

Branch: refs/heads/branch-2
Commit: 4e099c113b5823f9bce92a014d469083a7863c1a
Parents: 351fac2
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 9 11:58:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Thu Apr 9 11:59:05 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/datanode/BPOfferService.java|  1 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 ++
 .../datanode/metrics/DataNodeMetrics.java   |  4 +--
 .../server/datanode/TestDataNodeMetrics.java| 37 +---
 5 files changed, 40 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e099c11/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a75f383..53b88dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -136,6 +136,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7725. Incorrect nodes in service metrics caused all writes to fail.
 (Ming Ma via wang)
 
+HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
+even for failed transfers (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e099c11/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 8efad83..67979f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -657,7 +657,6 @@ class BPOfferService {
   // Send a copy of a block to another datanode
   dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(),
   bcmd.getTargets(), bcmd.getTargetStorageTypes());
-  dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length);
   break;
 case DatanodeProtocol.DNA_INVALIDATE:
   //

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e099c11/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6f70168..1285aaf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2181,6 +2181,8 @@ public class DataNode extends ReconfigurableBase
   + Arrays.asList(targets));
 }
   }
+} else {
+  metrics.incrBlocksReplicated();
 }
   } catch (IOException ie) {
 LOG.warn(bpReg + :Failed to transfer  + b +  to  +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e099c11/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 2e8eb22..2e62b3c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -210,8 +210,8 @@ public class DataNodeMetrics {
 cacheReports.add(latency);
   }
 
-  

hadoop git commit: HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. Contributed by Haohui Mai.

2015-04-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 623fd46c1 - af9d4fede


HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af9d4fed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af9d4fed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af9d4fed

Branch: refs/heads/trunk
Commit: af9d4fede535f0699d08e592d5c4e133a5823663
Parents: 623fd46
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 9 14:36:27 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Apr 9 14:36:27 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 17 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 50 +++-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 25 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  2 +-
 5 files changed, 71 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9d4fed/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 604d60e..7316e3b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -60,4 +60,21 @@ public interface HdfsClientConfigKeys {
 public static final int WINDOW_BASE_DEFAULT
 = 3000;
   }
+
+  // WebHDFS retry configuration policy
+  interface WebHdfsRetry {
+String  PREFIX = HdfsClientConfigKeys.PREFIX + http.client.;
+String  RETRY_POLICY_ENABLED_KEY = PREFIX + 
dfs.http.client.retry.policy.enabled;
+boolean RETRY_POLICY_ENABLED_DEFAULT = false;
+String  RETRY_POLICY_SPEC_KEY = PREFIX + 
dfs.http.client.retry.policy.spec;
+String  RETRY_POLICY_SPEC_DEFAULT = 1,6,6,10; //t1,n1,t2,n2,...
+String  FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + 
dfs.http.client.failover.max.attempts;
+int FAILOVER_MAX_ATTEMPTS_DEFAULT =  15;
+String  RETRY_MAX_ATTEMPTS_KEY = PREFIX + 
dfs.http.client.retry.max.attempts;
+int RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+String  FAILOVER_SLEEPTIME_BASE_KEY = PREFIX + 
dfs.http.client.failover.sleep.base.millis;
+int FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+String  FAILOVER_SLEEPTIME_MAX_KEY = PREFIX + 
dfs.http.client.failover.sleep.max.millis;
+int FAILOVER_SLEEPTIME_MAX_DEFAULT =  15000;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9d4fed/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 695dc36..e091a65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8099. Change DFSInputStream has been closed already message to
 debug log level (Charles Lamb via Colin P. McCabe)
 
+HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af9d4fed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d0ca125..ce08075 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -601,19 +601,43 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final long   DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT 
= 6;
 
   // WebHDFS retry policy
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = 
dfs.http.client.retry.policy.enabled;
-  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = 
false;
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = 

hadoop git commit: HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. Contributed by Rohith.

2015-04-09 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1885141e9 - 63c659ddd


HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. 
Contributed by Rohith.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63c659dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63c659dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63c659dd

Branch: refs/heads/trunk
Commit: 63c659ddd0641227501851dbcd1352064fa9348a
Parents: 1885141
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 9 10:58:12 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Thu Apr 9 10:58:12 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 5 -
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java   | 5 -
 2 files changed, 8 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c659dd/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 50fb4d7..397161d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1222,7 +1222,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
 (Xiaoyu Yao via cnauroth)
-
+
+HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it
+stops. (Rohith via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/63c659dd/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 0f1c222..6fd34d5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -140,6 +140,7 @@ public final class HttpServer2 implements FilterContainer {
   protected final ListString filterNames = new ArrayList();
   static final String STATE_DESCRIPTION_ALIVE =  - alive;
   static final String STATE_DESCRIPTION_NOT_LIVE =  - not live;
+  private final SignerSecretProvider secretProvider;
 
   /**
* Class to construct instances of HTTP server with specific options.
@@ -335,7 +336,7 @@ public final class HttpServer2 implements FilterContainer {
 this.adminsAcl = b.adminsAcl;
 this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, 
appDir);
 try {
-  SignerSecretProvider secretProvider =
+  this.secretProvider =
   constructSecretProvider(b, webAppContext.getServletContext());
   this.webAppContext.getServletContext().setAttribute
   (AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
@@ -913,6 +914,8 @@ public final class HttpServer2 implements FilterContainer {
 }
 
 try {
+  // explicitly destroy the secret provider
+  secretProvider.destroy();
   // clear  stop webAppContext attributes to avoid memory leaks.
   webAppContext.clearAttributes();
   webAppContext.stop();



[2/2] hadoop git commit: HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey. (cherry picked from commit 3fe61e0bb0d025a6acbb754027f7

2015-04-09 Thread atm
HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes 
at runtime. Contributed by Sean Busbey.
(cherry picked from commit 3fe61e0bb0d025a6acbb754027f73f3084b2f4d1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edf2f52d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edf2f52d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edf2f52d

Branch: refs/heads/branch-2
Commit: edf2f52d6d2b0fd7f0ea5a63401a8affeb976949
Parents: 6d1cb34
Author: Aaron T. Myers a...@apache.org
Authored: Thu Apr 9 09:40:08 2015 -0700
Committer: Aaron T. Myers a...@apache.org
Committed: Thu Apr 9 09:45:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 +++--
 .../apache/hadoop/hdfs/TestDFSConfigKeys.java   | 37 
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf2f52d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2c4a3bf..132daa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -88,6 +88,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7979. Initialize block report IDs with a random number. (wang)
 
+HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf2f52d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f88b221..d8b1692 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -156,7 +155,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY 
= dfs.namenode.replication.max-streams-hard-limit;
   public static final int 
DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = 
dfs.web.authentication.filter;
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = 
AuthFilter.class.getName();
+  /* Phrased as below to avoid javac inlining as a constant, to match the 
behavior when
+ this was AuthFilter.class.getName(). Note that if you change the import 
for AuthFilter, you
+ need to update the literal here as well as TestDFSConfigKeys.
+   */
+  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
+  org.apache.hadoop.hdfs.web.AuthFilter.toString();
   public static final String  DFS_WEBHDFS_ENABLED_KEY = dfs.webhdfs.enabled;
   public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = 
dfs.webhdfs.user.provider.user.pattern;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/edf2f52d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
new file mode 100644
index 000..c7df891
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in 

[1/2] hadoop git commit: HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey.

2015-04-09 Thread atm
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6d1cb3422 - edf2f52d6
  refs/heads/trunk 6495940ea - 3fe61e0bb


HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes 
at runtime. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fe61e0b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fe61e0b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fe61e0b

Branch: refs/heads/trunk
Commit: 3fe61e0bb0d025a6acbb754027f73f3084b2f4d1
Parents: 6495940
Author: Aaron T. Myers a...@apache.org
Authored: Thu Apr 9 09:40:08 2015 -0700
Committer: Aaron T. Myers a...@apache.org
Committed: Thu Apr 9 09:40:08 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 +++--
 .../apache/hadoop/hdfs/TestDFSConfigKeys.java   | 37 
 3 files changed, 46 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fe61e0b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 74ed624..727bec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -406,6 +406,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-7979. Initialize block report IDs with a random number. (wang)
 
+HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fe61e0b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3bb2ae6..d0ca125 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -157,7 +156,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY 
= dfs.namenode.replication.max-streams-hard-limit;
   public static final int 
DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = 
dfs.web.authentication.filter;
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = 
AuthFilter.class.getName();
+  /* Phrased as below to avoid javac inlining as a constant, to match the 
behavior when
+ this was AuthFilter.class.getName(). Note that if you change the import 
for AuthFilter, you
+ need to update the literal here as well as TestDFSConfigKeys.
+   */
+  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
+  org.apache.hadoop.hdfs.web.AuthFilter.toString();
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = 
dfs.webhdfs.user.provider.user.pattern;
   public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT =
   HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fe61e0b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
new file mode 100644
index 000..c7df891
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you 

hadoop git commit: HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete without blocks being replicated. Contributed by Ming Ma. (cherry picked from commit 5a540c3d3107199f4

2015-04-09 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 84ba4db2a - 9a111fcd1


HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can complete 
without blocks being replicated. Contributed by Ming Ma.
(cherry picked from commit 5a540c3d3107199f4632e2ad7ee8ff913b107a04)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a111fcd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a111fcd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a111fcd

Branch: refs/heads/branch-2.7
Commit: 9a111fcd1db0c74fb32537503ab2b7592702fea5
Parents: 84ba4db
Author: Kihwal Lee kih...@apache.org
Authored: Thu Apr 9 10:01:44 2015 -0500
Committer: Kihwal Lee kih...@apache.org
Committed: Thu Apr 9 10:01:44 2015 -0500

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java|  5 +++
 .../apache/hadoop/hdfs/TestDecommission.java| 32 
 3 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a111fcd/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 54b52d3..2abc8a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8072. Reserved RBW space is not released if client terminates while
 writing block. (Arpit Agarwal)
 
+HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
+complete without blocks being replicated. (Ming Ma via wang)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a111fcd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 11965c1..e0f87c7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3308,6 +3308,11 @@ public class BlockManager {
* liveness. Dead nodes cannot always be safely decommissioned.
*/
   boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+if (!node.checkBlockReportReceived()) {
+  LOG.info(Node {} hasn't sent its first block report., node);
+  return false;
+}
+
 if (node.isAlive) {
   return true;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a111fcd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 081e40f..1ab7427 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -882,9 +882,12 @@ public class TestDecommission {
 int numNamenodes = 1;
 int numDatanodes = 1;
 int replicas = 1;
-
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
+conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
+
 startCluster(numNamenodes, numDatanodes, conf);
-Path file1 = new Path(testDecommission.dat);
+Path file1 = new Path(testDecommissionWithNamenodeRestart.dat);
 FileSystem fileSys = cluster.getFileSystem();
 writeFile(fileSys, file1, replicas);
 
@@ -894,37 +897,26 @@ public class TestDecommission {
 String excludedDatanodeName = info[0].getXferAddr();
 
 writeConfigFile(excludeFile, new 
ArrayListString(Arrays.asList(excludedDatanodeName)));
-
+
 //Add a new datanode to cluster
 cluster.startDataNodes(conf, 1, true, null, null, null, null);
 numDatanodes+=1;
-
+
 assertEquals(Number of datanodes should be 2 , 2, 
cluster.getDataNodes().size());
 //Restart the namenode
 cluster.restartNameNode();