hadoop git commit: HDFS-7640. print NFS Client in the NFS log. Contributed by Brandon Li.

2015-01-19 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0a2d3e717 -> 5e5e35b18


HDFS-7640. print NFS Client in the NFS log. Contributed by Brandon Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e5e35b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e5e35b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e5e35b1

Branch: refs/heads/trunk
Commit: 5e5e35b1856293503124b77d5d4998a4d8e83082
Parents: 0a2d3e7
Author: Haohui Mai whe...@apache.org
Authored: Mon Jan 19 17:29:46 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Mon Jan 19 17:29:46 2015 -0800

--
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 53 +---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 2 files changed, 36 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e5e35b1/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 148d4f7..9204c4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -330,8 +330,9 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 }
 
 FileHandle handle = request.getHandle();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("GETATTR for fileId: " + handle.getFileId());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 Nfs3FileAttributes attrs = null;
@@ -423,7 +424,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SETATTR fileId: " + handle.getFileId());
+      LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
@@ -509,7 +511,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 String fileName = request.getName();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
-          + fileName);
+          + fileName + " client:" + remoteAddress);
 }
 
 try {
@@ -577,7 +579,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 Nfs3FileAttributes attrs;
 
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS ACCESS fileId: " + handle.getFileId());
+      LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 try {
@@ -643,7 +646,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READLINK fileId: " + handle.getFileId());
+      LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 String fileIdPath = Nfs3Utils.getFileIdPath(handle);
@@ -722,7 +726,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
-          + " count: " + count);
+          + " count: " + count + " client:" + remoteAddress);
 }
 
 Nfs3FileAttributes attrs;
@@ -863,7 +867,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: "
           + offset + " length:" + count + " stableHow:" + stableHow.getValue()
-          + " xid:" + xid);
+          + " xid:" + xid + " client:" + remoteAddress);
 }
 
 Nfs3FileAttributes preOpAttr = null;
@@ -936,7 +940,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 String fileName = request.getName();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
-          + " filename: " + fileName);
+          + " filename: " + fileName + " client:" + remoteAddress);
 }
 
 int createMode = request.getMode();
@@ -1067,6 +1071,10 @@ public class RpcProgramNfs3 extends RpcProgram 
implements Nfs3Interface {
 }
 FileHandle dirHandle = request.getHandle();
 String fileName = request.getName();
+ 
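The change is mechanical across the NFS3 handlers: each per-operation log line stays guarded, GETATTR is raised from trace to debug, and every message now carries the remote client address. A minimal sketch of that idiom using commons-logging, as RpcProgramNfs3 does (the handler name, file id, and address below are placeholder values, not taken from the patch):

import java.net.InetSocketAddress;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class NfsOpLogging {
  private static final Log LOG = LogFactory.getLog(NfsOpLogging.class);

  // Guard the string concatenation with isDebugEnabled() and always append
  // the client address, which is what the patch adds to every handler.
  static void logOp(String op, long fileId, InetSocketAddress remoteAddress) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("NFS " + op + " fileId: " + fileId + " client:" + remoteAddress);
    }
  }

  public static void main(String[] args) {
    logOp("GETATTR", 16387L, new InetSocketAddress("127.0.0.1", 52341));
  }
}

On a gateway serving many hosts, having the client address on every operation line is what makes per-client debugging practical.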

[2/2] hadoop git commit: YARN-3015. yarn classpath command should support same options as hadoop classpath. Contributed by Varun Saxena.

2015-01-19 Thread cnauroth
YARN-3015. yarn classpath command should support same options as hadoop 
classpath. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c4abe92
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c4abe92
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c4abe92

Branch: refs/heads/branch-2
Commit: 2c4abe9208397cc2952dced5e41ca72ce5671384
Parents: a70e2c1
Author: cnauroth cnaur...@apache.org
Authored: Mon Jan 19 15:54:41 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jan 19 15:54:41 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 hadoop-yarn-project/hadoop-yarn/bin/yarn | 12 
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 11 +++
 3 files changed, 18 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c4abe92/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 82ae526..8baae0e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -354,6 +354,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3070. TestRMAdminCLI#testHelp fails for transitionToActive command. 
 (Contributed by Junping Du)
 
+YARN-3015. yarn classpath command should support same options as hadoop
+classpath. (Contributed by Varun Saxena)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c4abe92/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 896398d..ca7ee4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -208,11 +208,15 @@ unset IFS
 
 # figure out which class to run
 if [ "$COMMAND" = "classpath" ] ; then
-  if $cygwin; then
-    CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+  if [ $# -gt 0 ]; then
+    CLASS=org.apache.hadoop.util.Classpath
+  else
+    if $cygwin; then
+      CLASSPATH=$(cygpath -p -w "$CLASSPATH" 2>/dev/null)
+    fi
+    echo $CLASSPATH
+    exit 0
   fi
-  echo $CLASSPATH
-  exit
 elif [ "$COMMAND" = "rmadmin" ] ; then
   CLASS='org.apache.hadoop.yarn.client.cli.RMAdminCLI'
   YARN_OPTS="$YARN_OPTS $YARN_CLIENT_OPTS"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c4abe92/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index b3286e2..d594957 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -142,13 +142,16 @@ if %1 == --loglevel (
   set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
 
   if %yarn-command% == classpath (
-@echo %CLASSPATH%
-goto :eof
+if not defined yarn-command-arguments (
+  @rem No need to bother starting up a JVM for this simple case. 
+  @echo %CLASSPATH%
+  exit /b
+)
   )
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt container node logs daemonlog 
historyserver ^
- timelineserver
+ timelineserver classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -169,7 +172,7 @@ if %1 == --loglevel (
 goto :eof
 
 :classpath
-  @echo %CLASSPATH%
+  set CLASS=org.apache.hadoop.util.Classpath 
   goto :eof
 
 :rmadmin
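Both the bash and cmd versions make the same decision: with no arguments, echo the already-expanded CLASSPATH and skip starting a JVM; with arguments, hand the options off to org.apache.hadoop.util.Classpath, the tool behind hadoop classpath. A rough Java rendering of that dispatch, purely illustrative (the method and return strings are stand-ins for what the scripts actually exec):

public class ClasspathDispatch {
  // Mirrors the script logic: no args -> print CLASSPATH directly,
  // args present -> run the org.apache.hadoop.util.Classpath tool.
  static String dispatch(String[] args, String classpath) {
    if (args.length > 0) {
      return "exec org.apache.hadoop.util.Classpath " + String.join(" ", args);
    }
    // No need to bother starting up a JVM for this simple case.
    return classpath;
  }

  public static void main(String[] args) {
    String cp = System.getProperty("java.class.path");
    System.out.println(dispatch(new String[0], cp));
    System.out.println(dispatch(new String[] {"--glob"}, cp));
  }
}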



[1/2] hadoop git commit: YARN-3015. yarn classpath command should support same options as hadoop classpath. Contributed by Varun Saxena.

2015-01-19 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a70e2c15b -> 2c4abe920
  refs/heads/trunk 4a4450836 -> cb0a15d20


YARN-3015. yarn classpath command should support same options as hadoop 
classpath. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb0a15d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb0a15d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb0a15d2

Branch: refs/heads/trunk
Commit: cb0a15d20180c7ca3799e63a2d53aa8dee800abd
Parents: 4a44508
Author: cnauroth cnaur...@apache.org
Authored: Mon Jan 19 15:52:32 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jan 19 15:52:32 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt  |  3 +++
 hadoop-yarn-project/hadoop-yarn/bin/yarn |  4 +---
 hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 11 +++
 3 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0a15d2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ea7a606..5670963 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -388,6 +388,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3070. TestRMAdminCLI#testHelp fails for transitionToActive command. 
 (Contributed by Junping Du)
 
+YARN-3015. yarn classpath command should support same options as hadoop
+classpath. (Contributed by Varun Saxena)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0a15d2/hadoop-yarn-project/hadoop-yarn/bin/yarn
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index 255082f..a176288 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -81,9 +81,7 @@ case ${COMMAND} in
     set -- "${COMMAND}" "$@"
   ;;
   classpath)
-    hadoop_finalize
-    echo "${CLASSPATH}"
-    exit
+    hadoop_do_classpath_subcommand "$@"
   ;;
   daemonlog)
 CLASS=org.apache.hadoop.log.LogLevel

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb0a15d2/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
--
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd 
b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
index b3286e2..3f68b16 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
@@ -142,13 +142,16 @@ if %1 == --loglevel (
   set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\%YARN_LIB_JARS_DIR%\*
 
   if %yarn-command% == classpath (
-@echo %CLASSPATH%
-goto :eof
+if not defined yarn-command-arguments (
+  @rem No need to bother starting up a JVM for this simple case.
+  @echo %CLASSPATH%
+  exit /b
+)
   )
 
   set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar 
^
  application applicationattempt container node logs daemonlog 
historyserver ^
- timelineserver
+ timelineserver classpath
   for %%i in ( %yarncommands% ) do (
 if %yarn-command% == %%i set yarncommand=true
   )
@@ -169,7 +172,7 @@ if %1 == --loglevel (
 goto :eof
 
 :classpath
-  @echo %CLASSPATH%
+  set CLASS=org.apache.hadoop.util.Classpath
   goto :eof
 
 :rmadmin



hadoop git commit: YARN-2933. Capacity Scheduler preemption policy should only consider capacity without labels temporarily. Contributed by Mayank Bansal

2015-01-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2c4abe920 -> ef6fc24df


YARN-2933. Capacity Scheduler preemption policy should only consider capacity 
without labels temporarily. Contributed by Mayank Bansal

(cherry picked from commit 0a2d3e717d9c42090a32ff177991a222a1e34132)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef6fc24d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef6fc24d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef6fc24d

Branch: refs/heads/branch-2
Commit: ef6fc24dfb970da5d0afcfc3eca3f861cf01b23b
Parents: 2c4abe9
Author: Wangda Tan wan...@apache.org
Authored: Mon Jan 19 16:48:50 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon Jan 19 16:57:17 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ProportionalCapacityPreemptionPolicy.java   |  53 +-
 ...estProportionalCapacityPreemptionPolicy.java | 106 +--
 3 files changed, 149 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef6fc24d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8baae0e..2aae75e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -357,6 +357,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3015. yarn classpath command should support same options as hadoop
 classpath. (Contributed by Varun Saxena)
 
+YARN-2933. Capacity Scheduler preemption policy should only consider 
capacity 
+without labels temporarily. (Mayank Bansal via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef6fc24d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 1a3f804..0743f60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -30,15 +30,19 @@ import java.util.NavigableSet;
 import java.util.PriorityQueue;
 import java.util.Set;
 
+import org.apache.commons.collections.map.HashedMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType;
@@ -129,6 +133,7 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   private float percentageClusterPreemptionAllowed;
   private double naturalTerminationFactor;
   private boolean observeOnly;
+  private Map<NodeId, Set<String>> labels;
 
   public ProportionalCapacityPreemptionPolicy() {
 clock = new SystemClock();
@@ -168,6 +173,7 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   config.getFloat(TOTAL_PREEMPTION_PER_ROUND, (float) 0.1);
 observeOnly = config.getBoolean(OBSERVE_ONLY, false);
 rc = scheduler.getResourceCalculator();
+labels = null;
   }
   
   @VisibleForTesting
@@ -176,14 +182,39 @@ public class 

hadoop git commit: HDFS-7638: Small fix and few refinements for FSN#truncate. (yliu)

2015-01-19 Thread yliu
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5e5e35b18 -> 5a6c084f0


HDFS-7638: Small fix and few refinements for FSN#truncate. (yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a6c084f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a6c084f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a6c084f

Branch: refs/heads/trunk
Commit: 5a6c084f074990a1f412475b147fd4f040b57d57
Parents: 5e5e35b
Author: yliu y...@apache.org
Authored: Tue Jan 20 00:45:12 2015 +0800
Committer: yliu y...@apache.org
Committed: Tue Jan 20 00:45:12 2015 +0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 20 +++-
 2 files changed, 13 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a6c084f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 01e76d9..47228a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -276,6 +276,8 @@ Trunk (Unreleased)
 
 HDFS-7606. Fix potential NPE in INodeFile.getBlocks(). (Byron Wong via shv)
 
+HDFS-7638: Small fix and few refinements for FSN#truncate. (yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a6c084f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index df10f59..ff53fb5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1917,18 +1917,22 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean res;
 byte[][] pathComponents = 
FSDirectory.getPathComponentsForReservedPath(src);
 writeLock();
+BlocksMapUpdateInfo toRemoveBlocks = new BlocksMapUpdateInfo();
 try {
   checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot truncate for " + src);
   src = dir.resolvePath(pc, src, pathComponents);
   res = truncateInternal(src, newLength, clientName,
- clientMachine, mtime, pc);
-  stat = FSDirStatAndListingOp.getFileInfo(dir, src, false,
-  FSDirectory.isReservedRawName(src), true);
+  clientMachine, mtime, pc, toRemoveBlocks);
+  stat = dir.getAuditFileInfo(dir.getINodesInPath4Write(src, false));
 } finally {
   writeUnlock();
 }
 getEditLog().logSync();
+if (!toRemoveBlocks.getToDeleteList().isEmpty()) {
+  removeBlocks(toRemoveBlocks);
+  toRemoveBlocks.clear();
+}
     logAuditEvent(true, "truncate", src, null, stat);
 return res;
   }
@@ -1939,17 +1943,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
*/
   boolean truncateInternal(String src, long newLength,
String clientName, String clientMachine,
-   long mtime, FSPermissionChecker pc)
+   long mtime, FSPermissionChecker pc,
+   BlocksMapUpdateInfo toRemoveBlocks)
   throws IOException, UnresolvedLinkException {
 assert hasWriteLock();
 INodesInPath iip = dir.getINodesInPath4Write(src, true);
 if (isPermissionEnabled) {
   dir.checkPathAccess(pc, iip, FsAction.WRITE);
 }
-INodeFile file = iip.getLastINode().asFile();
+INodeFile file = INodeFile.valueOf(iip.getLastINode(), src);
 // Opening an existing file for write. May need lease recovery.
 recoverLeaseInternal(iip, src, clientName, clientMachine, false);
-file = INodeFile.valueOf(iip.getLastINode(), src);
 // Truncate length check.
 long oldLength = file.computeFileSize();
 if(oldLength == newLength) {
@@ -1961,9 +1965,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
           ", truncate size: " + newLength + ".");
 }
 // Perform INodeFile truncation.
-BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
 boolean onBlockBoundary = dir.truncate(iip, newLength,
-   collectedBlocks, mtime);
+toRemoveBlocks, mtime);
 Block truncateBlock = null;
 if(! onBlockBoundary) {
  
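The reshuffle above follows the usual namesystem ordering: collect the blocks to delete while holding the write lock, release the lock, sync the edit log, and only then remove the blocks. A compressed sketch of that ordering with stand-in types (the lock, log, and block collector here are simplifications, not the real FSNamesystem members):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DeferredBlockRemoval {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> blocksMap = new ArrayList<>();

  // Same ordering as FSNamesystem#truncate after HDFS-7638:
  // 1) mutate the namespace and collect doomed blocks under the write lock,
  // 2) release the lock and sync the edit log,
  // 3) remove the collected blocks outside the lock.
  boolean truncate(String src) {
    List<String> toRemoveBlocks = new ArrayList<>();
    lock.writeLock().lock();
    try {
      blocksMap.remove(src);             // stand-in for dir.truncate(...)
      toRemoveBlocks.add(src + "_blk");  // stand-in for the collected blocks
    } finally {
      lock.writeLock().unlock();
    }
    logSync();                           // getEditLog().logSync() in the real code
    if (!toRemoveBlocks.isEmpty()) {
      removeBlocks(toRemoveBlocks);      // incremental removal outside the lock
      toRemoveBlocks.clear();
    }
    return true;
  }

  private void logSync() { /* flush edits to stable storage */ }
  private void removeBlocks(List<String> blocks) { blocks.clear(); }
}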

hadoop git commit: YARN-2933. Capacity Scheduler preemption policy should only consider capacity without labels temporarily. Contributed by Mayank Bansal

2015-01-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/trunk cb0a15d20 -> 0a2d3e717


YARN-2933. Capacity Scheduler preemption policy should only consider capacity 
without labels temporarily. Contributed by Mayank Bansal


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a2d3e71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a2d3e71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a2d3e71

Branch: refs/heads/trunk
Commit: 0a2d3e717d9c42090a32ff177991a222a1e34132
Parents: cb0a15d
Author: Wangda Tan wan...@apache.org
Authored: Mon Jan 19 16:48:50 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon Jan 19 16:48:50 2015 -0800

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ProportionalCapacityPreemptionPolicy.java   |  53 +-
 ...estProportionalCapacityPreemptionPolicy.java | 106 +--
 3 files changed, 149 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a2d3e71/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5670963..a29d316 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -391,6 +391,9 @@ Release 2.7.0 - UNRELEASED
 YARN-3015. yarn classpath command should support same options as hadoop
 classpath. (Contributed by Varun Saxena)
 
+YARN-2933. Capacity Scheduler preemption policy should only consider 
capacity 
+without labels temporarily. (Mayank Bansal via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a2d3e71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 1a3f804..0743f60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -30,15 +30,19 @@ import java.util.NavigableSet;
 import java.util.PriorityQueue;
 import java.util.Set;
 
+import org.apache.commons.collections.map.HashedMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
+import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType;
@@ -129,6 +133,7 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   private float percentageClusterPreemptionAllowed;
   private double naturalTerminationFactor;
   private boolean observeOnly;
+  private Map<NodeId, Set<String>> labels;
 
   public ProportionalCapacityPreemptionPolicy() {
 clock = new SystemClock();
@@ -168,6 +173,7 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   config.getFloat(TOTAL_PREEMPTION_PER_ROUND, (float) 0.1);
 observeOnly = config.getBoolean(OBSERVE_ONLY, false);
 rc = scheduler.getResourceCalculator();
+labels = null;
   }
   
   @VisibleForTesting
@@ -176,14 +182,39 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
   }
 
   @Override
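The new labels field maps each node to its labels; YARN-2933 makes the preemption math look only at unlabeled capacity until label-aware preemption lands. A hedged sketch of that filtering step (the String node keys and the memory map are simplifications of the NodeId/Resource types the policy really uses):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class NoLabelCapacity {
  // Mirrors the new field: node -> labels on that node (null or empty = unlabeled).
  private final Map<String, Set<String>> labels = new HashMap<>();
  private final Map<String, Integer> nodeMemoryMb = new HashMap<>();

  // Sum only the capacity of nodes carrying no label: the
  // "consider capacity without labels" behaviour described by YARN-2933.
  int unlabeledCapacityMb() {
    int total = 0;
    for (Map.Entry<String, Integer> e : nodeMemoryMb.entrySet()) {
      Set<String> nodeLabels = labels.get(e.getKey());
      if (nodeLabels == null || nodeLabels.isEmpty()) {
        total += e.getValue();
      }
    }
    return total;
  }

  public static void main(String[] args) {
    NoLabelCapacity c = new NoLabelCapacity();
    c.nodeMemoryMb.put("n1", 8192);
    c.nodeMemoryMb.put("n2", 8192);
    c.labels.put("n2", Collections.singleton("gpu"));
    System.out.println(c.unlabeledCapacityMb()); // prints 8192: only n1 counts
  }
}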

hadoop git commit: HDFS-7640. print NFS Client in the NFS log. Contributed by Brandon Li.

2015-01-19 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ef6fc24df -> 755731a6c


HDFS-7640. print NFS Client in the NFS log. Contributed by Brandon Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/755731a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/755731a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/755731a6

Branch: refs/heads/branch-2
Commit: 755731a6c1b8dc1aa1495597a6eb9b8415b5b6db
Parents: ef6fc24
Author: Haohui Mai whe...@apache.org
Authored: Mon Jan 19 17:29:46 2015 -0800
Committer: Haohui Mai whe...@apache.org
Committed: Mon Jan 19 17:29:57 2015 -0800

--
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java| 53 +---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 +
 2 files changed, 36 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/755731a6/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 148d4f7..9204c4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -330,8 +330,9 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 }
 
 FileHandle handle = request.getHandle();
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("GETATTR for fileId: " + handle.getFileId());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("GETATTR for fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 Nfs3FileAttributes attrs = null;
@@ -423,7 +424,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS SETATTR fileId: " + handle.getFileId());
+      LOG.debug("NFS SETATTR fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 if (request.getAttr().getUpdateFields().contains(SetAttrField.SIZE)) {
@@ -509,7 +511,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 String fileName = request.getName();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS LOOKUP dir fileId: " + dirHandle.getFileId() + " name: "
-          + fileName);
+          + fileName + " client:" + remoteAddress);
 }
 
 try {
@@ -577,7 +579,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 Nfs3FileAttributes attrs;
 
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS ACCESS fileId: " + handle.getFileId());
+      LOG.debug("NFS ACCESS fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 try {
@@ -643,7 +646,8 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
-      LOG.debug("NFS READLINK fileId: " + handle.getFileId());
+      LOG.debug("NFS READLINK fileId: " + handle.getFileId() + " client:"
+          + remoteAddress);
 }
 
 String fileIdPath = Nfs3Utils.getFileIdPath(handle);
@@ -722,7 +726,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 FileHandle handle = request.getHandle();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS READ fileId: " + handle.getFileId() + " offset: " + offset
-          + " count: " + count);
+          + " count: " + count + " client:" + remoteAddress);
 }
 
 Nfs3FileAttributes attrs;
@@ -863,7 +867,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS WRITE fileId: " + handle.getFileId() + " offset: "
           + offset + " length:" + count + " stableHow:" + stableHow.getValue()
-          + " xid:" + xid);
+          + " xid:" + xid + " client:" + remoteAddress);
 }
 
 Nfs3FileAttributes preOpAttr = null;
@@ -936,7 +940,7 @@ public class RpcProgramNfs3 extends RpcProgram implements 
Nfs3Interface {
 String fileName = request.getName();
 if (LOG.isDebugEnabled()) {
       LOG.debug("NFS CREATE dir fileId: " + dirHandle.getFileId()
-          + " filename: " + fileName);
+          + " filename: " + fileName + " client:" + remoteAddress);
 }
 
 int createMode = request.getMode();
@@ -1067,6 +1071,10 @@ public class RpcProgramNfs3 extends RpcProgram 
implements Nfs3Interface {
 }
 FileHandle dirHandle = request.getHandle();
 String fileName = request.getName();

svn commit: r1653152 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/

2015-01-19 Thread wangda
Author: wangda
Date: Mon Jan 19 23:07:34 2015
New Revision: 1653152

URL: http://svn.apache.org/r1653152
Log:
Add Wangda Tan to committer list

Modified:
hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1653152r1=1653151r2=1653152view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Mon 
Jan 19 23:07:34 2015
@@ -1138,6 +1138,14 @@
        <td></td>
        <td>-8</td>
      </tr>
+
+      <tr>
+        <td>wangda</td>
+        <td>Wangda Tan</td>
+        <td>Hortonworks</td>
+        <td></td>
+        <td>-8</td>
+      </tr>
 
      <tr>
        <td>wheat9</td>

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/version_control.pdf
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/version_control.pdf?rev=1653152r1=1653151r2=1653152view=diff
==
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/who.html
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1653152r1=1653151r2=1653152view=diff
==
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Mon Jan 19 23:07:34 2015
@@ -1790,6 +1790,17 @@ document.write("Last Published: " + docu
      <td colspan="1" rowspan="1">-8</td>
 
     </tr>
+
+
+    <tr>
+
+      <td colspan="1" rowspan="1">wangda</td>
+      <td colspan="1" rowspan="1">Wangda Tan</td>
+      <td colspan="1" rowspan="1">Hortonworks</td>
+      <td colspan="1" rowspan="1"></td>
+      <td colspan="1" rowspan="1">-8</td>
+
+    </tr>
 
 
     <tr>
@@ -1851,7 +1862,7 @@ document.write("Last Published: " + docu
     </div>
 
 
-<a name="N10FC4"></a><a name="Emeritus+Hadoop+Committers"></a>
+<a name="N10FDF"></a><a name="Emeritus+Hadoop+Committers"></a>
 <h2 class="h3">Emeritus Hadoop Committers</h2>
 <div class="section">
 <p>Hadoop committers who are no longer active include:</p>

Modified: hadoop/common/site/main/publish/who.pdf
URL: 

hadoop git commit: HADOOP-11489. Dropping dependency on io.netty from hadoop-nfs' pom.xml. Contributed by Ted Yu.

2015-01-19 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 755731a6c -> 09eee9bbc


HADOOP-11489. Dropping dependency on io.netty from hadoop-nfs' pom.xml. 
Contributed by Ted Yu.

(cherry picked from commit c94c0d2c5663c38fc7b306368b54153bcfcb6c74)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09eee9bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09eee9bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09eee9bb

Branch: refs/heads/branch-2
Commit: 09eee9bbc163f18741e4b647ac8bd416c5794b9a
Parents: 755731a
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Jan 20 15:26:17 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Jan 20 15:27:41 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-common-project/hadoop-nfs/pom.xml| 5 -
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09eee9bb/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 553e367..4a8b0f9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -130,6 +130,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11171 Enable using a proxy server to connect to S3a.
 (Thomas Demoor via stevel)
 
+HADOOP-11489 Dropping dependency on io.netty from hadoop-nfs' pom.xml
+(Ted Yu via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09eee9bb/hadoop-common-project/hadoop-nfs/pom.xml
--
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index 5b3f5b9..92a85a3 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -83,11 +83,6 @@
       <scope>runtime</scope>
     </dependency>
     <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>



hadoop git commit: HADOOP-11489. Dropping dependency on io.netty from hadoop-nfs' pom.xml. Contributed by Ted Yu.

2015-01-19 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5a6c084f0 -> c94c0d2c5


HADOOP-11489. Dropping dependency on io.netty from hadoop-nfs' pom.xml. 
Contributed by Ted Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c94c0d2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c94c0d2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c94c0d2c

Branch: refs/heads/trunk
Commit: c94c0d2c5663c38fc7b306368b54153bcfcb6c74
Parents: 5a6c084
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue Jan 20 15:26:17 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue Jan 20 15:26:17 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 hadoop-common-project/hadoop-nfs/pom.xml| 5 -
 2 files changed, 3 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94c0d2c/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9e1bf59..339ccfb 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -491,6 +491,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-11171 Enable using a proxy server to connect to S3a.
 (Thomas Demoor via stevel)
 
+HADOOP-11489 Dropping dependency on io.netty from hadoop-nfs' pom.xml
+(Ted Yu via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94c0d2c/hadoop-common-project/hadoop-nfs/pom.xml
--
diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index e30a482..409ed75 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -83,11 +83,6 @@
       <scope>runtime</scope>
     </dependency>
     <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
     </dependency>



hadoop git commit: HDFS-5631. Change BlockMetadataHeader.readHeader(..), ChunkChecksum class and constructor to public; and fix FsDatasetSpi to use generic type instead of FsVolumeImpl. Contributed by

2015-01-19 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7fc1f2f5c -> 4a4450836


HDFS-5631. Change BlockMetadataHeader.readHeader(..), ChunkChecksum class and 
constructor to public; and fix FsDatasetSpi to use generic type instead of 
FsVolumeImpl.  Contributed by David Powell and Joe Pallas


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a445083
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a445083
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a445083

Branch: refs/heads/trunk
Commit: 4a4450836c8972480b9387b5e31bab57ae2b5baa
Parents: 7fc1f2f
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Jan 19 13:49:19 2015 -0800
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Mon Jan 19 13:49:19 2015 -0800

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   4 +
 .../server/datanode/BlockMetadataHeader.java|   2 +-
 .../hdfs/server/datanode/ChunkChecksum.java |   4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   2 +-
 .../impl/RamDiskAsyncLazyPersistService.java|   7 +-
 .../server/datanode/SimulatedFSDataset.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java | 409 +++
 .../datanode/extdataset/ExternalReplica.java|  65 +++
 .../extdataset/ExternalReplicaInPipeline.java   |  97 +
 .../extdataset/ExternalRollingLogs.java |  92 +
 .../datanode/extdataset/ExternalVolumeImpl.java |  75 
 .../extdataset/TestExternalDataset.java |  97 +
 12 files changed, 848 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a445083/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e3d0ca8..0a15768 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -136,6 +136,10 @@ Trunk (Unreleased)
 HDFS-7591. hdfs classpath command should support same options as hadoop
 classpath (Varun Saxena via Arpit Agarwal)
 
+HDFS-5631. Change BlockMetadataHeader.readHeader(..), ChunkChecksum
+class and constructor to public; and fix FsDatasetSpi to use generic type
+instead of FsVolumeImpl.  (David Powell and Joe Pallas via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a445083/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
index 51a6134..94493aa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockMetadataHeader.java
@@ -162,7 +162,7 @@ public class BlockMetadataHeader {
* The current file position will be altered by this method.
   * If an error occurs, the file is <em>not</em> closed.
*/
-  static BlockMetadataHeader readHeader(RandomAccessFile raf) throws 
IOException {
+  public static BlockMetadataHeader readHeader(RandomAccessFile raf) throws 
IOException {
 byte[] buf = new byte[getHeaderSize()];
 raf.seek(0);
 raf.readFully(buf, 0, buf.length);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a445083/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
index cb69f82..714445b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ChunkChecksum.java
@@ -26,12 +26,12 @@ package org.apache.hadoop.hdfs.server.datanode;
  * the checksum applies for the last chunk, or bytes 512 - 1023
  */
 
-class ChunkChecksum {
+public class ChunkChecksum {
   private final long dataLength;
   // can be null if not available
   private final byte[] checksum;
 
-  ChunkChecksum(long dataLength, byte[] checksum) {
+  public ChunkChecksum(long dataLength, byte[] checksum) {
 this.dataLength = dataLength;
 this.checksum = checksum;
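These visibility changes exist so that the new out-of-tree dataset code under extdataset/ can reuse datanode types directly. A small sketch of what the public API now permits, assuming a locally available block meta file (the path below is a placeholder):

import java.io.IOException;
import java.io.RandomAccessFile;

import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.ChunkChecksum;

public class ExternalDatasetSketch {
  public static void main(String[] args) throws IOException {
    // readHeader(RandomAccessFile) is public after HDFS-5631, so an external
    // FsDatasetSpi implementation can parse the on-disk metadata header itself.
    try (RandomAccessFile raf = new RandomAccessFile("/tmp/blk_1073741825.meta", "r")) {
      BlockMetadataHeader header = BlockMetadataHeader.readHeader(raf);
      System.out.println("parsed block metadata header: " + header);
    }
    // The ChunkChecksum constructor is public as well, so partial-chunk
    // checksum state can be modelled outside the datanode package.
    ChunkChecksum lastChunk = new ChunkChecksum(512L, new byte[4]);
    System.out.println("chunk state: " + lastChunk);
  }
}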

hadoop git commit: YARN-3071. Remove invalid char from sample conf in doc of FairScheduler. (Contributed by Masatake Iwasaki)

2015-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 19cbce389 -> 4a5c3a4cf


YARN-3071. Remove invalid char from sample conf in doc of FairScheduler. 
(Contributed by Masatake Iwasaki)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a5c3a4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a5c3a4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a5c3a4c

Branch: refs/heads/trunk
Commit: 4a5c3a4cfee6b8008a722801821e64850582a985
Parents: 19cbce3
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Jan 19 21:20:43 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Mon Jan 19 21:21:34 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a5c3a4c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5aca996..ea7a606 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -201,6 +201,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+YARN-3071. Remove invalid char from sample conf in doc of FairScheduler.
+(Masatake Iwasaki via aajisaka)
+
 YARN-2254. TestRMWebServicesAppsModification should run against both 
 CS and FS. (Zhihai Xu via kasha)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a5c3a4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
index 13bf7f9..10de3e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
@@ -375,7 +375,7 @@ Allocation file format
 
   <queueMaxAMShareDefault>0.5</queueMaxAMShareDefault>
 
-  <!—- Queue 'secondary_group_queueue' is a parent queue and may have
+  <!-- Queue 'secondary_group_queue' is a parent queue and may have
        user queues under it -->
   <queue name="secondary_group_queue" type="parent">
     <weight>3.0</weight>



hadoop git commit: YARN-3071. Remove invalid char from sample conf in doc of FairScheduler. (Contributed by Masatake Iwasaki)

2015-01-19 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ef4d7b73b -> 401c7d72f


YARN-3071. Remove invalid char from sample conf in doc of FairScheduler. 
(Contributed by Masatake Iwasaki)

(cherry picked from commit 4a5c3a4cfee6b8008a722801821e64850582a985)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/401c7d72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/401c7d72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/401c7d72

Branch: refs/heads/branch-2
Commit: 401c7d72f5e0cf583a8418fd66b009f9ca5fb631
Parents: ef4d7b7
Author: Akira Ajisaka aajis...@apache.org
Authored: Mon Jan 19 21:20:43 2015 +0900
Committer: Akira Ajisaka aajis...@apache.org
Committed: Mon Jan 19 21:22:48 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/401c7d72/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 162ef6f..82ae526 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -167,6 +167,9 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
+YARN-3071. Remove invalid char from sample conf in doc of FairScheduler.
+(Masatake Iwasaki via aajisaka)
+
 YARN-2254. TestRMWebServicesAppsModification should run against both 
 CS and FS. (Zhihai Xu via kasha)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/401c7d72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
index 13bf7f9..10de3e0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
@@ -375,7 +375,7 @@ Allocation file format
 
   <queueMaxAMShareDefault>0.5</queueMaxAMShareDefault>
 
-  <!—- Queue 'secondary_group_queueue' is a parent queue and may have
+  <!-- Queue 'secondary_group_queue' is a parent queue and may have
        user queues under it -->
   <queue name="secondary_group_queue" type="parent">
     <weight>3.0</weight>



[1/2] hadoop git commit: HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth occasionally fails. Contributed by Ming Ma.

2015-01-19 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5da33c5b5 -> a70e2c15b
  refs/heads/trunk e843a0a8c -> 7fc1f2f5c


HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth 
occasionally fails. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7fc1f2f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7fc1f2f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7fc1f2f5

Branch: refs/heads/trunk
Commit: 7fc1f2f5cf4312d72aeffb1a9cef497d00c60adb
Parents: e843a0a
Author: cnauroth cnaur...@apache.org
Authored: Mon Jan 19 11:28:30 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jan 19 11:28:30 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java | 4 +++-
 .../src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java  | 4 ++--
 3 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fc1f2f5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d9e6180..9e1bf59 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -722,6 +722,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10542 Potential null pointer dereference in Jets3tFileSystemStore
 retrieveBlock(). (Ted Yu via stevel)   
 
+HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth
+occasionally fails. (Ming Ma via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fc1f2f5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 46c485b..f58c3f4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -153,7 +153,9 @@ public abstract class ZKFailoverController {
   public HAServiceTarget getLocalTarget() {
 return localTarget;
   }
-  
+
+  HAServiceState getServiceState() { return serviceState; }
+
   public int run(final String[] args) throws Exception {
 if (!localTarget.isAutoFailoverEnabled()) {
      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7fc1f2f5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
index 1db7924..cab59a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -158,8 +158,8 @@ public class MiniZKFCCluster {
*/
   public void waitForHAState(int idx, HAServiceState state)
   throws Exception {
-DummyHAService svc = getService(idx);
-while (svc.state != state) {
+DummyZKFC svc = getZkfc(idx);
+while (svc.getServiceState() != state) {
   ctx.checkException();
   Thread.sleep(50);
 }



[2/2] hadoop git commit: HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth occasionally fails. Contributed by Ming Ma.

2015-01-19 Thread cnauroth
HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth 
occasionally fails. Contributed by Ming Ma.

(cherry picked from commit 7fc1f2f5cf4312d72aeffb1a9cef497d00c60adb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a70e2c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a70e2c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a70e2c15

Branch: refs/heads/branch-2
Commit: a70e2c15be29f861c3716e712233cd28a4751130
Parents: 5da33c5
Author: cnauroth cnaur...@apache.org
Authored: Mon Jan 19 11:28:30 2015 -0800
Committer: cnauroth cnaur...@apache.org
Committed: Mon Jan 19 11:28:42 2015 -0800

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java | 4 +++-
 .../src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java  | 4 ++--
 3 files changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a70e2c15/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d745746..553e367 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -366,6 +366,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10542 Potential null pointer dereference in Jets3tFileSystemStore
 retrieveBlock(). (Ted Yu via stevel)   
 
+HADOOP-10668. TestZKFailoverControllerStress#testExpireBackAndForth
+occasionally fails. (Ming Ma via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a70e2c15/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index 46c485b..f58c3f4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -153,7 +153,9 @@ public abstract class ZKFailoverController {
   public HAServiceTarget getLocalTarget() {
 return localTarget;
   }
-  
+
+  HAServiceState getServiceState() { return serviceState; }
+
   public int run(final String[] args) throws Exception {
 if (!localTarget.isAutoFailoverEnabled()) {
      LOG.fatal("Automatic failover is not enabled for " + localTarget + "." +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a70e2c15/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
index 1db7924..cab59a4 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java
@@ -158,8 +158,8 @@ public class MiniZKFCCluster {
*/
   public void waitForHAState(int idx, HAServiceState state)
   throws Exception {
-DummyHAService svc = getService(idx);
-while (svc.state != state) {
+DummyZKFC svc = getZkfc(idx);
+while (svc.getServiceState() != state) {
   ctx.checkException();
   Thread.sleep(50);
 }