[21/24] hadoop git commit: HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn Sharp.

2016-12-01 Thread asuresh
HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c57492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c57492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c57492

Branch: refs/heads/YARN-5085
Commit: 96c574927a600d15fab919df1fdc9e07887af6c5
Parents: e0fa492
Author: Kihwal Lee 
Authored: Thu Dec 1 12:11:27 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 1 12:11:27 2016 -0600

--
 .../server/blockmanagement/BlockManager.java| 79 ++--
 1 file changed, 24 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c57492/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1b744e7..e60703b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -30,6 +30,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -43,8 +44,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -101,7 +100,6 @@ import 
org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
-import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 
@@ -184,7 +182,6 @@ public class BlockManager implements BlockStatsMXBean {
   /** flag indicating whether replication queues have been initialized */
   private boolean initializedReplQueues;
 
-  private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
   private final BlockReportLeaseManager blockReportLeaseManager;
   private ObjectName mxBeanName;
@@ -219,7 +216,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
   }
   /** Used by metrics */
   public int getPendingDataNodeMessageCount() {
@@ -275,8 +272,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
-  private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
-  new LightWeightHashSet<>();
+  private final Set<Block> postponedMisreplicatedBlocks =
+  new LinkedHashSet<Block>();
+  private final int blocksPerPostpondedRescan;
+  private final ArrayList<Block> rescannedMisreplicatedBlocks;
 
   /**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -378,7 +377,10 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
 this.blockIdManager = new BlockIdManager(this);
-
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
 startupDelayBlockDeletionInMs = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
@@ -1613,9 +1615,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
   private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
-  postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
   }
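
The switch from LightWeightHashSet plus an AtomicLong counter to a single
LinkedHashSet buys two things: size() is exact without separate bookkeeping,
and iteration order is insertion order, so a rescan can cheaply take the
oldest postponed blocks first. A minimal sketch of the batched-rescan
pattern this enables, assuming only what the hunks above show (the class and
predicate here are hypothetical, not BlockManager code):

  import java.util.ArrayList;
  import java.util.Iterator;
  import java.util.LinkedHashSet;
  import java.util.List;
  import java.util.Set;
  import java.util.function.Predicate;

  class PostponedRescanSketch<B> {
    private final Set<B> postponed = new LinkedHashSet<>();
    private final int blocksPerRescan;
    private final List<B> rescanBatch;

    PostponedRescanSketch(int blocksPerRescan) {
      this.blocksPerRescan = blocksPerRescan;
      this.rescanBatch = new ArrayList<>(blocksPerRescan);
    }

    void postpone(B block) {
      postponed.add(block); // duplicate adds are no-ops, size() stays exact
    }

    /** Rescan at most blocksPerRescan of the oldest postponed blocks. */
    void rescanPostponed(Predicate<B> stillMisreplicated) {
      Iterator<B> it = postponed.iterator();
      for (int i = 0; it.hasNext() && i < blocksPerRescan; i++) {
        rescanBatch.add(it.next());
        it.remove(); // pull from the head of the insertion-ordered set
      }
      for (B block : rescanBatch) {
        if (stillMisreplicated.test(block)) {
          postponed.add(block); // unresolved blocks re-queue at the tail
        }
      }
      rescanBatch.clear();
    }

    long count() {
      return postponed.size(); // replaces the old AtomicLong counter
    }
  }
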
   
   

[20/24] hadoop git commit: HDFS-11180. Intermittent deadlock in NameNode when failover happens.

2016-12-01 Thread asuresh
HDFS-11180. Intermittent deadlock in NameNode when failover happens.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0fa4923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0fa4923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0fa4923

Branch: refs/heads/YARN-5085
Commit: e0fa49234fd37aca88e1caa95bac77bca192bae4
Parents: 1f7613b
Author: Akira Ajisaka 
Authored: Thu Dec 1 23:08:59 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 1 23:08:59 2016 +0900

--
 .../dev-support/findbugsExcludeFile.xml | 27 
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 72 +---
 .../hadoop/hdfs/server/namenode/FSImage.java| 15 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  4 +-
 .../server/namenode/TestFSNamesystemMBean.java  | 24 +++
 7 files changed, 148 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
 
 
 
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index ef9eb68..c9ee32b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -155,14 +155,16 @@ public class FSEditLog implements LogsPurgeable {
   private EditLogOutputStream editLogStream = null;
 
   // a monotonically increasing counter that represents transactionIds.
-  private long txid = 0;
+  // All of the threads which update/increment txid are synchronized,
+  // so make txid volatile instead of AtomicLong.
+  private volatile long txid = 0;
 
   // stores the last synced transactionId.
   private long synctxid = 0;
 
   // the first txid of the log that's currently open for writing.
   // If this value is N, we are currently writing to edits_inprogress_N
-  private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+  private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
 
   // the time of printing the statistics to the log file.
   private long lastPrintTime;
@@ -338,7 +340,18 @@ public class FSEditLog implements LogsPurgeable {
 return state == State.IN_SEGMENT ||
   state == State.BETWEEN_LOG_SEGMENTS;
   }
-  
+
+  /**
+   * Return true if the log is currently open in write mode.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is currently open in write mode, regardless
+   * of whether it actually has an open segment.
+   */
+  boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+  }
+
   /**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -348,6 +361,16 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
+   * Return true if the state is IN_SEGMENT.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is open in write mode and has a segment open
+   * ready to take edits.
+   */
+  boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+  }
+
+  /**
* @return true if the log is open in read mode.
*/
   public synchronized boolean isOpenForRead() {
@@ -522,7 +545,16 @@ public class FSEditLog implements LogsPurgeable {
   public synchronized long getLastWrittenTxId() {
 return txid;
   }
-  
+
+  /**
+   * Return the transaction ID of the last transaction written to the log.
+   * This method is not synchronized and must be used only for metrics.
+   * @return the transaction ID of the last transaction written to the log.
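
The remainder of this diff is truncated in the digest, but the idiom is
already complete in the hunks above: every writer mutates txid inside a
synchronized method, the field itself is volatile, and the new
*WithoutLock getters give metrics threads a read that can never queue
behind the FSEditLog monitor (the deadlock in HDFS-11180). A condensed
illustration of the same idiom:

  class TxIdSketch {
    // Volatile is enough because all writers hold the object lock, so
    // increments never race; readers only need visibility, not atomicity.
    private volatile long txid = 0;

    synchronized long nextTxId() {
      return ++txid; // one writer at a time under the lock
    }

    synchronized long getLastWrittenTxId() {
      return txid; // consistent read for callers already on the write path
    }

    /** Unsynchronized read; must be used only for metrics. */
    long getLastWrittenTxIdWithoutLock() {
      return txid; // may trail by a transaction, but never blocks
    }
  }
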

[19/24] hadoop git commit: HADOOP-13840. Implement getUsed() for ViewFileSystem. Contributed by Manoj Govindassamy.

2016-12-01 Thread asuresh
HADOOP-13840. Implement getUsed() for ViewFileSystem. Contributed by Manoj 
Govindassamy.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1f7613be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1f7613be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1f7613be

Branch: refs/heads/YARN-5085
Commit: 1f7613be958bbdb735fd2b49e3f0b48e2c8b7c13
Parents: 7226a71
Author: Andrew Wang 
Authored: Wed Nov 30 17:55:12 2016 -0800
Committer: Andrew Wang 
Committed: Wed Nov 30 17:55:12 2016 -0800

--
 .../apache/hadoop/fs/viewfs/ViewFileSystem.java | 18 
 .../fs/viewfs/ViewFileSystemBaseTest.java   | 29 
 2 files changed, 47 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f7613be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
index ed1bda2..8be666c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
@@ -859,6 +859,24 @@ public class ViewFileSystem extends FileSystem {
   }
 
   /**
+   * Return the total size of all files under "/", if {@link
+   * Constants#CONFIG_VIEWFS_LINK_MERGE_SLASH} is supported and is a valid
+   * mount point. Else, throw NotInMountpointException.
+   *
+   * @throws IOException
+   */
+  @Override
+  public long getUsed() throws IOException {
+InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
+getUriPath(InodeTree.SlashPath), true);
+if (res.isInternalDir()) {
+  throw new NotInMountpointException(InodeTree.SlashPath, "getUsed");
+} else {
+  return res.targetFileSystem.getUsed();
+}
+  }
+
+  /**
* An instance of this class represents an internal dir of the viewFs
* that is internal dir of the mount table.
* It is a read only mount tables and create, mkdir or delete operations

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1f7613be/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 06f9868..9a0bf02 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -1108,4 +1108,33 @@ abstract public class ViewFileSystemBaseTest {
   }
 });
   }
+
+  @Test
+  public void testUsed() throws IOException {
+try {
+  fsView.getUsed();
+  fail("ViewFileSystem getUsed() should fail for slash root path when the" 
+
+  " slash root mount point is not configured.");
+} catch (NotInMountpointException e) {
+  // expected exception.
+}
+long usedSpaceByPathViaViewFs = fsView.getUsed(new Path("/user"));
+long usedSpaceByPathViaTargetFs =
+fsTarget.getUsed(new Path(targetTestRoot, "user"));
+assertEquals("Space used not matching between ViewFileSystem and " +
+"the mounted FileSystem!",
+usedSpaceByPathViaTargetFs, usedSpaceByPathViaViewFs);
+
+Path mountDataRootPath = new Path("/data");
+String fsTargetFileName = "debug.log";
+Path fsTargetFilePath = new Path(targetTestRoot, "data/debug.log");
+Path mountDataFilePath = new Path(mountDataRootPath, fsTargetFileName);
+fileSystemTestHelper.createFile(fsTarget, fsTargetFilePath);
+
+usedSpaceByPathViaViewFs = fsView.getUsed(mountDataFilePath);
+usedSpaceByPathViaTargetFs = fsTarget.getUsed(fsTargetFilePath);
+assertEquals("Space used not matching between ViewFileSystem and " +
+"the mounted FileSystem!",
+usedSpaceByPathViaTargetFs, usedSpaceByPathViaViewFs);
+  }
 }
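
Caller-side, the new behavior looks like the sketch below; the try/catch
mirrors what testUsed() asserts. The mount layout is hypothetical, and
getUsed() on "/" only succeeds when a merge-slash link is configured:

  import java.io.IOException;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.viewfs.NotInMountpointException;

  class GetUsedSketch {
    static void printUsed(FileSystem viewFs) throws IOException {
      try {
        // Succeeds only when "/" itself resolves to a mounted filesystem.
        System.out.println("used under /: " + viewFs.getUsed());
      } catch (NotInMountpointException e) {
        // Expected when "/" is an internal dir of the mount table.
        System.out.println("slash is not a mount point");
      }
      // Per-path queries resolve through the mount table as before.
      System.out.println("used under /user: "
          + viewFs.getUsed(new Path("/user")));
    }
  }
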





[08/24] hadoop git commit: HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by Xiaoyu Yao.

2016-12-01 Thread asuresh
HADOOP-10930. Refactor: Wrap Datanode IO related operations. Contributed by 
Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aeecfa24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aeecfa24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aeecfa24

Branch: refs/heads/YARN-5085
Commit: aeecfa24f4fb6af289920cbf8830c394e66bd78e
Parents: eaaa329
Author: Xiaoyu Yao 
Authored: Tue Nov 29 20:52:36 2016 -0800
Committer: Arpit Agarwal 
Committed: Tue Nov 29 20:52:36 2016 -0800

--
 .../hdfs/server/datanode/BlockReceiver.java |  66 +++
 .../hdfs/server/datanode/BlockSender.java   | 105 ---
 .../hadoop/hdfs/server/datanode/DNConf.java |   4 +
 .../hdfs/server/datanode/DataStorage.java   |   5 +
 .../hdfs/server/datanode/LocalReplica.java  | 179 +--
 .../server/datanode/LocalReplicaInPipeline.java |  30 ++--
 .../hdfs/server/datanode/ReplicaInPipeline.java |   4 +-
 .../server/datanode/fsdataset/FsDatasetSpi.java |   3 +-
 .../datanode/fsdataset/ReplicaInputStreams.java | 102 ++-
 .../fsdataset/ReplicaOutputStreams.java | 107 ++-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  32 ++--
 .../impl/FsDatasetAsyncDiskService.java |   7 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   5 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   5 +-
 .../org/apache/hadoop/hdfs/TestFileAppend.java  |   2 +-
 .../server/datanode/SimulatedFSDataset.java |  13 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   2 +-
 .../server/datanode/TestSimulatedFSDataset.java |   2 +-
 .../extdataset/ExternalDatasetImpl.java |   4 +-
 .../extdataset/ExternalReplicaInPipeline.java   |   6 +-
 20 files changed, 445 insertions(+), 238 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aeecfa24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 39419c1..f372072 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,10 +24,7 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
-import java.io.FileDescriptor;
-import java.io.FileOutputStream;
 import java.io.IOException;
-import java.io.OutputStream;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.nio.ByteBuffer;
@@ -53,7 +50,6 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -88,8 +84,6 @@ class BlockReceiver implements Closeable {
* the DataNode needs to recalculate checksums before writing.
*/
   private final boolean needsChecksumTranslation;
-  private OutputStream out = null; // to block file at local disk
-  private FileDescriptor outFd;
   private DataOutputStream checksumOut = null; // to crc file at local disk
   private final int bytesPerChecksum;
   private final int checksumSize;
@@ -250,7 +244,8 @@ class BlockReceiver implements Closeable {
   
   final boolean isCreate = isDatanode || isTransfer 
   || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
-  streams = replicaInfo.createStreams(isCreate, requestedChecksum);
+  streams = replicaInfo.createStreams(isCreate, requestedChecksum,
+  datanodeSlowLogThresholdMs);
   assert streams != null : "null streams!";
 
   // read checksum meta information
@@ -260,13 +255,6 @@ class BlockReceiver implements Closeable {
   this.bytesPerChecksum = diskChecksum.getBytesPerChecksum();
   this.checksumSize = diskChecksum.getChecksumSize();
 
-  this.out = streams.getDataOut();
-  if (out instanceof FileOutputStream) {
-this.outFd = ((FileOutputStream)out).getFD();
-  } else {
-LOG.warn("Could not get file descriptor for outputstream of class " +
-out.getClass());
-  }
   this.checksumOut = new DataOutputStream(new BufferedOutputStream(
   

[07/24] hadoop git commit: HDFS-11149. Support for parallel checking of FsVolumes.

2016-12-01 Thread asuresh
HDFS-11149. Support for parallel checking of FsVolumes.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eaaa3295
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eaaa3295
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eaaa3295

Branch: refs/heads/YARN-5085
Commit: eaaa32950cbae42a74e28e3db3f0cdb1ff158119
Parents: 8f6e143
Author: Arpit Agarwal 
Authored: Tue Nov 29 20:31:02 2016 -0800
Committer: Arpit Agarwal 
Committed: Tue Nov 29 20:31:02 2016 -0800

--
 .../datanode/checker/DatasetVolumeChecker.java  | 442 +++
 .../server/datanode/fsdataset/FsDatasetSpi.java |   7 +
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  12 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  15 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../server/datanode/SimulatedFSDataset.java |   7 +
 .../server/datanode/TestDirectoryScanner.java   |   7 +
 .../checker/TestDatasetVolumeChecker.java   | 261 +++
 .../TestDatasetVolumeCheckerFailures.java   | 193 
 .../datanode/extdataset/ExternalVolumeImpl.java |   7 +
 10 files changed, 953 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eaaa3295/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
new file mode 100644
index 000..8a57812
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -0,0 +1,442 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode.checker;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.nio.channels.ClosedChannelException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
+
+/**
+ * A class that encapsulates running disk checks against each volume of an
+ * 
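
The class javadoc is cut off above; in outline, DatasetVolumeChecker fans a
health check out to all volumes on an executor and bounds how long the
caller waits for the results. A condensed sketch of that shape using plain
java.util.concurrent rather than the Guava futures the real class imports
(the Volume interface here is a stand-in for FsVolumeSpi):

  import java.util.List;
  import java.util.Set;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.concurrent.TimeUnit;

  class VolumeCheckSketch {
    interface Volume {
      boolean check() throws Exception;
      String path();
    }

    /** Check all volumes in parallel; return those that reported failure. */
    static Set<String> checkAllVolumes(List<Volume> volumes, long timeoutMs)
        throws InterruptedException {
      ExecutorService pool =
          Executors.newFixedThreadPool(Math.max(1, volumes.size()));
      Set<String> failed = ConcurrentHashMap.newKeySet();
      CountDownLatch done = new CountDownLatch(volumes.size());
      try {
        for (Volume v : volumes) {
          pool.execute(() -> {
            try {
              if (!v.check()) {
                failed.add(v.path());
              }
            } catch (Exception e) {
              failed.add(v.path()); // a throwing check counts as unhealthy
            } finally {
              done.countDown();
            }
          });
        }
        // Bound the wait; a check still running after this is neither
        // reported failed nor healthy in this sketch.
        done.await(timeoutMs, TimeUnit.MILLISECONDS);
        return failed;
      } finally {
        pool.shutdownNow();
      }
    }
  }
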

[04/24] hadoop git commit: YARN-5851. TestContainerManagerSecurity testContainerManager[1] failed (Contributed by Haibo Chen via Daniel Templeton)

2016-12-01 Thread asuresh
YARN-5851. TestContainerManagerSecurity testContainerManager[1] failed 
(Contributed by Haibo Chen via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd9a96cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd9a96cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd9a96cb

Branch: refs/heads/YARN-5085
Commit: dd9a96cb175d63f7c5909cd98f2dc9af267a5864
Parents: 3b9d3ac
Author: Daniel Templeton 
Authored: Tue Nov 29 14:22:19 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 29 14:22:19 2016 -0800

--
 .../apache/hadoop/yarn/server/TestContainerManagerSecurity.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd9a96cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
index 408c1cc..98cb365 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
@@ -105,6 +105,7 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
 testRootDir.mkdirs();
 httpSpnegoKeytabFile.deleteOnExit();
 getKdc().createPrincipal(httpSpnegoKeytabFile, httpSpnegoPrincipal);
+UserGroupInformation.setConfiguration(conf);
 
 yarnCluster =
 new MiniYARNCluster(TestContainerManagerSecurity.class.getName(), 1, 1,
@@ -148,7 +149,6 @@ public class TestContainerManagerSecurity extends 
KerberosSecurityTestcase {
   
   public TestContainerManagerSecurity(Configuration conf) {
 conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS, 10L);
-UserGroupInformation.setConfiguration(conf);
 this.conf = conf;
   }
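
The patch is a pure ordering fix: the static UGI configuration must be set
before the secure MiniYARNCluster (and anything else that performs a login)
is constructed, rather than in a constructor whose relative timing the test
harness does not guarantee. In sketch form, with the cluster setup
abbreviated:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;

  class SecureSetupSketch {
    void setUp(Configuration conf) {
      // First: make the security framework see the test configuration ...
      UserGroupInformation.setConfiguration(conf);
      // ... and only then construct services that log in during startup,
      // e.g. new MiniYARNCluster(...), cluster.init(conf), cluster.start().
    }
  }
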
   





[03/24] hadoop git commit: YARN-5890. FairScheduler should log information about AM-resource-usage and max-AM-share for queues (Contributed by Yufei Gu via Daniel Templeton)

2016-12-01 Thread asuresh
YARN-5890. FairScheduler should log information about AM-resource-usage and 
max-AM-share for queues
(Contributed by Yufei Gu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b9d3acd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b9d3acd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b9d3acd

Branch: refs/heads/YARN-5085
Commit: 3b9d3acd203cef4d861c5182fc4dccc55128d347
Parents: b407d53
Author: Daniel Templeton 
Authored: Tue Nov 29 12:44:08 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 29 12:46:05 2016 -0800

--
 .../scheduler/fair/FSLeafQueue.java |  42 --
 .../resourcemanager/scheduler/fair/FSQueue.java |   7 +
 .../scheduler/fair/FSQueueMetrics.java  |  60 
 .../scheduler/fair/TestFairScheduler.java   | 137 +++
 4 files changed, 232 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b9d3acd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 343e9c3..2754616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -81,6 +81,7 @@ public class FSLeafQueue extends FSQueue {
 this.lastTimeAtMinShare = scheduler.getClock().getTime();
 activeUsersManager = new ActiveUsersManager(getMetrics());
 amResourceUsage = Resource.newInstance(0, 0);
+getMetrics().setAMResourceUsage(amResourceUsage);
   }
   
   void addApp(FSAppAttempt app, boolean runnable) {
@@ -132,6 +133,7 @@ public class FSLeafQueue extends FSQueue {
 // running an unmanaged AM.
 if (runnable && app.isAmRunning()) {
   Resources.subtractFrom(amResourceUsage, app.getAMResource());
+  getMetrics().setAMResourceUsage(amResourceUsage);
 }
 
 return runnable;
@@ -468,19 +470,14 @@ public class FSLeafQueue extends FSQueue {
   }
 
   /**
-   * Check whether this queue can run this application master under the
-   * maxAMShare limit.
-   *
-   * @param amResource resources required to run the AM
-   * @return true if this queue can run
-   */
-  boolean canRunAppAM(Resource amResource) {
-if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
-  return true;
-}
-
-// If FairShare is zero, use min(maxShare, available resource) to compute
-// maxAMResource
+  * Compute the maximum resource AM can use. The value is the result of
+  * multiplying FairShare and maxAMShare. If FairShare is zero, use
+  * min(maxShare, available resource) instead to prevent zero value for
+  * maximum AM resource since it forbids any job running in the queue.
+  *
+  * @return the maximum resource AM can use
+  */
+  private Resource computeMaxAMResource() {
 Resource maxResource = Resources.clone(getFairShare());
 if (maxResource.getMemorySize() == 0) {
   maxResource.setMemorySize(
@@ -494,7 +491,23 @@ public class FSLeafQueue extends FSQueue {
   getMaxShare().getVirtualCores()));
 }
 
-Resource maxAMResource = Resources.multiply(maxResource, maxAMShare);
+return Resources.multiply(maxResource, maxAMShare);
+  }
+
+  /**
+   * Check whether this queue can run the Application Master under the
+   * maxAMShare limit.
+   *
+   * @param amResource resources required to run the AM
+   * @return true if this queue can run
+   */
+  public boolean canRunAppAM(Resource amResource) {
+if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
+  return true;
+}
+
+Resource maxAMResource = computeMaxAMResource();
+getMetrics().setMaxAMShare(maxAMResource);
 Resource ifRunAMResource = Resources.add(amResourceUsage, amResource);
 return Resources.fitsIn(ifRunAMResource, maxAMResource);
   }
@@ -502,6 +515,7 @@ public class FSLeafQueue extends FSQueue {
   void addAMResourceUsage(Resource amResource) {
 if (amResource != null) {
   Resources.addTo(amResourceUsage, amResource);
+  getMetrics().setAMResourceUsage(amResourceUsage);
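
Numerically, the check reads: maxAMResource = fairShare * maxAMShare, where
a zero fair-share component falls back to min(maxShare, available) so the
queue is never wedged shut; the AM may start only if amResourceUsage plus
the new AM's resource still fits. A toy rendering with plain numbers (Res
is a stand-in for the real Resource/Resources types):

  class MaxAMShareSketch {
    record Res(long memoryMb, int vcores) {
      boolean fitsIn(Res cap) {
        return memoryMb <= cap.memoryMb && vcores <= cap.vcores;
      }
      Res plus(Res o) {
        return new Res(memoryMb + o.memoryMb, vcores + o.vcores);
      }
    }

    static boolean canRunAppAM(Res fairShare, Res maxShare, Res available,
        Res amUsage, Res amRequest, float maxAMShare) {
      if (Math.abs(maxAMShare - -1.0f) < 0.0001) {
        return true; // maxAMShare of -1 disables the check entirely
      }
      // A zero fair-share component falls back to min(maxShare, available)
      // so a freshly created queue is not forbidden from running any AM.
      long mem = fairShare.memoryMb() != 0 ? fairShare.memoryMb()
          : Math.min(maxShare.memoryMb(), available.memoryMb());
      int cores = fairShare.vcores() != 0 ? fairShare.vcores()
          : Math.min(maxShare.vcores(), available.vcores());
      Res maxAM = new Res((long) (mem * maxAMShare), (int) (cores * maxAMShare));
      return amUsage.plus(amRequest).fitsIn(maxAM);
    }
  }
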

[11/24] hadoop git commit: HDFS-8678. Bring back the feature to view chunks of files in the HDFS file browser. Contributed by Ivo Udelsmann.

2016-12-01 Thread asuresh
HDFS-8678. Bring back the feature to view chunks of files in the HDFS file 
browser. Contributed by Ivo Udelsmann.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/625df87c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/625df87c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/625df87c

Branch: refs/heads/YARN-5085
Commit: 625df87c7b8ec2787e743d845fadde5e73479dc1
Parents: 51e6c1c
Author: Ravi Prakash 
Authored: Wed Nov 30 09:11:19 2016 -0800
Committer: Ravi Prakash 
Committed: Wed Nov 30 09:12:15 2016 -0800

--
 .../src/main/webapps/hdfs/explorer.html | 13 +--
 .../src/main/webapps/hdfs/explorer.js   | 37 +---
 2 files changed, 43 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/625df87c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index ad8c374..3700a5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -57,8 +57,17 @@
File information
  
  
-   Download
-
+   
+  
+Download
+  
+  
+Head the file (first 32K)
+  
+  
+Tail the file (last 32K)
+ 
+   


  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/625df87c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 1739db2..3e276a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -192,13 +192,40 @@
   var download_url = '/webhdfs/v1' + abs_path + '?op=OPEN';
 
   $('#file-info-download').attr('href', download_url);
-  $('#file-info-preview').click(function() {
+
+  var processPreview = function(url) {
+url += "=true";
+$.ajax({
+  type: 'GET',
+  url: url,
+  processData: false,
+  crossDomain: true
+}).done(function(data) {
+  url = data.Location;
+  $.ajax({
+type: 'GET',
+url: url,
+processData: false,
+crossDomain: true
+  }).complete(function(data) {
+$('#file-info-preview-body').val(data.responseText);
+$('#file-info-tail').show();
+  }).error(function(jqXHR, textStatus, errorThrown) {
+show_err_msg("Couldn't preview the file. " + errorThrown);
+  });
+}).error(function(jqXHR, textStatus, errorThrown) {
+  show_err_msg("Couldn't find datanode to read file from. " + 
errorThrown);
+});
+  }
+
+  $('#file-info-preview-tail').click(function() {
 var offset = d.fileLength - TAIL_CHUNK_SIZE;
var url = offset > 0 ? download_url + '&offset=' + offset : download_url;
-$.get(url, function(t) {
-  $('#file-info-preview-body').val(t);
-  $('#file-info-tail').show();
-}, "text").error(network_error_handler(url));
+processPreview(url);
+  });
+  $('#file-info-preview-head').click(function() {
+var url = d.fileLength > TAIL_CHUNK_SIZE ? download_url + '&length=' + TAIL_CHUNK_SIZE : download_url;
+processPreview(url);
   });
 
   if (d.fileLength > 0) {
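
The same two-step dance the JavaScript performs can be written out in Java
for clarity: issue OPEN with noredirect=true, read the datanode URL out of
the JSON Location field, then fetch the byte range. A hedged sketch; the
JSON handling is simplified to a regex, where a real client would use a
JSON parser:

  import java.net.URI;
  import java.net.http.HttpClient;
  import java.net.http.HttpRequest;
  import java.net.http.HttpResponse;
  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  class WebHdfsPreviewSketch {
    static final long CHUNK = 32768; // 32K, matching TAIL_CHUNK_SIZE

    /** Fetch the last 32K of a file via WebHDFS via the JSON redirect. */
    static String tail(String nnHttpAddr, String path, long fileLength)
        throws Exception {
      long offset = Math.max(0, fileLength - CHUNK);
      String url = nnHttpAddr + "/webhdfs/v1" + path
          + "?op=OPEN&offset=" + offset + "&noredirect=true";
      HttpClient client = HttpClient.newHttpClient();
      String json = client.send(
          HttpRequest.newBuilder(URI.create(url)).build(),
          HttpResponse.BodyHandlers.ofString()).body();
      // The namenode answers {"Location":"http://<datanode>/..."}.
      Matcher m = Pattern.compile("\"Location\"\\s*:\\s*\"([^\"]+)\"")
          .matcher(json);
      if (!m.find()) {
        throw new IllegalStateException("no datanode location in: " + json);
      }
      return client.send(
          HttpRequest.newBuilder(URI.create(m.group(1))).build(),
          HttpResponse.BodyHandlers.ofString()).body();
    }
  }
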





[13/24] hadoop git commit: MAPREDUCE-6810. Fix hadoop-mapreduce-client-nativetask compilation with GCC-6.2.1. Contributed by Ravi Prakash.

2016-12-01 Thread asuresh
MAPREDUCE-6810. Fix hadoop-mapreduce-client-nativetask compilation with 
GCC-6.2.1. Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c848719
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c848719
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c848719

Branch: refs/heads/YARN-5085
Commit: 7c848719de778929258f1f9e2778e56f267c90ed
Parents: b3befc0
Author: Ravi Prakash 
Authored: Wed Nov 30 10:47:41 2016 -0800
Committer: Ravi Prakash 
Committed: Wed Nov 30 10:47:41 2016 -0800

--
 .../src/main/native/src/lib/Log.h  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c848719/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Log.h
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Log.h
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Log.h
index a0c17f3..a84b055 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Log.h
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/src/lib/Log.h
@@ -32,7 +32,7 @@ extern FILE * LOG_DEVICE;
 #define LOG(_fmt_, args...)   if (LOG_DEVICE) { \
 time_t log_timer; struct tm log_tm; \
time(&log_timer); localtime_r(&log_timer, &log_tm); \
-fprintf(LOG_DEVICE, "%02d/%02d/%02d %02d:%02d:%02d INFO "_fmt_"\n", \
+fprintf(LOG_DEVICE, "%02d/%02d/%02d %02d:%02d:%02d INFO " _fmt_ "\n", \
 log_tm.tm_year%100, log_tm.tm_mon+1, log_tm.tm_mday, \
 log_tm.tm_hour, log_tm.tm_min, log_tm.tm_sec, \
 ##args);}





[06/24] hadoop git commit: MAPREDUCE-6565. Configuration to use host name in delegation token service is not read from job.xml during MapReduce job execution. Contributed by Li Lu.

2016-12-01 Thread asuresh
MAPREDUCE-6565. Configuration to use host name in delegation token service is 
not read from job.xml during MapReduce job execution. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f6e1439
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f6e1439
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f6e1439

Branch: refs/heads/YARN-5085
Commit: 8f6e14399a3e77e1bdcc5034f7601e9f62163dea
Parents: 6d8b4f6
Author: Junping Du 
Authored: Tue Nov 29 15:51:27 2016 -0800
Committer: Junping Du 
Committed: Tue Nov 29 15:51:27 2016 -0800

--
 .../src/main/java/org/apache/hadoop/mapred/YarnChild.java | 2 ++
 .../main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java | 3 +++
 2 files changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f6e1439/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
index 164f19d..97642a5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
@@ -78,6 +78,8 @@ class YarnChild {
 // Initing with our JobConf allows us to avoid loading confs twice
 Limits.init(job);
 UserGroupInformation.setConfiguration(job);
+// MAPREDUCE-6565: need to set configuration for SecurityUtil.
+SecurityUtil.setConfiguration(job);
 
 String host = args[0];
 int port = Integer.parseInt(args[1]);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f6e1439/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index 4a8a90e..b383a02 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
@@ -1690,6 +1691,8 @@ public class MRAppMaster extends CompositeService {
   final JobConf conf, String jobUserName) throws IOException,
   InterruptedException {
 UserGroupInformation.setConfiguration(conf);
+// MAPREDUCE-6565: need to set configuration for SecurityUtil.
+SecurityUtil.setConfiguration(conf);
 // Security framework already loaded the tokens into current UGI, just use
 // them
 Credentials credentials =





[15/24] hadoop git commit: YARN-5942. "Overridden" is misspelled as "overriden" in FairScheduler.md (Contributed by Heather Sutherland via Daniel Templeton)

2016-12-01 Thread asuresh
YARN-5942. "Overridden" is misspelled as "overriden" in FairScheduler.md
(Contributed by Heather Sutherland via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fca94fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fca94fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fca94fb

Branch: refs/heads/YARN-5085
Commit: 4fca94fbdad16e845e670758939aabb7a97154d9
Parents: be5a757
Author: Daniel Templeton 
Authored: Wed Nov 30 11:22:21 2016 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 30 11:23:51 2016 -0800

--
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md  | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fca94fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index ecbb309..ae4c3ab 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -129,13 +129,13 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **A defaultFairSharePreemptionThreshold element**: which sets the fair share 
preemption threshold for the root queue; overridden by 
fairSharePreemptionThreshold element in root queue.
 
-* **A queueMaxAppsDefault element**: which sets the default running app limit 
for queues; overriden by maxRunningApps element in each queue.
+* **A queueMaxAppsDefault element**: which sets the default running app limit 
for queues; overridden by maxRunningApps element in each queue.
 
-* **A queueMaxResourcesDefault element**: which sets the default max resource 
limit for queue; overriden by maxResources element in each queue.
+* **A queueMaxResourcesDefault element**: which sets the default max resource 
limit for queue; overridden by maxResources element in each queue.
 
-* **A queueMaxAMShareDefault element**: which sets the default AM resource 
limit for queue; overriden by maxAMShare element in each queue.
+* **A queueMaxAMShareDefault element**: which sets the default AM resource 
limit for queue; overridden by maxAMShare element in each queue.
 
-* **A defaultQueueSchedulingPolicy element**: which sets the default 
scheduling policy for queues; overriden by the schedulingPolicy element in each 
queue if specified. Defaults to "fair".
+* **A defaultQueueSchedulingPolicy element**: which sets the default 
scheduling policy for queues; overridden by the schedulingPolicy element in 
each queue if specified. Defaults to "fair".
 
 * **A queuePlacementPolicy element**: which contains a list of rule elements 
that tell the scheduler how to place incoming apps into queues. Rules are 
applied in the order that they are listed. Rules may take arguments. All rules 
accept the "create" argument, which indicates whether the rule can create a new 
queue. "Create" defaults to true; if set to false and the rule would place the 
app in a queue that is not configured in the allocations file, we continue on 
to the next rule. The last rule must be one that can never issue a continue. 
Valid rules are:
 





[24/24] hadoop git commit: MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page in JHS (haibochen via rkanter)

2016-12-01 Thread asuresh
MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page 
in JHS (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c87b3a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c87b3a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c87b3a44

Branch: refs/heads/YARN-5085
Commit: c87b3a448a00df97149a4e93a8c39d9ad0268bdb
Parents: 2d77dc7
Author: Robert Kanter 
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter 
Committed: Thu Dec 1 17:29:38 2016 -0800

--
 .../mapreduce/v2/app/webapp/AppController.java  | 34 
 .../mapreduce/v2/app/webapp/ConfBlock.java  |  2 +-
 .../v2/app/webapp/TestAppController.java| 14 
 .../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java |  2 ++
 .../org/apache/hadoop/yarn/webapp/Router.java   | 23 ++---
 .../org/apache/hadoop/yarn/webapp/WebApp.java   | 13 
 6 files changed, 83 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements 
AMParams {
   }
 
   /**
+   * Handle requests to download the job configuration.
+   */
+  public void downloadConf() {
+try {
+  requireJob();
+} catch (Exception e) {
+  renderText(e.getMessage());
+  return;
+}
+writeJobConf();
+  }
+
+  private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+  Configuration jobConf = job.loadConfFile();
+  response().setContentType("text/xml");
+  response().setHeader("Content-Disposition",
+  "attachment; filename=" + jobId + ".xml");
+  jobConf.writeXml(writer());
+} catch (IOException e) {
+  LOG.error("Error reading/writing job" +
+  " conf file for job: " + jobId, e);
+  renderText(e.getMessage());
+}
+  }
+
+  /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
 try {
   ConfInfo info = new ConfInfo(job);
 
-  html.div().h3(confPath.toString())._();
+  html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
   TBODY tbody = html.
 // Tasks table
   table("#conf").

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
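
The Router/WebApp halves of this change (elided above) register the route
that ConfBlock now links to; fetching the served XML is then an ordinary
HTTP GET. An illustrative client, with a placeholder job id and history
server address:

  import java.io.InputStream;
  import java.net.URI;
  import java.nio.file.Files;
  import java.nio.file.Paths;
  import java.nio.file.StandardCopyOption;

  class DownloadConfSketch {
    /** Save a job's configuration XML from the JHS download route. */
    static void fetch(String jhsAddr, String jobId) throws Exception {
      // e.g. jobId = "job_1480000000000_0001" (placeholder)
      URI uri = URI.create(jhsAddr + "/jobhistory/downloadconf/" + jobId);
      try (InputStream in = uri.toURL().openStream()) {
        Files.copy(in, Paths.get(jobId + ".xml"),
            StandardCopyOption.REPLACE_EXISTING);
      }
    }
  }
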
 

[12/24] hadoop git commit: YARN-4997. Update fair scheduler to use pluggable auth provider (Contributed by Tao Jie via Daniel Templeton)

YARN-4997. Update fair scheduler to use pluggable auth provider (Contributed by 
Tao Jie via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3befc02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3befc02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3befc02

Branch: refs/heads/YARN-5085
Commit: b3befc021b0e2d63d1a3710ea450797d1129f1f5
Parents: 625df87
Author: Daniel Templeton 
Authored: Wed Nov 30 09:50:33 2016 -0800
Committer: Daniel Templeton 
Committed: Wed Nov 30 09:50:33 2016 -0800

--
 .../security/YarnAuthorizationProvider.java | 15 +
 .../scheduler/fair/AllocationConfiguration.java | 38 +--
 .../fair/AllocationFileLoaderService.java   | 68 +---
 .../resourcemanager/scheduler/fair/FSQueue.java | 22 +--
 .../scheduler/fair/FairScheduler.java   | 45 +++--
 .../scheduler/fair/TestFairScheduler.java   |  4 +-
 6 files changed, 149 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3befc02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
index 4b43ea1..9ae4bd7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
+import com.google.common.annotations.VisibleForTesting;
 import java.util.List;
 
 /**
@@ -61,6 +62,20 @@ public abstract class YarnAuthorizationProvider {
   }
 
   /**
+   * Destroy the {@link YarnAuthorizationProvider} instance.
+   * This method is called only in Tests.
+   */
+  @VisibleForTesting
+  public static void destroy() {
+synchronized (YarnAuthorizationProvider.class) {
+  if (authorizer != null) {
+LOG.debug(authorizer.getClass().getName() + " is destroyed.");
+authorizer = null;
+  }
+}
+  }
+
+  /**
* Initialize the provider. Invoked on daemon startup. DefaultYarnAuthorizer 
is
* initialized based on configurations.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3befc02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index c771887..7bd2616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -17,6 +17,7 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -25,13 +26,14 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.ReservationACL;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.security.AccessType;
 import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import 
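
The new destroy() exists because getInstance() caches a process-wide
singleton; without a reset hook, the first provider a test instantiates
would leak into every later test in the same JVM. Hypothetical test-side
usage, grounded only in the getInstance/destroy pair shown above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.yarn.conf.YarnConfiguration;
  import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;

  class AuthProviderResetSketch {
    void runWithProvider(Class<?> providerClass) {
      YarnAuthorizationProvider.destroy(); // drop the cached singleton
      Configuration conf = new Configuration();
      conf.set(YarnConfiguration.YARN_AUTHORIZATION_PROVIDER,
          providerClass.getName());
      // getInstance now re-reads the configuration instead of reusing
      // whatever an earlier test created.
      YarnAuthorizationProvider provider =
          YarnAuthorizationProvider.getInstance(conf);
      System.out.println("using " + provider.getClass().getName());
    }
  }
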

[14/24] hadoop git commit: HADOOP-13790. Make qbt script executable. Contributed by Andrew Wang.

HADOOP-13790. Make qbt script executable. Contributed by Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be5a7570
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be5a7570
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be5a7570

Branch: refs/heads/YARN-5085
Commit: be5a757096246d5c4ef73da9d233adda67bd3d69
Parents: 7c84871
Author: Akira Ajisaka 
Authored: Thu Dec 1 03:52:04 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 1 03:52:44 2016 +0900

--
 dev-support/bin/qbt | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5a7570/dev-support/bin/qbt
--
diff --git a/dev-support/bin/qbt b/dev-support/bin/qbt
old mode 100644
new mode 100755





[05/24] hadoop git commit: YARN-4395. Typo in comment in ClientServiceDelegate (Contributed by Alison Yu via Daniel Templeton)

YARN-4395. Typo in comment in ClientServiceDelegate (Contributed by Alison Yu 
via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d8b4f6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d8b4f6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d8b4f6c

Branch: refs/heads/YARN-5085
Commit: 6d8b4f6c2791f861a55ac78c2950f783693e912a
Parents: dd9a96c
Author: Daniel Templeton 
Authored: Tue Nov 29 15:30:22 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 29 15:30:22 2016 -0800

--
 .../java/org/apache/hadoop/mapred/ClientServiceDelegate.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d8b4f6c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index eac8dbc..72339e5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -335,8 +335,8 @@ public class ClientServiceDelegate {
   throw new IOException(e.getTargetException());
 }
 
-// if it's AM shut down, do not decrement maxClientRetry as we wait for
-// AM to be restarted.
+// if its AM shut down, do not decrement maxClientRetry while we wait
+// for its AM to be restarted.
 if (!usingAMProxy.get()) {
   maxClientRetry--;
 }





[23/24] hadoop git commit: YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed by Haibo Chen via Daniel Templeton)

YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed 
by Haibo Chen via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d77dc72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d77dc72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d77dc72

Branch: refs/heads/YARN-5085
Commit: 2d77dc727d9b5e56009bbc36643d85500efcbca5
Parents: 19f373a
Author: Daniel Templeton 
Authored: Thu Dec 1 15:57:39 2016 -0800
Committer: Daniel Templeton 
Committed: Thu Dec 1 15:57:39 2016 -0800

--
 .../hadoop/yarn/client/TestGetGroups.java   | 36 +---
 1 file changed, 24 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d77dc72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
index e947ece..da0258c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
@@ -20,16 +20,21 @@ package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.tools.GetGroupsTestBase;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
@@ -42,30 +47,37 @@ public class TestGetGroups extends GetGroupsTestBase {
   private static Configuration conf;
   
   @BeforeClass
-  public static void setUpResourceManager() throws IOException, 
InterruptedException {
+  public static void setUpResourceManager() throws InterruptedException {
 conf = new YarnConfiguration();
 resourceManager = new ResourceManager() {
   @Override
   protected void doSecureLogin() throws IOException {
   };
 };
+
+// a reliable way to wait for resource manager to start
+CountDownLatch rmStartedSignal = new CountDownLatch(1);
+ServiceStateChangeListener rmStateChangeListener =
+new ServiceStateChangeListener() {
+  @Override
+  public void stateChanged(Service service) {
+if (service.getServiceState() == STATE.STARTED) {
+  rmStartedSignal.countDown();
+}
+  }
+};
+resourceManager.registerServiceListener(rmStateChangeListener);
+
 resourceManager.init(conf);
 new Thread() {
   public void run() {
 resourceManager.start();
   };
 }.start();
-int waitCount = 0;
-while (resourceManager.getServiceState() == STATE.INITED
-&& waitCount++ < 10) {
-  LOG.info("Waiting for RM to start...");
-  Thread.sleep(1000);
-}
-if (resourceManager.getServiceState() != STATE.STARTED) {
-  throw new IOException(
-  "ResourceManager failed to start. Final state is "
-  + resourceManager.getServiceState());
-}
+
+boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
+Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
+
 LOG.info("ResourceManager RMAdmin address: " +
 conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
   }
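
The latch-and-listener idiom above generalizes to any Hadoop Service whose
startup a test must await deterministically, and it replaces the
poll-and-sleep loop the patch deletes. A minimal self-contained sketch under
that assumption; ServiceStartWaiter and the post-registration re-check are
editor illustrations, not part of the patch:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.ServiceStateChangeListener;

public final class ServiceStartWaiter {
  private ServiceStartWaiter() {}

  /** Block until the service reaches STARTED, or the timeout elapses. */
  public static boolean awaitStarted(Service service, long timeoutMs)
      throws InterruptedException {
    final CountDownLatch started = new CountDownLatch(1);
    service.registerServiceListener(new ServiceStateChangeListener() {
      @Override
      public void stateChanged(Service s) {
        if (s.getServiceState() == Service.STATE.STARTED) {
          started.countDown();
        }
      }
    });
    // Re-check after registering: if the service started before the
    // listener was attached, no further state-change event will fire.
    if (service.getServiceState() == Service.STATE.STARTED) {
      return true;
    }
    return started.await(timeoutMs, TimeUnit.MILLISECONDS);
  }
}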





[22/24] hadoop git commit: HDFS-11132. Allow AccessControlException in contract tests when getFileStatus on subdirectory of existing files. Contributed by Vishwajeet Dusane

HDFS-11132. Allow AccessControlException in contract tests when getFileStatus 
on subdirectory of existing files. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19f373a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19f373a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19f373a4

Branch: refs/heads/YARN-5085
Commit: 19f373a46b2abb7a575f7884a9c7443b8ed67cd3
Parents: 96c5749
Author: Mingliang Liu 
Authored: Thu Dec 1 12:54:03 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Dec 1 12:54:28 2016 -0800

--
 .../fs/FileContextMainOperationsBaseTest.java   | 21 
 .../hadoop/fs/FileSystemContractBaseTest.java   | 17 ++--
 2 files changed, 32 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 5f9151a..2b3ab2a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -251,8 +252,14 @@ public abstract class FileContextMainOperationsBaseTest  {
 } catch (IOException e) {
   // expected
 }
-Assert.assertFalse(exists(fc, testSubDir));
-
+
+try {
+  Assert.assertFalse(exists(fc, testSubDir));
+} catch (AccessControlException e) {
+  // Expected (HDFS-11132): checks on paths under a file may be rejected
+  // when the file is missing execute permission.
+}
+
 Path testDeepSubDir = getTestRootPath(fc, "test/hadoop/file/deep/sub/dir");
 try {
   fc.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
@@ -260,8 +267,14 @@ public abstract class FileContextMainOperationsBaseTest  {
 } catch (IOException e) {
   // expected
 }
-Assert.assertFalse(exists(fc, testDeepSubDir));
-
+
+try {
+  Assert.assertFalse(exists(fc, testDeepSubDir));
+} catch (AccessControlException e) {
+  // Expected (HDFS-11132): checks on paths under a file may be rejected
+  // when the file is missing execute permission.
+}
+
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index bbd7336..6247959 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -158,7 +159,13 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 } catch (IOException e) {
   // expected
 }
-assertFalse(fs.exists(testSubDir));
+
+try {
+  assertFalse(fs.exists(testSubDir));
+} catch (AccessControlException e) {
+  // Expected (HDFS-11132): checks on paths under a file may be rejected
+  // when the file is missing execute permission.
+}
 
 Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
 try {
@@ -167,7 +174,13 @@ public abstract class FileSystemContractBaseTest extends 
TestCase {
 } catch (IOException e) {
   // expected
 }
-assertFalse(fs.exists(testDeepSubDir));
+
+try {
+  assertFalse(fs.exists(testDeepSubDir));
+} catch (AccessControlException e) {
+   
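
The tolerant probe recurs in each contract test; it can be factored into a
helper so the try/catch is written once. A hedged sketch, with
ContractTestProbes and assertMissing as illustrative names that are not part
of the patch:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;
import org.junit.Assert;

public final class ContractTestProbes {
  private ContractTestProbes() {}

  // Assert a path is absent while tolerating AccessControlException:
  // per HDFS-11132, getFileStatus on a path under a file may be rejected
  // when the "parent" file lacks execute permission.
  public static void assertMissing(FileSystem fs, Path p) throws IOException {
    try {
      Assert.assertFalse("unexpectedly found " + p, fs.exists(p));
    } catch (AccessControlException e) {
      // expected on filesystems that reject status checks under files
    }
  }
}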

[10/24] hadoop git commit: HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. Contributed by Sammi Chen

HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. Contributed by Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51e6c1cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51e6c1cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51e6c1cc

Branch: refs/heads/YARN-5085
Commit: 51e6c1cc3f66f9908d2e816e7291ac34bee43f52
Parents: cfd8076
Author: Kai Zheng 
Authored: Wed Nov 30 15:52:56 2016 +0800
Committer: Kai Zheng 
Committed: Wed Nov 30 15:52:56 2016 +0800

--
 .../io/erasurecode/ErasureCodeConstants.java|  3 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  1 +
 .../namenode/ErasureCodingPolicyManager.java| 23 +++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  8 +++-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java | 28 +--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 50 +---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 27 ---
 .../TestDFSStripedOutputStreamWithFailure.java  | 37 +++
 .../hdfs/TestDFSXORStripedInputStream.java  | 33 +
 .../hdfs/TestDFSXORStripedOutputStream.java | 35 ++
 ...estDFSXORStripedOutputStreamWithFailure.java | 36 ++
 11 files changed, 240 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 8d6ff85..ffa0bce 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -38,4 +38,7 @@ public final class ErasureCodeConstants {
 
   public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
   RS_LEGACY_CODEC_NAME, 6, 3);
+
+  public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
+  XOR_CODEC_NAME, 2, 1);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index acbc8f6..b55b4df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -147,6 +147,7 @@ public final class HdfsConstants {
   public static final byte RS_6_3_POLICY_ID = 0;
   public static final byte RS_3_2_POLICY_ID = 1;
   public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
+  public static final byte XOR_2_1_POLICY_ID = 3;
 
   /* Hidden constructor */
   protected HdfsConstants() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index c4bc8de..8a85d23 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -36,7 +36,7 @@ import java.util.TreeMap;
 public final class ErasureCodingPolicyManager {
 
   /**
-   * TODO: HDFS-8095
+   * TODO: HDFS-8095.
*/
   private static final int DEFAULT_CELLSIZE = 64 * 1024;
   private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -48,10 +48,14 @@ public final class ErasureCodingPolicyManager {
   private static final ErasureCodingPolicy SYS_POLICY3 =
   new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
   DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
+  private static final ErasureCodingPolicy SYS_POLICY4 =
+  new 
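
For reference, the shape of the policy being added can be assembled from the
constants visible in this message. A hedged sketch only; the real
registration lives in the ErasureCodingPolicyManager hunk truncated above,
and Xor21PolicySketch is an illustrative class name:

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

public class Xor21PolicySketch {
  // Same cell size the existing system policies use (64k).
  private static final int DEFAULT_CELLSIZE = 64 * 1024;

  // XOR-2-1-64k: a 2-data/1-parity XOR schema plus the new policy id,
  // mirroring how the RS policies above are constructed.
  static ErasureCodingPolicy buildXor21Policy() {
    return new ErasureCodingPolicy(ErasureCodeConstants.XOR_2_1_SCHEMA,
        DEFAULT_CELLSIZE, HdfsConstants.XOR_2_1_POLICY_ID);
  }
}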

[18/24] hadoop git commit: HDFS-5517. Lower the default maximum number of blocks per file. Contributed by Aaron T. Myers and Andrew Wang.

HDFS-5517. Lower the default maximum number of blocks per file. Contributed by 
Aaron T. Myers and Andrew Wang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7226a71b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7226a71b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7226a71b

Branch: refs/heads/YARN-5085
Commit: 7226a71b1f684f562bd88ee121f1dd7aa8b73816
Parents: 69fb70c
Author: Andrew Wang 
Authored: Wed Nov 30 15:58:31 2016 -0800
Committer: Andrew Wang 
Committed: Wed Nov 30 15:58:31 2016 -0800

--
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  |  2 +-
 .../hdfs/server/datanode/TestDirectoryScanner.java   | 11 +--
 .../server/namenode/metrics/TestNameNodeMetrics.java |  2 +-
 4 files changed, 12 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7d3c9d..df21857 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_MIN_BLOCK_SIZE_KEY = 
"dfs.namenode.fs-limits.min-block-size";
   public static final longDFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = 
"dfs.namenode.fs-limits.max-blocks-per-file";
-  public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 
1024*1024;
+  public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 
10*1000;
   public static final String  DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = 
"dfs.namenode.fs-limits.max-xattrs-per-inode";
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 671c98c..086f667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -372,7 +372,7 @@
 
 
 <property>
   <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-  <value>1048576</value>
+  <value>10000</value>
   <description>Maximum number of blocks per file, enforced by the Namenode on
   write. This prevents the creation of extremely large files which can
   degrade performance.</description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f08b579..d7c8383 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -590,8 +590,15 @@ public class TestDirectoryScanner {
   100);
   DataNode dataNode = cluster.getDataNodes().get(0);
 
-  createFile(GenericTestUtils.getMethodName(),
-  BLOCK_LENGTH * blocks, false);
+  final int maxBlocksPerFile = (int) DFSConfigKeys
+  .DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT;
+  int numBlocksToCreate = blocks;
+  while (numBlocksToCreate > 0) {
+final int toCreate = Math.min(maxBlocksPerFile, numBlocksToCreate);
+createFile(GenericTestUtils.getMethodName() + numBlocksToCreate,
+BLOCK_LENGTH * toCreate, false);
+numBlocksToCreate -= toCreate;
+  }
 
   float ratio = 0.0f;
   int retries = maxRetries;
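
Deployments that depended on the old 1,048,576-block ceiling can restore it
explicitly instead of inheriting the lowered 10,000 default. A minimal
sketch using the key changed above; MaxBlocksPerFileConfig is an
illustrative class name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class MaxBlocksPerFileConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Opt back into the previous ceiling where legacy workloads still
    // write files with more than 10,000 blocks.
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY,
        1024 * 1024);
    System.out.println(conf.getLong(
        DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, 0));
  }
}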


[02/24] hadoop git commit: Revert "HDFS-5517. Lower the default maximum number of blocks per file. Contributed by Aaron T. Myers."

Revert "HDFS-5517. Lower the default maximum number of blocks per file. 
Contributed by Aaron T. Myers."

This reverts commit 09451252fae90a3ec192b8d7f0c49508df65e8c5.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b407d531
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b407d531
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b407d531

Branch: refs/heads/YARN-5085
Commit: b407d5319597f01ebd51736e299239022bd08028
Parents: 25f9872
Author: Andrew Wang 
Authored: Tue Nov 29 10:50:10 2016 -0800
Committer: Andrew Wang 
Committed: Tue Nov 29 10:50:10 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b407d531/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df21857..d7d3c9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_MIN_BLOCK_SIZE_KEY = 
"dfs.namenode.fs-limits.min-block-size";
   public static final longDFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
   public static final String  DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY = 
"dfs.namenode.fs-limits.max-blocks-per-file";
-  public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 
10*1000;
+  public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT = 
1024*1024;
   public static final String  DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY = 
"dfs.namenode.fs-limits.max-xattrs-per-inode";
   public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
   public static final String  DFS_NAMENODE_MAX_XATTR_SIZE_KEY = 
"dfs.namenode.fs-limits.max-xattr-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b407d531/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9fce84f..c9d74bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -372,7 +372,7 @@
 
 
 <property>
   <name>dfs.namenode.fs-limits.max-blocks-per-file</name>
-  <value>10000</value>
+  <value>1048576</value>
   <description>Maximum number of blocks per file, enforced by the Namenode on
   write. This prevents the creation of extremely large files which can
   degrade performance.</description>





[17/24] hadoop git commit: YARN-5761. Separate QueueManager from Scheduler. (Xuan Gong via gtcarrera9)

YARN-5761. Separate QueueManager from Scheduler. (Xuan Gong via gtcarrera9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69fb70c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69fb70c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69fb70c3

Branch: refs/heads/YARN-5085
Commit: 69fb70c31aa277f7fb14b05c0185ddc5cd90793d
Parents: 3fd844b
Author: Li Lu 
Authored: Wed Nov 30 13:38:42 2016 -0800
Committer: Li Lu 
Committed: Wed Nov 30 13:38:42 2016 -0800

--
 .../scheduler/SchedulerQueueManager.java|  75 
 .../scheduler/capacity/CapacityScheduler.java   | 294 +++
 .../capacity/CapacitySchedulerQueueManager.java | 361 +++
 .../capacity/TestApplicationLimits.java |  35 +-
 .../TestApplicationLimitsByPartition.java   |   7 +-
 .../scheduler/capacity/TestChildQueueOrder.java |   9 +-
 .../scheduler/capacity/TestLeafQueue.java   |   9 +-
 .../scheduler/capacity/TestParentQueue.java |  39 +-
 .../scheduler/capacity/TestReservations.java|   8 +-
 .../scheduler/capacity/TestUtils.java   |   2 +-
 10 files changed, 536 insertions(+), 303 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69fb70c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
new file mode 100644
index 000..92b989a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.io.IOException;
+import java.util.Map;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import 
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
+
+/**
+ *
+ * Context of the Queues in Scheduler.
+ *
+ */
+@Private
+@Unstable
+public interface SchedulerQueueManager<T extends Queue,
+    E extends ReservationSchedulerConfiguration> {
+
+  /**
+   * Get the root queue.
+   * @return root queue
+   */
+  T getRootQueue();
+
+  /**
+   * Get all the queues.
+   * @return a map containing all the queues, keyed by queue name
+   */
+  Map<String, T> getQueues();
+
+  /**
+   * Remove the queue from the existing queue.
+   * @param queueName the queue name
+   */
+  void removeQueue(String queueName);
+
+  /**
+   * Add a new queue to the existing queues.
+   * @param queueName the queue name
+   * @param queue the queue object
+   */
+  void addQueue(String queueName, T queue);
+
+  /**
+   * Get a queue matching the specified queue name.
+   * @param queueName the queue name
+   * @return a queue object
+   */
+  T getQueue(String queueName);
+
+  /**
+   * Reinitialize the queues.
+   * @param newConf the configuration
+   * @throws IOException if fails to re-initialize queues
+   */
+  void reinitializeQueues(E newConf) throws IOException;
+}
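
A hedged sketch of what a concrete implementation of this contract looks
like. MapBackedQueueManager is illustrative only: it mirrors the interface
shape without depending on the YARN-internal Queue and
ReservationSchedulerConfiguration bounds, and it is not the
CapacitySchedulerQueueManager introduced by this patch:

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MapBackedQueueManager<T> {
  private final Map<String, T> queues = new ConcurrentHashMap<>();
  private volatile T rootQueue;

  public T getRootQueue() {
    return rootQueue;
  }

  public Map<String, T> getQueues() {
    return Collections.unmodifiableMap(queues);
  }

  public void addQueue(String queueName, T queue) {
    // First queue registered acts as the root in this sketch; the real
    // manager builds the hierarchy from the scheduler configuration.
    if (queues.putIfAbsent(queueName, queue) == null && rootQueue == null) {
      rootQueue = queue;
    }
  }

  public void removeQueue(String queueName) {
    queues.remove(queueName);
  }

  public T getQueue(String queueName) {
    return queues.get(queueName);
  }

  public void reinitializeQueues(Map<String, T> fresh) throws IOException {
    // Real implementations rebuild and validate the hierarchy; here we
    // just swap in the new mapping.
    queues.clear();
    queues.putAll(fresh);
  }
}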

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69fb70c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 

[01/24] hadoop git commit: YARN-5774. MR Job stuck in ACCEPTED status without any progress in Fair Scheduler if set yarn.scheduler.minimum-allocation-mb to 0. (Contributed by Yufei Gu via Daniel Templ

Repository: hadoop
Updated Branches:
  refs/heads/YARN-5085 00096dcc0 -> c87b3a448


YARN-5774. MR Job stuck in ACCEPTED status without any progress in Fair 
Scheduler
if set yarn.scheduler.minimum-allocation-mb to 0. (Contributed by Yufei Gu via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f9872b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f9872b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f9872b

Branch: refs/heads/YARN-5085
Commit: 25f9872be63423ada6a18481eaad2888e731fdac
Parents: 00096dc
Author: Daniel Templeton 
Authored: Tue Nov 29 09:40:49 2016 -0800
Committer: Daniel Templeton 
Committed: Tue Nov 29 09:40:49 2016 -0800

--
 .../api/records/AbstractResourceRequest.java| 55 ++
 .../yarn/api/records/ResourceRequest.java   | 19 +
 .../api/records/UpdateContainerRequest.java | 18 +
 .../resource/DefaultResourceCalculator.java | 19 +++--
 .../resource/DominantResourceCalculator.java| 25 ++-
 .../yarn/util/resource/ResourceCalculator.java  | 17 +
 .../util/resource/TestResourceCalculator.java   | 79 
 .../server/resourcemanager/RMAppManager.java|  6 +-
 .../server/resourcemanager/RMServerUtils.java   |  9 +--
 .../scheduler/AbstractYarnScheduler.java| 20 +
 .../scheduler/SchedulerUtils.java   | 59 +++
 .../scheduler/YarnScheduler.java|  8 ++
 .../scheduler/capacity/CapacityScheduler.java   |  4 +-
 .../scheduler/fair/FairScheduler.java   | 40 --
 .../scheduler/fifo/FifoScheduler.java   |  4 +-
 .../scheduler/TestSchedulerUtils.java   | 22 +++---
 16 files changed, 258 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f9872b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
new file mode 100644
index 000..819a607
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code AbstractResourceRequest} represents a generic resource request made
+ * by an application to the {@code ResourceManager}.
+ * 
+ * It includes:
+ * 
+ *   {@link Resource} capability required for each request.
+ * 
+ *
+ * @see Resource
+ */
+@Public
+@Unstable
+public abstract class AbstractResourceRequest {
+
+  /**
+   * Set the Resource capability of the request
+   * @param capability Resource capability of the request
+   */
+  @Public
+  @Stable
+  public abstract void setCapability(Resource capability);
+
+  /**
+   * Get the Resource capability of the request.
+   * @return Resource capability of the request
+   */
+  @Public
+  @Stable
+  public abstract Resource getCapability();
+}
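
The point of hoisting the capability accessors into a shared base type is
that one normalization path can now cover both ResourceRequest and
UpdateContainerRequest. A hedged sketch of that path, assuming the
Resources.normalize helper from hadoop-yarn-common; RequestNormalizer is an
illustrative name:

import org.apache.hadoop.yarn.api.records.AbstractResourceRequest;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public final class RequestNormalizer {
  private RequestNormalizer() {}

  // Clamp the requested capability into [min, max], stepping by the
  // configured increment, so a minimum-allocation of 0 can never yield a
  // zero-sized, never-satisfiable container ask.
  public static void normalize(AbstractResourceRequest request,
      ResourceCalculator calculator, Resource min, Resource max,
      Resource increment) {
    request.setCapability(Resources.normalize(
        calculator, request.getCapability(), min, max, increment));
  }
}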

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f9872b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
 

[09/24] hadoop git commit: Revert due to an error "HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. Contributed by Sammi Chen"

Revert due to an error "HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. 
Contributed by Sammi Chen"

This reverts commit 5614f847b2ef2a5b70bd9a06edc4eba06174c6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfd8076f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfd8076f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfd8076f

Branch: refs/heads/YARN-5085
Commit: cfd8076f81930c3ffea8ec2ef42926217b83ab1a
Parents: aeecfa2
Author: Kai Zheng 
Authored: Wed Nov 30 15:44:52 2016 +0800
Committer: Kai Zheng 
Committed: Wed Nov 30 15:44:52 2016 +0800

--
 .../io/erasurecode/ErasureCodeConstants.java|   3 -
 .../hadoop/hdfs/protocol/HdfsConstants.java |   1 -
 .../namenode/ErasureCodingPolicyManager.java|  23 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   8 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  28 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  50 +--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  27 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  37 +-
 .../hdfs/TestDFSXORStripedInputStream.java  |  33 --
 .../hdfs/TestDFSXORStripedOutputStream.java |  35 --
 ...estDFSXORStripedOutputStreamWithFailure.java |  36 --
 ...tyPreemptionPolicyForReservedContainers.java | 430 +++
 12 files changed, 471 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index ffa0bce..8d6ff85 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -38,7 +38,4 @@ public final class ErasureCodeConstants {
 
   public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
   RS_LEGACY_CODEC_NAME, 6, 3);
-
-  public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
-  XOR_CODEC_NAME, 2, 1);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index b55b4df..acbc8f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -147,7 +147,6 @@ public final class HdfsConstants {
   public static final byte RS_6_3_POLICY_ID = 0;
   public static final byte RS_3_2_POLICY_ID = 1;
   public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
-  public static final byte XOR_2_1_POLICY_ID = 3;
 
   /* Hidden constructor */
   protected HdfsConstants() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 8a85d23..c4bc8de 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -36,7 +36,7 @@ import java.util.TreeMap;
 public final class ErasureCodingPolicyManager {
 
   /**
-   * TODO: HDFS-8095.
+   * TODO: HDFS-8095
*/
   private static final int DEFAULT_CELLSIZE = 64 * 1024;
   private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -48,14 +48,10 @@ public final class ErasureCodingPolicyManager {
   private static final ErasureCodingPolicy SYS_POLICY3 =
   new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
   DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
-  private static final 

[16/24] hadoop git commit: HADOOP-13830. Intermittent failure of ITestS3NContractRootDir#testRecursiveRootListing: "Can not create a Path from an empty string". Contributed by Steve Loughran

 HADOOP-13830. Intermittent failure of 
ITestS3NContractRootDir#testRecursiveRootListing: "Can not create a Path from 
an empty string". Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fd844b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fd844b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fd844b9

Branch: refs/heads/YARN-5085
Commit: 3fd844b99fdfae6be6e5e261f371d175aad14229
Parents: 4fca94f
Author: Mingliang Liu 
Authored: Wed Nov 30 13:01:02 2016 -0800
Committer: Mingliang Liu 
Committed: Wed Nov 30 13:01:19 2016 -0800

--
 .../org/apache/hadoop/fs/s3native/NativeS3FileSystem.java | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd844b9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index f741298..1a45db3 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -587,7 +587,12 @@ public class NativeS3FileSystem extends FileSystem {
   for (String commonPrefix : listing.getCommonPrefixes()) {
 Path subpath = keyToPath(commonPrefix);
 String relativePath = pathUri.relativize(subpath.toUri()).getPath();
-status.add(newDirectory(new Path(absolutePath, relativePath)));
+// sometimes the common prefix includes the base dir (HADOOP-13830).
+// avoid that problem by detecting it and keeping it out
+// of the list
+if (!relativePath.isEmpty()) {
+  status.add(newDirectory(new Path(absolutePath, relativePath)));
+}
   }
   priorLastKey = listing.getPriorLastKey();
 } while (priorLastKey != null);
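
The root cause is that URI.relativize returns an empty path when a listing's
common prefix equals the base directory itself, and new Path(parent, "")
cannot be constructed from the empty string. A minimal demonstration with an
illustrative bucket layout:

import java.net.URI;

public class RelativizeDemo {
  public static void main(String[] args) {
    URI base = URI.create("s3n://bucket/work/dir/");
    URI same = URI.create("s3n://bucket/work/dir/");
    URI child = URI.create("s3n://bucket/work/dir/sub/");
    // Prints '' for the base dir itself and 'sub/' for a real child,
    // which is why the patch skips empty relative paths.
    System.out.println("'" + base.relativize(same).getPath() + "'");
    System.out.println("'" + base.relativize(child).getPath() + "'");
  }
}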





hadoop git commit: HDFS-11180. Intermittent deadlock in NameNode when failover happens.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 99b046f8a -> 5a7941a4f


HDFS-11180. Intermittent deadlock in NameNode when failover happens.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a7941a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a7941a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a7941a4

Branch: refs/heads/branch-2
Commit: 5a7941a4fc193259ab4a306f7fe4f68bf101d0e0
Parents: 99b046f
Author: Akira Ajisaka 
Authored: Fri Dec 2 11:34:05 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Dec 2 11:34:05 2016 +0900

--
 .../dev-support/findbugsExcludeFile.xml | 27 
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 72 +---
 .../hadoop/hdfs/server/namenode/FSImage.java| 13 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  4 +-
 .../server/namenode/TestFSNamesystemMBean.java  | 24 +++
 7 files changed, 147 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7941a4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
 
 
 
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7941a4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index bc31ffc..fbf03fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -156,14 +156,16 @@ public class FSEditLog implements LogsPurgeable {
   private EditLogOutputStream editLogStream = null;
 
   // a monotonically increasing counter that represents transactionIds.
-  private long txid = 0;
+  // All of the threads which update/increment txid are synchronized,
+  // so make txid volatile instead of AtomicLong.
+  private volatile long txid = 0;
 
   // stores the last synced transactionId.
   private long synctxid = 0;
 
   // the first txid of the log that's currently open for writing.
   // If this value is N, we are currently writing to edits_inprogress_N
-  private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+  private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
 
   // the time of printing the statistics to the log file.
   private long lastPrintTime;
@@ -339,7 +341,18 @@ public class FSEditLog implements LogsPurgeable {
 return state == State.IN_SEGMENT ||
   state == State.BETWEEN_LOG_SEGMENTS;
   }
-  
+
+  /**
+   * Return true if the log is currently open in write mode.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is currently open in write mode, regardless
+   * of whether it actually has an open segment.
+   */
+  boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+  }
+
   /**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -349,6 +362,16 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
+   * Return true if the state is IN_SEGMENT.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is open in write mode and has a segment open
+   * ready to take edits.
+   */
+  boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+  }
+
+  /**
* @return true if the log is open in read mode.
*/
   public synchronized boolean isOpenForRead() {
@@ -523,7 +546,16 @@ public class FSEditLog implements LogsPurgeable {
   public synchronized long getLastWrittenTxId() {
 return txid;
   }
-  
+
+  /**
+   * Return the transaction ID of the last transaction written to the log.
+   * This 
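
A distilled sketch of the concurrency pattern this patch applies to
FSEditLog: writers stay synchronized so updates never race, while metrics
threads read a volatile field without taking the lock, avoiding the
lock-order inversion that deadlocked the NameNode on failover.
LockFreeMetricsCounter is illustrative, not Hadoop code:

public class LockFreeMetricsCounter {
  // All writers synchronize; volatile makes the latest value visible to
  // unsynchronized metrics readers.
  private volatile long txid = 0;

  public synchronized long nextTxid() {
    return ++txid;
  }

  /** For metrics only: may briefly lag a concurrent writer, never tears. */
  public long getTxidWithoutLock() {
    return txid;
  }
}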

hadoop git commit: HDFS-11180. Intermittent deadlock in NameNode when failover happens.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 dd4acebb4 -> 5025a898e


HDFS-11180. Intermittent deadlock in NameNode when failover happens.

(cherry picked from commit 5a7941a4fc193259ab4a306f7fe4f68bf101d0e0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5025a898
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5025a898
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5025a898

Branch: refs/heads/branch-2.8
Commit: 5025a898e13b0d442aa8c7a3433a90367e013a42
Parents: dd4aceb
Author: Akira Ajisaka 
Authored: Fri Dec 2 11:34:05 2016 +0900
Committer: Akira Ajisaka 
Committed: Fri Dec 2 11:35:22 2016 +0900

--
 .../dev-support/findbugsExcludeFile.xml | 27 
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 72 +---
 .../hadoop/hdfs/server/namenode/FSImage.java| 13 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  4 +-
 .../server/namenode/TestFSNamesystemMBean.java  | 24 +++
 7 files changed, 147 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5025a898/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
 
 
 
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
+
+
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5025a898/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 3eda0f5..2fa70df 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -156,14 +156,16 @@ public class FSEditLog implements LogsPurgeable {
   private EditLogOutputStream editLogStream = null;
 
   // a monotonically increasing counter that represents transactionIds.
-  private long txid = 0;
+  // All of the threads which update/increment txid are synchronized,
+  // so make txid volatile instead of AtomicLong.
+  private volatile long txid = 0;
 
   // stores the last synced transactionId.
   private long synctxid = 0;
 
   // the first txid of the log that's currently open for writing.
   // If this value is N, we are currently writing to edits_inprogress_N
-  private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+  private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
 
   // the time of printing the statistics to the log file.
   private long lastPrintTime;
@@ -339,7 +341,18 @@ public class FSEditLog implements LogsPurgeable {
 return state == State.IN_SEGMENT ||
   state == State.BETWEEN_LOG_SEGMENTS;
   }
-  
+
+  /**
+   * Return true if the log is currently open in write mode.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is currently open in write mode, regardless
+   * of whether it actually has an open segment.
+   */
+  boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+  }
+
   /**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -349,6 +362,16 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
+   * Return true if the state is IN_SEGMENT.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is open in write mode and has a segment open
+   * ready to take edits.
+   */
+  boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+  }
+
+  /**
* @return true if the log is open in read mode.
*/
   public synchronized boolean isOpenForRead() {
@@ -523,7 +546,16 @@ public class FSEditLog implements LogsPurgeable {
   public synchronized long getLastWrittenTxId() {
 return txid;
   }
-  
+
+  /**
+   * Return 

hadoop git commit: YARN-5958. Fix ASF license warnings for slider core module. Contributed by Billie Rinaldi

Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services 165e50b00 -> 681a3a65d


YARN-5958. Fix ASF license warnings for slider core module. Contributed by 
Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/681a3a65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/681a3a65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/681a3a65

Branch: refs/heads/yarn-native-services
Commit: 681a3a65d1f1fa8272444e8f18a8f16aa0bac526
Parents: 165e50b
Author: Gour Saha 
Authored: Thu Dec 1 17:45:44 2016 -0800
Committer: Gour Saha 
Committed: Thu Dec 1 17:45:44 2016 -0800

--
 .../hadoop-yarn-slider-core/pom.xml | 61 +---
 .../src/license/THIRD-PARTY.properties  | 33 ---
 2 files changed, 14 insertions(+), 80 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/681a3a65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 66e9ee9..10cf6b1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -78,6 +78,20 @@
 
   
 
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>**/*.json</exclude>
+            <exclude>src/main/resources/webapps/slideram/.keep</exclude>
+            <exclude>src/main/java/org/apache/slider/api/proto/Messages.java</exclude>
+            <exclude>src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
 
   
   
@@ -384,53 +398,6 @@
   
 
 
-<profiles>
-  <profile>
-    <id>rat</id>
-    <build>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.rat</groupId>
-          <artifactId>apache-rat-plugin</artifactId>
-          <executions>
-            <execution>
-              <id>check-licenses</id>
-              <goals>
-                <goal>check</goal>
-              </goals>
-            </execution>
-          </executions>
-          <configuration>
-            <excludes>
-              <exclude>**/*.json</exclude>
-              <exclude>src/test/python/agent.ini</exclude>
-              <exclude>src/test/python/version</exclude>
-              <exclude>**/THIRD-PARTY.properties</exclude>
-              <exclude>src/main/resources/webapps/slideram/.keep</exclude>
-              <exclude>src/main/resources/webapps/slideragent/.keep</exclude>
-              <exclude>src/main/resources/webapps/static/yarn.dt.plugins.js</exclude>
-              <exclude>src/main/resources/webapps/static/dt-1.9.4/**</exclude>
-              <exclude>src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js</exclude>
-              <exclude>src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js</exclude>
-              <exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
-              <exclude>src/main/resources/webapps/static/jt/jquery.jstree.js</exclude>
-              <exclude>src/main/java/org/apache/slider/api/proto/Messages.java</exclude>
-              <exclude>src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java</exclude>
-              <exclude>src/test/app_packages/test_am_config/resources/test.template</exclude>
-              <exclude>src/test/app_packages/test_am_config/test_archive/testfile</exclude>
-            </excludes>
-          </configuration>
-        </plugin>
-      </plugins>
-    </build>
-  </profile>
-</profiles>
   
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/681a3a65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
deleted file mode 100644
index 1abd56e..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
+++ /dev/null
@@ -1,33 +0,0 @@
-# Generated by org.codehaus.mojo.license.AddThirdPartyMojo
-#---
-# Already used licenses in project :
-# - Apache License
-# - BSD
-# - CDDL + GPLv2 with classpath exception
-# - CDDL 1.1
-# - CDDL License
-# - CDDL+GPL
-# - Common Public License Version 1.0
-# - Eclipse Public License - Version 1.0
-# - GNU Lesser General Public License (LGPL), Version 2.1
-# - GNU Lesser General Public 

hadoop git commit: MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page in JHS (haibochen via rkanter)

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0d8a35bd6 -> 99b046f8a


MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page 
in JHS (haibochen via rkanter)

(cherry picked from commit c87b3a448a00df97149a4e93a8c39d9ad0268bdb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b046f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b046f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b046f8

Branch: refs/heads/branch-2
Commit: 99b046f8a9616bfd0bff978017dbab6e22966d8f
Parents: 0d8a35b
Author: Robert Kanter 
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter 
Committed: Thu Dec 1 17:30:02 2016 -0800

--
 .../mapreduce/v2/app/webapp/AppController.java  | 34 
 .../mapreduce/v2/app/webapp/ConfBlock.java  |  2 +-
 .../v2/app/webapp/TestAppController.java| 14 
 .../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java |  2 ++
 .../org/apache/hadoop/yarn/webapp/Router.java   | 23 ++---
 .../org/apache/hadoop/yarn/webapp/WebApp.java   | 13 
 6 files changed, 83 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements 
AMParams {
   }
 
   /**
+   * Handle requests to download the job configuration.
+   */
+  public void downloadConf() {
+try {
+  requireJob();
+} catch (Exception e) {
+  renderText(e.getMessage());
+  return;
+}
+writeJobConf();
+  }
+
+  private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+  Configuration jobConf = job.loadConfFile();
+  response().setContentType("text/xml");
+  response().setHeader("Content-Disposition",
+  "attachment; filename=" + jobId + ".xml");
+  jobConf.writeXml(writer());
+} catch (IOException e) {
+  LOG.error("Error reading/writing job" +
+  " conf file for job: " + jobId, e);
+  renderText(e.getMessage());
+}
+  }
+
+  /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
 try {
   ConfInfo info = new ConfInfo(job);
 
-  html.div().h3(confPath.toString())._();
+  html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
   TBODY<TABLE<Hamlet>> tbody = html.
 // Tasks table
 table("#conf").

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git

hadoop git commit: MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page in JHS (haibochen via rkanter)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 2d77dc727 -> c87b3a448


MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page 
in JHS (haibochen via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c87b3a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c87b3a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c87b3a44

Branch: refs/heads/trunk
Commit: c87b3a448a00df97149a4e93a8c39d9ad0268bdb
Parents: 2d77dc7
Author: Robert Kanter 
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter 
Committed: Thu Dec 1 17:29:38 2016 -0800

--
 .../mapreduce/v2/app/webapp/AppController.java  | 34 
 .../mapreduce/v2/app/webapp/ConfBlock.java  |  2 +-
 .../v2/app/webapp/TestAppController.java| 14 
 .../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java |  2 ++
 .../org/apache/hadoop/yarn/webapp/Router.java   | 23 ++---
 .../org/apache/hadoop/yarn/webapp/WebApp.java   | 13 
 6 files changed, 83 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements 
AMParams {
   }
 
   /**
+   * Handle requests to download the job configuration.
+   */
+  public void downloadConf() {
+try {
+  requireJob();
+} catch (Exception e) {
+  renderText(e.getMessage());
+  return;
+}
+writeJobConf();
+  }
+
+  private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+  Configuration jobConf = job.loadConfFile();
+  response().setContentType("text/xml");
+  response().setHeader("Content-Disposition",
+  "attachment; filename=" + jobId + ".xml");
+  jobConf.writeXml(writer());
+} catch (IOException e) {
+  LOG.error("Error reading/writing job" +
+  " conf file for job: " + jobId, e);
+  renderText(e.getMessage());
+}
+  }
+
+  /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
 try {
   ConfInfo info = new ConfInfo(job);
 
-  html.div().h3(confPath.toString())._();
+  html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
      TBODY<TABLE<Hamlet>> tbody = html.
        // Tasks table
        table("#conf").

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git
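
A quick way to sanity-check the new route end to end is a plain HTTP GET
against the history server. The sketch below is not part of the patch: the
"/jobhistory/downloadconf/" path is taken from the ConfBlock link above,
19888 is the default JHS web port, and the host and job id are illustrative.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class ConfDownloadCheck {
  public static void main(String[] args) throws Exception {
    String jhs = args.length > 0 ? args[0] : "http://localhost:19888";
    String jobId = args.length > 1 ? args[1] : "job_1480550000000_0001";
    // The new handler replies with Content-Type: text/xml and a
    // Content-Disposition attachment header, so the body is job_conf.xml.
    URL url = new URL(jhs + "/jobhistory/downloadconf/" + jobId);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try (InputStream in = conn.getInputStream()) {
      Files.copy(in, Paths.get(jobId + ".xml"),
          StandardCopyOption.REPLACE_EXISTING);
    }
    System.out.println("saved " + jobId + ".xml (HTTP "
        + conn.getResponseCode() + ")");
  }
}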

hadoop git commit: YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed by Haibo Chen via Daniel Templeton)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 19f373a46 -> 2d77dc727


YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed 
by Haibo Chen via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d77dc72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d77dc72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d77dc72

Branch: refs/heads/trunk
Commit: 2d77dc727d9b5e56009bbc36643d85500efcbca5
Parents: 19f373a
Author: Daniel Templeton 
Authored: Thu Dec 1 15:57:39 2016 -0800
Committer: Daniel Templeton 
Committed: Thu Dec 1 15:57:39 2016 -0800

--
 .../hadoop/yarn/client/TestGetGroups.java   | 36 +---
 1 file changed, 24 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d77dc72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
index e947ece..da0258c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
@@ -20,16 +20,21 @@ package org.apache.hadoop.yarn.client;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.tools.GetGroupsTestBase;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
@@ -42,30 +47,37 @@ public class TestGetGroups extends GetGroupsTestBase {
   private static Configuration conf;
   
   @BeforeClass
-  public static void setUpResourceManager() throws IOException, InterruptedException {
+  public static void setUpResourceManager() throws InterruptedException {
 conf = new YarnConfiguration();
 resourceManager = new ResourceManager() {
   @Override
   protected void doSecureLogin() throws IOException {
   };
 };
+
+// a reliable way to wait for resource manager to start
+CountDownLatch rmStartedSignal = new CountDownLatch(1);
+ServiceStateChangeListener rmStateChangeListener =
+new ServiceStateChangeListener() {
+  @Override
+  public void stateChanged(Service service) {
+if (service.getServiceState() == STATE.STARTED) {
+  rmStartedSignal.countDown();
+}
+  }
+};
+resourceManager.registerServiceListener(rmStateChangeListener);
+
 resourceManager.init(conf);
 new Thread() {
   public void run() {
 resourceManager.start();
   };
 }.start();
-int waitCount = 0;
-while (resourceManager.getServiceState() == STATE.INITED
-&& waitCount++ < 10) {
-  LOG.info("Waiting for RM to start...");
-  Thread.sleep(1000);
-}
-if (resourceManager.getServiceState() != STATE.STARTED) {
-  throw new IOException(
-  "ResourceManager failed to start. Final state is "
-  + resourceManager.getServiceState());
-}
+
+boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
+Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
+
 LOG.info("ResourceManager RMAdmin address: " +
 conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
   }
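
The same latch idiom works for waiting on any Hadoop Service, not just the
ResourceManager. A reusable standalone sketch; the class and method names
are illustrative and not part of the patch:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.service.Service;

public final class ServiceStartWaiter {
  private ServiceStartWaiter() {}

  /** Block until the service reports STARTED, or the timeout elapses. */
  public static boolean awaitStarted(Service service, long timeoutMs)
      throws InterruptedException {
    final CountDownLatch started = new CountDownLatch(1);
    service.registerServiceListener(s -> {
      if (s.getServiceState() == Service.STATE.STARTED) {
        started.countDown();
      }
    });
    // The listener may be registered after the transition has already
    // happened, so check the current state too before blocking.
    return service.getServiceState() == Service.STATE.STARTED
        || started.await(timeoutMs, TimeUnit.MILLISECONDS);
  }
}

Unlike the removed sleep-and-poll loop, this never misses a fast start and
never burns a full polling interval waiting for a service that is already up.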





hadoop git commit: HDFS-11132. Allow AccessControlException in contract tests when getFileStatus on subdirectory of existing files. Contributed by Vishwajeet Dusane

Repository: hadoop
Updated Branches:
  refs/heads/trunk 96c574927 -> 19f373a46


HDFS-11132. Allow AccessControlException in contract tests when getFileStatus 
on subdirectory of existing files. Contributed by Vishwajeet Dusane


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19f373a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19f373a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19f373a4

Branch: refs/heads/trunk
Commit: 19f373a46b2abb7a575f7884a9c7443b8ed67cd3
Parents: 96c5749
Author: Mingliang Liu 
Authored: Thu Dec 1 12:54:03 2016 -0800
Committer: Mingliang Liu 
Committed: Thu Dec 1 12:54:28 2016 -0800

--
 .../fs/FileContextMainOperationsBaseTest.java   | 21 
 .../hadoop/fs/FileSystemContractBaseTest.java   | 17 ++--
 2 files changed, 32 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 5f9151a..2b3ab2a 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -251,8 +252,14 @@ public abstract class FileContextMainOperationsBaseTest  {
 } catch (IOException e) {
   // expected
 }
-Assert.assertFalse(exists(fc, testSubDir));
-
+
+try {
+  Assert.assertFalse(exists(fc, testSubDir));
+} catch (AccessControlException e) {
+  // Expected : HDFS-11132 Checks on paths under file may be rejected by
+  // file missing execute permission.
+}
+
 Path testDeepSubDir = getTestRootPath(fc, "test/hadoop/file/deep/sub/dir");
 try {
   fc.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
@@ -260,8 +267,14 @@ public abstract class FileContextMainOperationsBaseTest  {
 } catch (IOException e) {
   // expected
 }
-Assert.assertFalse(exists(fc, testDeepSubDir));
-
+
+try {
+  Assert.assertFalse(exists(fc, testDeepSubDir));
+} catch (AccessControlException e) {
+  // Expected : HDFS-11132 Checks on paths under file may be rejected by
+  // file missing execute permission.
+}
+
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index bbd7336..6247959 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -158,7 +159,13 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 } catch (IOException e) {
   // expected
 }
-assertFalse(fs.exists(testSubDir));
+
+try {
+  assertFalse(fs.exists(testSubDir));
+} catch (AccessControlException e) {
+  // Expected : HDFS-11132 Checks on paths under file may be rejected by
+  // file missing execute permission.
+}
 
 Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
 try {
@@ -167,7 +174,13 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 } catch (IOException e) {
   // expected
 }
-assertFalse(fs.exists(testDeepSubDir));
+
+
+try {
+  assertFalse(fs.exists(testDeepSubDir));
+} catch (AccessControlException e) {
+  // Expected : HDFS-11132 Checks on paths under file may be rejected by
+  // file missing execute permission.
+}

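For readers wondering why the probe can throw at all: per the comment added
in the patch, a permission check on a path *under* a regular file can be
rejected because the file lacks the execute bit that path traversal requires,
before the existence question is even answered; other FileSystem
implementations simply return false. A standalone illustration, assuming a
default FileSystem and a made-up path where "/test/hadoop/file" is a regular
file (none of this is part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class ProbeUnderFile {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path underFile = new Path("/test/hadoop/file/sub/dir");
    try {
      // FileSystem#exists only swallows FileNotFoundException, so an
      // AccessControlException raised during traversal propagates.
      System.out.println("exists? " + fs.exists(underFile));
    } catch (AccessControlException e) {
      // The outcome the relaxed contract now tolerates.
      System.out.println("rejected during traversal: " + e.getMessage());
    }
  }
}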
hadoop git commit: HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn Sharp.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 afad13f8d -> dd4acebb4


HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn 
Sharp.

(cherry picked from commit 0d8a35bd6de5d2a5a9b816ca98f31975e94bd7c6)

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd4acebb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd4acebb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd4acebb

Branch: refs/heads/branch-2.8
Commit: dd4acebb4146c7629ce914df6642c5d69b8172d9
Parents: afad13f
Author: Kihwal Lee 
Authored: Thu Dec 1 12:20:30 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 1 12:20:30 2016 -0600

--
 .../server/blockmanagement/BlockManager.java| 75 +++-
 1 file changed, 24 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd4acebb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0ba01aa..a929c43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -143,7 +144,6 @@ public class BlockManager implements BlockStatsMXBean {
   private boolean initializedReplQueues;
 
   private final AtomicLong excessBlocksCount = new AtomicLong(0L);
-  private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
   private final BlockReportLeaseManager blockReportLeaseManager;
   private ObjectName mxBeanName;
@@ -178,7 +178,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
   }
   /** Used by metrics */
   public int getPendingDataNodeMessageCount() {
@@ -218,8 +218,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
-  private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
-  new LightWeightHashSet<>();
+  private final LinkedHashSet<Block> postponedMisreplicatedBlocks =
+  new LinkedHashSet<Block>();
+  private final int blocksPerPostpondedRescan;
+  private final ArrayList<Block> rescannedMisreplicatedBlocks;
 
   /**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -316,6 +318,10 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
 
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
 startupDelayBlockDeletionInMs = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
@@ -1424,9 +1430,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
   private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
-  postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
   }
   
   
@@ -2050,39 +2054,14 @@ public class BlockManager implements BlockStatsMXBean {
 if (getPostponedMisreplicatedBlocksCount() == 0) {
   return;
 }
-long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow();
 namesystem.writeLock();
-long startPostponedMisReplicatedBlocksCount =
-getPostponedMisreplicatedBlocksCount();
+long startTime = Time.monotonicNow();
+long startSize = postponedMisreplicatedBlocks.size();
 try {
-  // blocksPerRescan is the configured number of blocks per rescan.
-  // Randomly 

hadoop git commit: HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn Sharp.

Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8ffe86f78 -> 0d8a35bd6


HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d8a35bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d8a35bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d8a35bd

Branch: refs/heads/branch-2
Commit: 0d8a35bd6de5d2a5a9b816ca98f31975e94bd7c6
Parents: 8ffe86f
Author: Kihwal Lee 
Authored: Thu Dec 1 12:15:15 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 1 12:15:15 2016 -0600

--
 .../server/blockmanagement/BlockManager.java| 76 +++-
 1 file changed, 24 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d8a35bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 954b297..f2805e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -170,7 +171,6 @@ public class BlockManager implements BlockStatsMXBean {
   private boolean initializedReplQueues;
 
   private final AtomicLong excessBlocksCount = new AtomicLong(0L);
-  private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
   private final BlockReportLeaseManager blockReportLeaseManager;
   private ObjectName mxBeanName;
@@ -205,7 +205,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
   }
   /** Used by metrics */
   public int getPendingDataNodeMessageCount() {
@@ -245,8 +245,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
-  private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
-  new LightWeightHashSet<>();
+  private final LinkedHashSet<Block> postponedMisreplicatedBlocks =
+  new LinkedHashSet<Block>();
+  private final int blocksPerPostpondedRescan;
+  private final ArrayList<Block> rescannedMisreplicatedBlocks;
 
   /**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -345,7 +347,10 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
 this.blockIdManager = new BlockIdManager(this);
-
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
 startupDelayBlockDeletionInMs = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
@@ -1455,9 +1460,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
   private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
-  postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
   }
   
   
@@ -2164,39 +2167,14 @@ public class BlockManager implements BlockStatsMXBean {
 if (getPostponedMisreplicatedBlocksCount() == 0) {
   return;
 }
-long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow();
 namesystem.writeLock();
-long startPostponedMisReplicatedBlocksCount =
-getPostponedMisreplicatedBlocksCount();
+long startTime = Time.monotonicNow();
+long startSize = postponedMisreplicatedBlocks.size();
 try {
-  // blocksPerRescan is the configured number of blocks per rescan.
-  // Randomly select blocksPerRescan consecutive blocks from the HashSet
-  // when the number of blocks remaining is larger than blocksPerRescan.
-  // The 

hadoop git commit: HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn Sharp.

Repository: hadoop
Updated Branches:
  refs/heads/trunk e0fa49234 -> 96c574927


HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn 
Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c57492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c57492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c57492

Branch: refs/heads/trunk
Commit: 96c574927a600d15fab919df1fdc9e07887af6c5
Parents: e0fa492
Author: Kihwal Lee 
Authored: Thu Dec 1 12:11:27 2016 -0600
Committer: Kihwal Lee 
Committed: Thu Dec 1 12:11:27 2016 -0600

--
 .../server/blockmanagement/BlockManager.java| 79 ++--
 1 file changed, 24 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c57492/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1b744e7..e60703b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -30,6 +30,7 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -43,8 +44,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
 import javax.management.ObjectName;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -101,7 +100,6 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.FoldedTreeSet;
-import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.namenode.CacheManager;
 
@@ -184,7 +182,6 @@ public class BlockManager implements BlockStatsMXBean {
   /** flag indicating whether replication queues have been initialized */
   private boolean initializedReplQueues;
 
-  private final AtomicLong postponedMisreplicatedBlocksCount = new AtomicLong(0L);
   private final long startupDelayBlockDeletionInMs;
   private final BlockReportLeaseManager blockReportLeaseManager;
   private ObjectName mxBeanName;
@@ -219,7 +216,7 @@ public class BlockManager implements BlockStatsMXBean {
   }
   /** Used by metrics */
   public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
   }
   /** Used by metrics */
   public int getPendingDataNodeMessageCount() {
@@ -275,8 +272,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
-  private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
-  new LightWeightHashSet<>();
+  private final Set<Block> postponedMisreplicatedBlocks =
+  new LinkedHashSet<Block>();
+  private final int blocksPerPostpondedRescan;
+  private final ArrayList<Block> rescannedMisreplicatedBlocks;
 
   /**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -378,7 +377,10 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
 this.blockIdManager = new BlockIdManager(this);
-
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
 startupDelayBlockDeletionInMs = conf.getLong(
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
@@ -1613,9 +1615,7 @@ public class BlockManager implements BlockStatsMXBean {
 
 
   private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
-  postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
   }

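The rescan hunks above are truncated in this archive, but the new fields tell
the story: an insertion-ordered LinkedHashSet replaces random probing of a
hash set, a bounded slice is taken off the head each pass, and unresolved
blocks are re-queued at the tail. A standalone demo of that round-robin
technique; the block names and the resolved/unresolved test are fabricated
for the demo, and blocksPerRescan stands in for blocksPerPostpondedRescan:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;

public class PostponedRescanDemo {
  public static void main(String[] args) {
    LinkedHashSet<String> postponed = new LinkedHashSet<>(
        Arrays.asList("blk_1", "blk_2", "blk_3", "blk_4", "blk_5"));
    List<String> rescanned = new ArrayList<>();
    int blocksPerRescan = 2;

    // Take a bounded slice off the head; iterator removal is O(1), with no
    // need to randomly pick a starting offset in the set.
    Iterator<String> it = postponed.iterator();
    for (int i = 0; i < blocksPerRescan && it.hasNext(); i++) {
      String blk = it.next();
      it.remove();
      boolean stillPostponed = blk.endsWith("1");  // fake scan outcome
      if (stillPostponed) {
        rescanned.add(blk);
      }
    }
    // Re-adding appends to the tail of the insertion order, so unresolved
    // blocks wait until the rest of the set has had a turn.
    postponed.addAll(rescanned);
    rescanned.clear();
    System.out.println(postponed);  // [blk_3, blk_4, blk_5, blk_1]
  }
}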
hadoop git commit: HDFS-11180. Intermittent deadlock in NameNode when failover happens.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 1f7613be9 -> e0fa49234


HDFS-11180. Intermittent deadlock in NameNode when failover happens.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0fa4923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0fa4923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0fa4923

Branch: refs/heads/trunk
Commit: e0fa49234fd37aca88e1caa95bac77bca192bae4
Parents: 1f7613b
Author: Akira Ajisaka 
Authored: Thu Dec 1 23:08:59 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Dec 1 23:08:59 2016 +0900

--
 .../dev-support/findbugsExcludeFile.xml | 27 
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 72 +---
 .../hadoop/hdfs/server/namenode/FSImage.java| 15 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 27 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  2 +-
 .../server/namenode/ha/StandbyCheckpointer.java |  4 +-
 .../server/namenode/TestFSNamesystemMBean.java  | 24 +++
 7 files changed, 148 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
     </Match>
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
+      <Field name="state" />
+      <Bug pattern="IS2_INCONSISTENT_SYNC" />
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
+      <Field name="txid" />
+      <Bug pattern="IS2_INCONSISTENT_SYNC" />
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.hdfs.server.namenode.FSEditLog" />
+      <Field name="curSegmentTxId" />
+      <Bug pattern="IS2_INCONSISTENT_SYNC" />
+    </Match>
     <Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index ef9eb68..c9ee32b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -155,14 +155,16 @@ public class FSEditLog implements LogsPurgeable {
   private EditLogOutputStream editLogStream = null;
 
   // a monotonically increasing counter that represents transactionIds.
-  private long txid = 0;
+  // All of the threads which update/increment txid are synchronized,
+  // so make txid volatile instead of AtomicLong.
+  private volatile long txid = 0;
 
   // stores the last synced transactionId.
   private long synctxid = 0;
 
   // the first txid of the log that's currently open for writing.
   // If this value is N, we are currently writing to edits_inprogress_N
-  private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+  private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
 
   // the time of printing the statistics to the log file.
   private long lastPrintTime;
@@ -338,7 +340,18 @@ public class FSEditLog implements LogsPurgeable {
 return state == State.IN_SEGMENT ||
   state == State.BETWEEN_LOG_SEGMENTS;
   }
-  
+
+  /**
+   * Return true if the log is currently open in write mode.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is currently open in write mode, regardless
+   * of whether it actually has an open segment.
+   */
+  boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+  }
+
   /**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -348,6 +361,16 @@ public class FSEditLog implements LogsPurgeable {
   }
 
   /**
+   * Return true if the state is IN_SEGMENT.
+   * This method is not synchronized and must be used only for metrics.
+   * @return true if the log is open in write mode and has a segment open
+   * ready to take edits.
+   */
+  boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+  }
+
+  /**
* @return true if the log is open in read mode.
*/
   public synchronized boolean isOpenForRead() {
@@ -522,7 +545,16 @@ public class FSEditLog implements LogsPurgeable {
   public synchronized long getLastWrittenTxId() {
 return txid;
   }
-  
+
+  /**
+   * Return the transaction ID of the last transaction written to the log.
+   * This method 

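Distilled, the concurrency pattern this patch applies is: keep every writer
synchronized, make the field volatile, and give metrics a lock-free reader
that may observe a value an instant stale but can never block behind (or
deadlock with) a writer. A minimal sketch under those assumptions, with
illustrative names:

class MetricsFriendlyLog {
  // All updates happen under the instance lock, so volatile (rather than
  // AtomicLong) is enough to make unsynchronized reads safe.
  private volatile long txid = 0;

  synchronized long beginTransaction() {
    return ++txid;  // read-modify-write is safe: writers are serialized
  }

  synchronized long getLastWrittenTxId() {
    return txid;    // precise read on the normal, locked path
  }

  // Unsynchronized read for metrics only: never blocks behind a writer,
  // at the cost of possibly returning a slightly stale value.
  long getLastWrittenTxIdWithoutLock() {
    return txid;
  }
}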
hadoop git commit: HADOOP-13850 s3guard to log choice of metadata store at debug. Contributed by Mingliang Liu

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 5e93093e6 -> cfd0fbf13


HADOOP-13850 s3guard to log choice of metadata store at debug. Contributed by 
Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfd0fbf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfd0fbf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfd0fbf1

Branch: refs/heads/HADOOP-13345
Commit: cfd0fbf13b7e901d991456f14ff7d4b89301f388
Parents: 5e93093
Author: Steve Loughran 
Authored: Thu Dec 1 11:26:59 2016 +
Committer: Steve Loughran 
Committed: Thu Dec 1 11:26:59 2016 +

--
 .../src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd0fbf1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
index c998072..904a1c3 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
@@ -66,7 +66,6 @@ final public class S3Guard {
* @return Reference to new MetadataStore.
*/
   public static MetadataStore getMetadataStore(FileSystem fs) {
-
 Preconditions.checkNotNull(fs);
 Configuration conf = fs.getConf();
 Preconditions.checkNotNull(conf);
@@ -74,8 +73,8 @@ final public class S3Guard {
 try {
   Class<? extends MetadataStore> msClass = getMetadataStoreClass(conf);
   msInstance = ReflectionUtils.newInstance(msClass, conf);
-  LOG.info("Using {} for {} filesystem", msClass.getSimpleName(),
-  fs.getScheme());
+  LOG.debug("Using {} metadata store for {} filesystem",
+  msClass.getSimpleName(), fs.getScheme());
 } catch (RuntimeException e) {
   LOG.error("Failed to instantiate {}, using NullMetadataStore:",
   conf.get(S3_METADATA_STORE_IMPL), e);





hadoop git commit: YARN-5944. Native services AM should remain up if RM is down. Contributed by Billie Rinaldi

Repository: hadoop
Updated Branches:
  refs/heads/yarn-native-services f32cf8f3b -> 165e50b00


YARN-5944. Native services AM should remain up if RM is down. Contributed by 
Billie Rinaldi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/165e50b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/165e50b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/165e50b0

Branch: refs/heads/yarn-native-services
Commit: 165e50b0094fe83774b205e28a5ecce9139c8694
Parents: f32cf8f
Author: Gour Saha 
Authored: Thu Dec 1 00:30:01 2016 -0800
Committer: Gour Saha 
Committed: Thu Dec 1 00:30:01 2016 -0800

--
 .../org/apache/slider/server/appmaster/SliderAppMaster.java | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/165e50b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 34b6a7d..8c39343 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -705,6 +705,11 @@ public class SliderAppMaster extends AbstractSliderLaunchedService
 synchronized (appState) {
   int heartbeatInterval = HEARTBEAT_INTERVAL;
 
+  // configure AM to wait forever for RM
+  getConfig().setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
+      -1);
+  getConfig().unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);
+
   // add the RM client -this brings the callbacks in
   asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this);
   addService(asyncRMClient);
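
The same two knobs can be set on any YarnConfiguration for experimenting
with the retry-forever policy outside the AM; the class name below is
illustrative, not part of the patch:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RetryForeverConfigDemo {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // -1 removes the cap on how long the RM proxy keeps retrying a down RM.
    conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, -1);
    // A fixed failover attempt limit would defeat the unbounded wait, so
    // the patch clears it if set.
    conf.unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);
    System.out.println(conf.getLong(
        YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, 0));
  }
}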

