MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page
in JHS (haibochen via rkanter)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c87b3a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c87b3a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c87b3a44
Branch: refs/heads/YARN-5085
Commit: c87b3a448a00df97149a4e93a8c39d9ad0268bdb
Parents: 2d77dc7
Author: Robert Kanter
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter
Committed: Thu Dec 1 17:29:38 2016 -0800
--
.../mapreduce/v2/app/webapp/AppController.java | 34
.../mapreduce/v2/app/webapp/ConfBlock.java | 2 +-
.../v2/app/webapp/TestAppController.java| 14
.../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java | 2 ++
.../org/apache/hadoop/yarn/webapp/Router.java | 23 ++---
.../org/apache/hadoop/yarn/webapp/WebApp.java | 13
6 files changed, 83 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements
AMParams {
}
/**
+ * Handle requests to download the job configuration.
+ */
+ public void downloadConf() {
+try {
+ requireJob();
+} catch (Exception e) {
+ renderText(e.getMessage());
+ return;
+}
+writeJobConf();
+ }
+
+ private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+ Configuration jobConf = job.loadConfFile();
+ response().setContentType("text/xml");
+ response().setHeader("Content-Disposition",
+ "attachment; filename=" + jobId + ".xml");
+ jobConf.writeXml(writer());
+} catch (IOException e) {
+ LOG.error("Error reading/writing job" +
+ " conf file for job: " + jobId, e);
+ renderText(e.getMessage());
+}
+ }
+
+ /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
try {
ConfInfo info = new ConfInfo(job);
- html.div().h3(confPath.toString())._();
+ html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
TBODY tbody = html.
// Tasks table
table("#conf").
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
YARN-4997. Update fair scheduler to use pluggable auth provider (Contributed by
Tao Jie via Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3befc02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3befc02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3befc02
Branch: refs/heads/YARN-5085
Commit: b3befc021b0e2d63d1a3710ea450797d1129f1f5
Parents: 625df87
Author: Daniel Templeton
Authored: Wed Nov 30 09:50:33 2016 -0800
Committer: Daniel Templeton
Committed: Wed Nov 30 09:50:33 2016 -0800
--
.../security/YarnAuthorizationProvider.java | 15 +
.../scheduler/fair/AllocationConfiguration.java | 38 +--
.../fair/AllocationFileLoaderService.java | 68 +---
.../resourcemanager/scheduler/fair/FSQueue.java | 22 +--
.../scheduler/fair/FairScheduler.java | 45 +++--
.../scheduler/fair/TestFairScheduler.java | 4 +-
6 files changed, 149 insertions(+), 43 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3befc02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
index 4b43ea1..9ae4bd7 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/YarnAuthorizationProvider.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import com.google.common.annotations.VisibleForTesting;
import java.util.List;
/**
@@ -61,6 +62,20 @@ public abstract class YarnAuthorizationProvider {
}
/**
+ * Destroy the {@link YarnAuthorizationProvider} instance.
+ * This method is called only in Tests.
+ */
+ @VisibleForTesting
+ public static void destroy() {
+synchronized (YarnAuthorizationProvider.class) {
+ if (authorizer != null) {
+LOG.debug(authorizer.getClass().getName() + " is destroyed.");
+authorizer = null;
+ }
+}
+ }
+
+ /**
* Initialize the provider. Invoked on daemon startup. DefaultYarnAuthorizer
is
* initialized based on configurations.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3befc02/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index c771887..7bd2616 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
@@ -25,13 +26,14 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.api.records.ReservationACL;
import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.security.AccessType;
import
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import
HADOOP-13790. Make qbt script executable. Contributed by Andrew Wang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/be5a7570
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/be5a7570
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/be5a7570
Branch: refs/heads/YARN-5085
Commit: be5a757096246d5c4ef73da9d233adda67bd3d69
Parents: 7c84871
Author: Akira Ajisaka
Authored: Thu Dec 1 03:52:04 2016 +0900
Committer: Akira Ajisaka
Committed: Thu Dec 1 03:52:44 2016 +0900
--
dev-support/bin/qbt | 0
1 file changed, 0 insertions(+), 0 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/be5a7570/dev-support/bin/qbt
--
diff --git a/dev-support/bin/qbt b/dev-support/bin/qbt
old mode 100644
new mode 100755
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
YARN-4395. Typo in comment in ClientServiceDelegate (Contributed by Alison Yu
via Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d8b4f6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d8b4f6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d8b4f6c
Branch: refs/heads/YARN-5085
Commit: 6d8b4f6c2791f861a55ac78c2950f783693e912a
Parents: dd9a96c
Author: Daniel Templeton
Authored: Tue Nov 29 15:30:22 2016 -0800
Committer: Daniel Templeton
Committed: Tue Nov 29 15:30:22 2016 -0800
--
.../java/org/apache/hadoop/mapred/ClientServiceDelegate.java | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d8b4f6c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index eac8dbc..72339e5 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -335,8 +335,8 @@ public class ClientServiceDelegate {
throw new IOException(e.getTargetException());
}
-// if it's AM shut down, do not decrement maxClientRetry as we wait for
-// AM to be restarted.
+// if its AM shut down, do not decrement maxClientRetry while we wait
+// for its AM to be restarted.
if (!usingAMProxy.get()) {
maxClientRetry--;
}
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed
by Haibo Chen via Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d77dc72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d77dc72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d77dc72
Branch: refs/heads/YARN-5085
Commit: 2d77dc727d9b5e56009bbc36643d85500efcbca5
Parents: 19f373a
Author: Daniel Templeton
Authored: Thu Dec 1 15:57:39 2016 -0800
Committer: Daniel Templeton
Committed: Thu Dec 1 15:57:39 2016 -0800
--
.../hadoop/yarn/client/TestGetGroups.java | 36 +---
1 file changed, 24 insertions(+), 12 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d77dc72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
index e947ece..da0258c 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
@@ -20,16 +20,21 @@ package org.apache.hadoop.yarn.client;
import java.io.IOException;
import java.io.PrintStream;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.tools.GetGroupsTestBase;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -42,30 +47,37 @@ public class TestGetGroups extends GetGroupsTestBase {
private static Configuration conf;
@BeforeClass
- public static void setUpResourceManager() throws IOException,
InterruptedException {
+ public static void setUpResourceManager() throws InterruptedException {
conf = new YarnConfiguration();
resourceManager = new ResourceManager() {
@Override
protected void doSecureLogin() throws IOException {
};
};
+
+// a reliable way to wait for resource manager to start
+CountDownLatch rmStartedSignal = new CountDownLatch(1);
+ServiceStateChangeListener rmStateChangeListener =
+new ServiceStateChangeListener() {
+ @Override
+ public void stateChanged(Service service) {
+if (service.getServiceState() == STATE.STARTED) {
+ rmStartedSignal.countDown();
+}
+ }
+};
+resourceManager.registerServiceListener(rmStateChangeListener);
+
resourceManager.init(conf);
new Thread() {
public void run() {
resourceManager.start();
};
}.start();
-int waitCount = 0;
-while (resourceManager.getServiceState() == STATE.INITED
-&& waitCount++ < 10) {
- LOG.info("Waiting for RM to start...");
- Thread.sleep(1000);
-}
-if (resourceManager.getServiceState() != STATE.STARTED) {
- throw new IOException(
- "ResourceManager failed to start. Final state is "
- + resourceManager.getServiceState());
-}
+
+boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
+Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
+
LOG.info("ResourceManager RMAdmin address: " +
conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
}
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
HDFS-11132. Allow AccessControlException in contract tests when getFileStatus
on subdirectory of existing files. Contributed by Vishwajeet Dusane
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19f373a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19f373a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19f373a4
Branch: refs/heads/YARN-5085
Commit: 19f373a46b2abb7a575f7884a9c7443b8ed67cd3
Parents: 96c5749
Author: Mingliang Liu
Authored: Thu Dec 1 12:54:03 2016 -0800
Committer: Mingliang Liu
Committed: Thu Dec 1 12:54:28 2016 -0800
--
.../fs/FileContextMainOperationsBaseTest.java | 21
.../hadoop/fs/FileSystemContractBaseTest.java | 17 ++--
2 files changed, 32 insertions(+), 6 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 5f9151a..2b3ab2a 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
@@ -251,8 +252,14 @@ public abstract class FileContextMainOperationsBaseTest {
} catch (IOException e) {
// expected
}
-Assert.assertFalse(exists(fc, testSubDir));
-
+
+try {
+ Assert.assertFalse(exists(fc, testSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
+
Path testDeepSubDir = getTestRootPath(fc, "test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
@@ -260,8 +267,14 @@ public abstract class FileContextMainOperationsBaseTest {
} catch (IOException e) {
// expected
}
-Assert.assertFalse(exists(fc, testDeepSubDir));
-
+
+try {
+ Assert.assertFalse(exists(fc, testDeepSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
+
}
@Test
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index bbd7336..6247959 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.StringUtils;
/**
@@ -158,7 +159,13 @@ public abstract class FileSystemContractBaseTest extends
TestCase {
} catch (IOException e) {
// expected
}
-assertFalse(fs.exists(testSubDir));
+
+try {
+ assertFalse(fs.exists(testSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
try {
@@ -167,7 +174,13 @@ public abstract class FileSystemContractBaseTest extends
TestCase {
} catch (IOException e) {
// expected
}
-assertFalse(fs.exists(testDeepSubDir));
+
+try {
+ assertFalse(fs.exists(testDeepSubDir));
+} catch (AccessControlException e) {
+
HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS. Contributed by Sammi Chen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51e6c1cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51e6c1cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51e6c1cc
Branch: refs/heads/YARN-5085
Commit: 51e6c1cc3f66f9908d2e816e7291ac34bee43f52
Parents: cfd8076
Author: Kai Zheng
Authored: Wed Nov 30 15:52:56 2016 +0800
Committer: Kai Zheng
Committed: Wed Nov 30 15:52:56 2016 +0800
--
.../io/erasurecode/ErasureCodeConstants.java| 3 ++
.../hadoop/hdfs/protocol/HdfsConstants.java | 1 +
.../namenode/ErasureCodingPolicyManager.java| 23 +++--
.../hadoop/hdfs/server/namenode/INodeFile.java | 8 +++-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 28 +--
.../hadoop/hdfs/TestDFSStripedInputStream.java | 50 +---
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 27 ---
.../TestDFSStripedOutputStreamWithFailure.java | 37 +++
.../hdfs/TestDFSXORStripedInputStream.java | 33 +
.../hdfs/TestDFSXORStripedOutputStream.java | 35 ++
...estDFSXORStripedOutputStreamWithFailure.java | 36 ++
11 files changed, 240 insertions(+), 41 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index 8d6ff85..ffa0bce 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -38,4 +38,7 @@ public final class ErasureCodeConstants {
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
RS_LEGACY_CODEC_NAME, 6, 3);
+
+ public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
+ XOR_CODEC_NAME, 2, 1);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index acbc8f6..b55b4df 100644
---
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -147,6 +147,7 @@ public final class HdfsConstants {
public static final byte RS_6_3_POLICY_ID = 0;
public static final byte RS_3_2_POLICY_ID = 1;
public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
+ public static final byte XOR_2_1_POLICY_ID = 3;
/* Hidden constructor */
protected HdfsConstants() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/51e6c1cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index c4bc8de..8a85d23 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -36,7 +36,7 @@ import java.util.TreeMap;
public final class ErasureCodingPolicyManager {
/**
- * TODO: HDFS-8095
+ * TODO: HDFS-8095.
*/
private static final int DEFAULT_CELLSIZE = 64 * 1024;
private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -48,10 +48,14 @@ public final class ErasureCodingPolicyManager {
private static final ErasureCodingPolicy SYS_POLICY3 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
+ private static final ErasureCodingPolicy SYS_POLICY4 =
+ new
HDFS-5517. Lower the default maximum number of blocks per file. Contributed by
Aaron T. Myers and Andrew Wang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7226a71b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7226a71b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7226a71b
Branch: refs/heads/YARN-5085
Commit: 7226a71b1f684f562bd88ee121f1dd7aa8b73816
Parents: 69fb70c
Author: Andrew Wang
Authored: Wed Nov 30 15:58:31 2016 -0800
Committer: Andrew Wang
Committed: Wed Nov 30 15:58:31 2016 -0800
--
.../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 +-
.../hadoop-hdfs/src/main/resources/hdfs-default.xml | 2 +-
.../hdfs/server/datanode/TestDirectoryScanner.java | 11 +--
.../server/namenode/metrics/TestNameNodeMetrics.java | 2 +-
4 files changed, 12 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d7d3c9d..df21857 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_MIN_BLOCK_SIZE_KEY =
"dfs.namenode.fs-limits.min-block-size";
public static final longDFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY =
"dfs.namenode.fs-limits.max-blocks-per-file";
- public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT =
1024*1024;
+ public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT =
10*1000;
public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY =
"dfs.namenode.fs-limits.max-xattrs-per-inode";
public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY =
"dfs.namenode.fs-limits.max-xattr-size";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 671c98c..086f667 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -372,7 +372,7 @@
dfs.namenode.fs-limits.max-blocks-per-file
-1048576
+10000
Maximum number of blocks per file, enforced by the Namenode on
write. This prevents the creation of extremely large files which can
degrade performance.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7226a71b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index f08b579..d7c8383 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -590,8 +590,15 @@ public class TestDirectoryScanner {
100);
DataNode dataNode = cluster.getDataNodes().get(0);
- createFile(GenericTestUtils.getMethodName(),
- BLOCK_LENGTH * blocks, false);
+ final int maxBlocksPerFile = (int) DFSConfigKeys
+ .DFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT;
+ int numBlocksToCreate = blocks;
+ while (numBlocksToCreate > 0) {
+final int toCreate = Math.min(maxBlocksPerFile, numBlocksToCreate);
+createFile(GenericTestUtils.getMethodName() + numBlocksToCreate,
+BLOCK_LENGTH * toCreate, false);
+numBlocksToCreate -= toCreate;
+ }
float ratio = 0.0f;
int retries = maxRetries;
Revert "HDFS-5517. Lower the default maximum number of blocks per file.
Contributed by Aaron T. Myers."
This reverts commit 09451252fae90a3ec192b8d7f0c49508df65e8c5.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b407d531
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b407d531
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b407d531
Branch: refs/heads/YARN-5085
Commit: b407d5319597f01ebd51736e299239022bd08028
Parents: 25f9872
Author: Andrew Wang
Authored: Tue Nov 29 10:50:10 2016 -0800
Committer: Andrew Wang
Committed: Tue Nov 29 10:50:10 2016 -0800
--
.../src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java| 2 +-
.../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b407d531/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index df21857..d7d3c9d 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -399,7 +399,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_MIN_BLOCK_SIZE_KEY =
"dfs.namenode.fs-limits.min-block-size";
public static final longDFS_NAMENODE_MIN_BLOCK_SIZE_DEFAULT = 1024*1024;
public static final String DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY =
"dfs.namenode.fs-limits.max-blocks-per-file";
- public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT =
10*1000;
+ public static final longDFS_NAMENODE_MAX_BLOCKS_PER_FILE_DEFAULT =
1024*1024;
public static final String DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY =
"dfs.namenode.fs-limits.max-xattrs-per-inode";
public static final int DFS_NAMENODE_MAX_XATTRS_PER_INODE_DEFAULT = 32;
public static final String DFS_NAMENODE_MAX_XATTR_SIZE_KEY =
"dfs.namenode.fs-limits.max-xattr-size";
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b407d531/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9fce84f..c9d74bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -372,7 +372,7 @@
dfs.namenode.fs-limits.max-blocks-per-file
-1
+1048576
Maximum number of blocks per file, enforced by the Namenode on
write. This prevents the creation of extremely large files which can
degrade performance.
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
YARN-5761. Separate QueueManager from Scheduler. (Xuan Gong via gtcarrera9)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69fb70c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69fb70c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69fb70c3
Branch: refs/heads/YARN-5085
Commit: 69fb70c31aa277f7fb14b05c0185ddc5cd90793d
Parents: 3fd844b
Author: Li Lu
Authored: Wed Nov 30 13:38:42 2016 -0800
Committer: Li Lu
Committed: Wed Nov 30 13:38:42 2016 -0800
--
.../scheduler/SchedulerQueueManager.java| 75
.../scheduler/capacity/CapacityScheduler.java | 294 +++
.../capacity/CapacitySchedulerQueueManager.java | 361 +++
.../capacity/TestApplicationLimits.java | 35 +-
.../TestApplicationLimitsByPartition.java | 7 +-
.../scheduler/capacity/TestChildQueueOrder.java | 9 +-
.../scheduler/capacity/TestLeafQueue.java | 9 +-
.../scheduler/capacity/TestParentQueue.java | 39 +-
.../scheduler/capacity/TestReservations.java| 8 +-
.../scheduler/capacity/TestUtils.java | 2 +-
10 files changed, 536 insertions(+), 303 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69fb70c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
new file mode 100644
index 000..92b989a
--- /dev/null
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerQueueManager.java
@@ -0,0 +1,75 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
+
+import java.io.IOException;
+import java.util.Map;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import
org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
+
+/**
+ *
+ * Context of the Queues in Scheduler.
+ *
+ */
+@Private
+@Unstable
+public interface SchedulerQueueManager<T extends Queue, E extends ReservationSchedulerConfiguration> {
+
+ /**
+ * Get the root queue.
+ * @return root queue
+ */
+ T getRootQueue();
+
+ /**
+ * Get all the queues.
+ * @return a map containing all the queues as well as related queue names
+ */
+ Map<String, T> getQueues();
+
+ /**
+ * Remove the queue from the existing queue.
+ * @param queueName the queue name
+ */
+ void removeQueue(String queueName);
+
+ /**
+ * Add a new queue to the existing queues.
+ * @param queueName the queue name
+ * @param queue the queue object
+ */
+ void addQueue(String queueName, T queue);
+
+ /**
+ * Get a queue matching the specified queue name.
+ * @param queueName the queue name
+ * @return a queue object
+ */
+ T getQueue(String queueName);
+
+ /**
+ * Reinitialize the queues.
+ * @param newConf the configuration
+ * @throws IOException if fails to re-initialize queues
+ */
+ void reinitializeQueues(E newConf) throws IOException;
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/69fb70c3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git
Repository: hadoop
Updated Branches:
refs/heads/YARN-5085 00096dcc0 -> c87b3a448
YARN-5774. MR Job stuck in ACCEPTED status without any progress in Fair
Scheduler
if set yarn.scheduler.minimum-allocation-mb to 0. (Contributed by Yufei Gu via
Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25f9872b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25f9872b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25f9872b
Branch: refs/heads/YARN-5085
Commit: 25f9872be63423ada6a18481eaad2888e731fdac
Parents: 00096dc
Author: Daniel Templeton
Authored: Tue Nov 29 09:40:49 2016 -0800
Committer: Daniel Templeton
Committed: Tue Nov 29 09:40:49 2016 -0800
--
.../api/records/AbstractResourceRequest.java| 55 ++
.../yarn/api/records/ResourceRequest.java | 19 +
.../api/records/UpdateContainerRequest.java | 18 +
.../resource/DefaultResourceCalculator.java | 19 +++--
.../resource/DominantResourceCalculator.java| 25 ++-
.../yarn/util/resource/ResourceCalculator.java | 17 +
.../util/resource/TestResourceCalculator.java | 79
.../server/resourcemanager/RMAppManager.java| 6 +-
.../server/resourcemanager/RMServerUtils.java | 9 +--
.../scheduler/AbstractYarnScheduler.java| 20 +
.../scheduler/SchedulerUtils.java | 59 +++
.../scheduler/YarnScheduler.java| 8 ++
.../scheduler/capacity/CapacityScheduler.java | 4 +-
.../scheduler/fair/FairScheduler.java | 40 --
.../scheduler/fifo/FifoScheduler.java | 4 +-
.../scheduler/TestSchedulerUtils.java | 22 +++---
16 files changed, 258 insertions(+), 146 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f9872b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
new file mode 100644
index 000..819a607
--- /dev/null
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AbstractResourceRequest.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code AbstractResourceRequest} represents a generic resource request made
+ * by an application to the {@code ResourceManager}.
+ *
+ * It includes:
+ *
+ * {@link Resource} capability required for each request.
+ *
+ *
+ * @see Resource
+ */
+@Public
+@Unstable
+public abstract class AbstractResourceRequest {
+
+ /**
+ * Set the Resource capability of the request
+ * @param capability Resource capability of the request
+ */
+ @Public
+ @Stable
+ public abstract void setCapability(Resource capability);
+
+ /**
+ * Get the Resource capability of the request.
+ * @return Resource capability of the request
+ */
+ @Public
+ @Stable
+ public abstract Resource getCapability();
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/25f9872b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
Revert due to an error "HDFS-10994. Support an XOR policy XOR-2-1-64k in HDFS.
Contributed by Sammi Chen"
This reverts commit 5614f847b2ef2a5b70bd9a06edc4eba06174c6.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfd8076f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfd8076f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfd8076f
Branch: refs/heads/YARN-5085
Commit: cfd8076f81930c3ffea8ec2ef42926217b83ab1a
Parents: aeecfa2
Author: Kai Zheng
Authored: Wed Nov 30 15:44:52 2016 +0800
Committer: Kai Zheng
Committed: Wed Nov 30 15:44:52 2016 +0800
--
.../io/erasurecode/ErasureCodeConstants.java| 3 -
.../hadoop/hdfs/protocol/HdfsConstants.java | 1 -
.../namenode/ErasureCodingPolicyManager.java| 23 +-
.../hadoop/hdfs/server/namenode/INodeFile.java | 8 +-
.../org/apache/hadoop/hdfs/DFSTestUtil.java | 28 +-
.../hadoop/hdfs/TestDFSStripedInputStream.java | 50 +--
.../hadoop/hdfs/TestDFSStripedOutputStream.java | 27 +-
.../TestDFSStripedOutputStreamWithFailure.java | 37 +-
.../hdfs/TestDFSXORStripedInputStream.java | 33 --
.../hdfs/TestDFSXORStripedOutputStream.java | 35 --
...estDFSXORStripedOutputStreamWithFailure.java | 36 --
...tyPreemptionPolicyForReservedContainers.java | 430 +++
12 files changed, 471 insertions(+), 240 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index ffa0bce..8d6ff85 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -38,7 +38,4 @@ public final class ErasureCodeConstants {
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
RS_LEGACY_CODEC_NAME, 6, 3);
-
- public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
- XOR_CODEC_NAME, 2, 1);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index b55b4df..acbc8f6 100644
---
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -147,7 +147,6 @@ public final class HdfsConstants {
public static final byte RS_6_3_POLICY_ID = 0;
public static final byte RS_3_2_POLICY_ID = 1;
public static final byte RS_6_3_LEGACY_POLICY_ID = 2;
- public static final byte XOR_2_1_POLICY_ID = 3;
/* Hidden constructor */
protected HdfsConstants() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd8076f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 8a85d23..c4bc8de 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -36,7 +36,7 @@ import java.util.TreeMap;
public final class ErasureCodingPolicyManager {
/**
- * TODO: HDFS-8095.
+ * TODO: HDFS-8095
*/
private static final int DEFAULT_CELLSIZE = 64 * 1024;
private static final ErasureCodingPolicy SYS_POLICY1 =
@@ -48,14 +48,10 @@ public final class ErasureCodingPolicyManager {
private static final ErasureCodingPolicy SYS_POLICY3 =
new ErasureCodingPolicy(ErasureCodeConstants.RS_6_3_LEGACY_SCHEMA,
DEFAULT_CELLSIZE, HdfsConstants.RS_6_3_LEGACY_POLICY_ID);
- private static final
HADOOP-13830. Intermittent failure of
ITestS3NContractRootDir#testRecursiveRootListing: "Can not create a Path from
an empty string". Contributed by Steve Loughran
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fd844b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fd844b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fd844b9
Branch: refs/heads/YARN-5085
Commit: 3fd844b99fdfae6be6e5e261f371d175aad14229
Parents: 4fca94f
Author: Mingliang Liu
Authored: Wed Nov 30 13:01:02 2016 -0800
Committer: Mingliang Liu
Committed: Wed Nov 30 13:01:19 2016 -0800
--
.../org/apache/hadoop/fs/s3native/NativeS3FileSystem.java | 7 ++-
1 file changed, 6 insertions(+), 1 deletion(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fd844b9/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
--
diff --git
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index f741298..1a45db3 100644
---
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -587,7 +587,12 @@ public class NativeS3FileSystem extends FileSystem {
for (String commonPrefix : listing.getCommonPrefixes()) {
Path subpath = keyToPath(commonPrefix);
String relativePath = pathUri.relativize(subpath.toUri()).getPath();
-status.add(newDirectory(new Path(absolutePath, relativePath)));
+// sometimes the common prefix includes the base dir (HADOOP-13830).
+// avoid that problem by detecting it and keeping it out
+// of the list
+if (!relativePath.isEmpty()) {
+ status.add(newDirectory(new Path(absolutePath, relativePath)));
+}
}
priorLastKey = listing.getPriorLastKey();
} while (priorLastKey != null);
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
Repository: hadoop
Updated Branches:
refs/heads/branch-2 99b046f8a -> 5a7941a4f
HDFS-11180. Intermittent deadlock in NameNode when failover happens.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a7941a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a7941a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a7941a4
Branch: refs/heads/branch-2
Commit: 5a7941a4fc193259ab4a306f7fe4f68bf101d0e0
Parents: 99b046f
Author: Akira Ajisaka
Authored: Fri Dec 2 11:34:05 2016 +0900
Committer: Akira Ajisaka
Committed: Fri Dec 2 11:34:05 2016 +0900
--
.../dev-support/findbugsExcludeFile.xml | 27
.../hadoop/hdfs/server/namenode/FSEditLog.java | 72 +---
.../hadoop/hdfs/server/namenode/FSImage.java| 13 +++-
.../hdfs/server/namenode/FSNamesystem.java | 27 ++--
.../hdfs/server/namenode/NameNodeRpcServer.java | 2 +-
.../server/namenode/ha/StandbyCheckpointer.java | 4 +-
.../server/namenode/TestFSNamesystemMBean.java | 24 +++
7 files changed, 147 insertions(+), 22 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7941a4/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a7941a4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index bc31ffc..fbf03fd 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -156,14 +156,16 @@ public class FSEditLog implements LogsPurgeable {
private EditLogOutputStream editLogStream = null;
// a monotonically increasing counter that represents transactionIds.
- private long txid = 0;
+ // All of the threads which update/increment txid are synchronized,
+ // so make txid volatile instead of AtomicLong.
+ private volatile long txid = 0;
// stores the last synced transactionId.
private long synctxid = 0;
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
- private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+ private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
@@ -339,7 +341,18 @@ public class FSEditLog implements LogsPurgeable {
return state == State.IN_SEGMENT ||
state == State.BETWEEN_LOG_SEGMENTS;
}
-
+
+ /**
+ * Return true if the log is currently open in write mode.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is currently open in write mode, regardless
+ * of whether it actually has an open segment.
+ */
+ boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+ }
+
/**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -349,6 +362,16 @@ public class FSEditLog implements LogsPurgeable {
}
/**
+ * Return true if the state is IN_SEGMENT.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is open in write mode and has a segment open
+ * ready to take edits.
+ */
+ boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+ }
+
+ /**
* @return true if the log is open in read mode.
*/
public synchronized boolean isOpenForRead() {
@@ -523,7 +546,16 @@ public class FSEditLog implements LogsPurgeable {
public synchronized long getLastWrittenTxId() {
return txid;
}
-
+
+ /**
+ * Return the transaction ID of the last transaction written to the log.
+ * This
Repository: hadoop
Updated Branches:
refs/heads/branch-2.8 dd4acebb4 -> 5025a898e
HDFS-11180. Intermittent deadlock in NameNode when failover happens.
(cherry picked from commit 5a7941a4fc193259ab4a306f7fe4f68bf101d0e0)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5025a898
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5025a898
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5025a898
Branch: refs/heads/branch-2.8
Commit: 5025a898e13b0d442aa8c7a3433a90367e013a42
Parents: dd4aceb
Author: Akira Ajisaka
Authored: Fri Dec 2 11:34:05 2016 +0900
Committer: Akira Ajisaka
Committed: Fri Dec 2 11:35:22 2016 +0900
--
.../dev-support/findbugsExcludeFile.xml | 27
.../hadoop/hdfs/server/namenode/FSEditLog.java | 72 +---
.../hadoop/hdfs/server/namenode/FSImage.java| 13 +++-
.../hdfs/server/namenode/FSNamesystem.java | 27 ++--
.../hdfs/server/namenode/NameNodeRpcServer.java | 2 +-
.../server/namenode/ha/StandbyCheckpointer.java | 4 +-
.../server/namenode/TestFSNamesystemMBean.java | 24 +++
7 files changed, 147 insertions(+), 22 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5025a898/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5025a898/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 3eda0f5..2fa70df 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -156,14 +156,16 @@ public class FSEditLog implements LogsPurgeable {
private EditLogOutputStream editLogStream = null;
// a monotonically increasing counter that represents transactionIds.
- private long txid = 0;
+ // All of the threads which update/increment txid are synchronized,
+ // so make txid volatile instead of AtomicLong.
+ private volatile long txid = 0;
// stores the last synced transactionId.
private long synctxid = 0;
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
- private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+ private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
@@ -339,7 +341,18 @@ public class FSEditLog implements LogsPurgeable {
return state == State.IN_SEGMENT ||
state == State.BETWEEN_LOG_SEGMENTS;
}
-
+
+ /**
+ * Return true if the log is currently open in write mode.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is currently open in write mode, regardless
+ * of whether it actually has an open segment.
+ */
+ boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+ }
+
/**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -349,6 +362,16 @@ public class FSEditLog implements LogsPurgeable {
}
/**
+ * Return true if the state is IN_SEGMENT.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is open in write mode and has a segment open
+ * ready to take edits.
+ */
+ boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+ }
+
+ /**
* @return true if the log is open in read mode.
*/
public synchronized boolean isOpenForRead() {
@@ -523,7 +546,16 @@ public class FSEditLog implements LogsPurgeable {
public synchronized long getLastWrittenTxId() {
return txid;
}
-
+
+ /**
+ * Return
Repository: hadoop
Updated Branches:
refs/heads/yarn-native-services 165e50b00 -> 681a3a65d
YARN-5958. Fix ASF license warnings for slider core module. Contributed by
Billie Rinaldi
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/681a3a65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/681a3a65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/681a3a65
Branch: refs/heads/yarn-native-services
Commit: 681a3a65d1f1fa8272444e8f18a8f16aa0bac526
Parents: 165e50b
Author: Gour Saha
Authored: Thu Dec 1 17:45:44 2016 -0800
Committer: Gour Saha
Committed: Thu Dec 1 17:45:44 2016 -0800
--
.../hadoop-yarn-slider-core/pom.xml | 61 +---
.../src/license/THIRD-PARTY.properties | 33 ---
2 files changed, 14 insertions(+), 80 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/681a3a65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
index 66e9ee9..10cf6b1 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/pom.xml
@@ -78,6 +78,20 @@
+
+org.apache.rat
+apache-rat-plugin
+
+
+**/*.json
+src/main/resources/webapps/slideram/.keep
+
+
src/main/java/org/apache/slider/api/proto/Messages.java
+
src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
+
+
+
+
@@ -384,53 +398,6 @@
-
-
- rat
-
-
-
-
-org.apache.rat
-apache-rat-plugin
-
-
-check-licenses
-
- check
-
-
-
-
-
-**/*.json
-src/test/python/agent.ini
-src/test/python/version
-**/THIRD-PARTY.properties
-src/main/resources/webapps/slideram/.keep
-src/main/resources/webapps/slideragent/.keep
-
src/main/resources/webapps/static/yarn.dt.plugins.js
-
-
src/main/resources/webapps/static/dt-1.9.4/**
-
-
src/main/resources/webapps/static/jquery/jquery-1.8.2.min.js
-
-
src/main/resources/webapps/static/jquery/jquery-ui-1.9.1.custom.min.js
-
src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css
-
-
src/main/resources/webapps/static/jt/jquery.jstree.js
-
-
src/main/java/org/apache/slider/api/proto/Messages.java
-
src/main/java/org/apache/slider/api/proto/SliderClusterAPI.java
-
src/test/app_packages/test_am_config/resources/test.template
-
src/test/app_packages/test_am_config/test_archive/testfile
-
-
-
-
-
-
-
http://git-wip-us.apache.org/repos/asf/hadoop/blob/681a3a65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
deleted file mode 100644
index 1abd56e..000
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/license/THIRD-PARTY.properties
+++ /dev/null
@@ -1,33 +0,0 @@
-# Generated by org.codehaus.mojo.license.AddThirdPartyMojo
-#---
-# Already used licenses in project :
-# - Apache License
-# - BSD
-# - CDDL + GPLv2 with classpath exception
-# - CDDL 1.1
-# - CDDL License
-# - CDDL+GPL
-# - Common Public License Version 1.0
-# - Eclipse Public License - Version 1.0
-# - GNU Lesser General Public License (LGPL), Version 2.1
-# - GNU Lesser General Public
Repository: hadoop
Updated Branches:
refs/heads/branch-2 0d8a35bd6 -> 99b046f8a
MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page
in JHS (haibochen via rkanter)
(cherry picked from commit c87b3a448a00df97149a4e93a8c39d9ad0268bdb)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b046f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b046f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b046f8
Branch: refs/heads/branch-2
Commit: 99b046f8a9616bfd0bff978017dbab6e22966d8f
Parents: 0d8a35b
Author: Robert Kanter
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter
Committed: Thu Dec 1 17:30:02 2016 -0800
--
.../mapreduce/v2/app/webapp/AppController.java | 34
.../mapreduce/v2/app/webapp/ConfBlock.java | 2 +-
.../v2/app/webapp/TestAppController.java| 14
.../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java | 2 ++
.../org/apache/hadoop/yarn/webapp/Router.java | 23 ++---
.../org/apache/hadoop/yarn/webapp/WebApp.java | 13
6 files changed, 83 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements
AMParams {
}
/**
+ * Handle requests to download the job configuration.
+ */
+ public void downloadConf() {
+try {
+ requireJob();
+} catch (Exception e) {
+ renderText(e.getMessage());
+ return;
+}
+writeJobConf();
+ }
+
+ private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+ Configuration jobConf = job.loadConfFile();
+ response().setContentType("text/xml");
+ response().setHeader("Content-Disposition",
+ "attachment; filename=" + jobId + ".xml");
+ jobConf.writeXml(writer());
+} catch (IOException e) {
+ LOG.error("Error reading/writing job" +
+ " conf file for job: " + jobId, e);
+ renderText(e.getMessage());
+}
+ }
+
+ /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
try {
ConfInfo info = new ConfInfo(job);
- html.div().h3(confPath.toString())._();
+ html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
TBODY tbody = html.
// Tasks table
table("#conf").
http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b046f8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git
Repository: hadoop
Updated Branches:
refs/heads/trunk 2d77dc727 -> c87b3a448
MAPREDUCE-6787. Allow job_conf.xml to be downloadable on the job overview page
in JHS (haibochen via rkanter)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c87b3a44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c87b3a44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c87b3a44
Branch: refs/heads/trunk
Commit: c87b3a448a00df97149a4e93a8c39d9ad0268bdb
Parents: 2d77dc7
Author: Robert Kanter
Authored: Thu Dec 1 17:29:16 2016 -0800
Committer: Robert Kanter
Committed: Thu Dec 1 17:29:38 2016 -0800
--
.../mapreduce/v2/app/webapp/AppController.java | 34
.../mapreduce/v2/app/webapp/ConfBlock.java | 2 +-
.../v2/app/webapp/TestAppController.java| 14
.../hadoop/mapreduce/v2/hs/webapp/HsWebApp.java | 2 ++
.../org/apache/hadoop/yarn/webapp/Router.java | 23 ++---
.../org/apache/hadoop/yarn/webapp/WebApp.java | 13
6 files changed, 83 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index 305ec7e..e30e1b9 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -324,6 +324,40 @@ public class AppController extends Controller implements
AMParams {
}
/**
+ * Handle requests to download the job configuration.
+ */
+ public void downloadConf() {
+try {
+ requireJob();
+} catch (Exception e) {
+ renderText(e.getMessage());
+ return;
+}
+writeJobConf();
+ }
+
+ private void writeJobConf() {
+String jobId = $(JOB_ID);
+assert(!jobId.isEmpty());
+
+JobId jobID = MRApps.toJobID($(JOB_ID));
+Job job = app.context.getJob(jobID);
+assert(job != null);
+
+try {
+ Configuration jobConf = job.loadConfFile();
+ response().setContentType("text/xml");
+ response().setHeader("Content-Disposition",
+ "attachment; filename=" + jobId + ".xml");
+ jobConf.writeXml(writer());
+} catch (IOException e) {
+ LOG.error("Error reading/writing job" +
+ " conf file for job: " + jobId, e);
+ renderText(e.getMessage());
+}
+ }
+
+ /**
* Render a BAD_REQUEST error.
* @param s the error message to include.
*/
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
index 4cb79bf..532c2bd 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/ConfBlock.java
@@ -70,7 +70,7 @@ public class ConfBlock extends HtmlBlock {
try {
ConfInfo info = new ConfInfo(job);
- html.div().h3(confPath.toString())._();
+ html.div().a("/jobhistory/downloadconf/" + jid, confPath.toString());
TBODY tbody = html.
// Tasks table
table("#conf").
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c87b3a44/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
--
diff --git
Repository: hadoop
Updated Branches:
refs/heads/trunk 19f373a46 -> 2d77dc727
YARN-5901. Fix race condition in TestGetGroups beforeclass setup() (Contributed
by Haibo Chen via Daniel Templeton)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d77dc72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d77dc72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d77dc72
Branch: refs/heads/trunk
Commit: 2d77dc727d9b5e56009bbc36643d85500efcbca5
Parents: 19f373a
Author: Daniel Templeton
Authored: Thu Dec 1 15:57:39 2016 -0800
Committer: Daniel Templeton
Committed: Thu Dec 1 15:57:39 2016 -0800
--
.../hadoop/yarn/client/TestGetGroups.java | 36 +---
1 file changed, 24 insertions(+), 12 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d77dc72/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
index e947ece..da0258c 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestGetGroups.java
@@ -20,16 +20,21 @@ package org.apache.hadoop.yarn.client;
import java.io.IOException;
import java.io.PrintStream;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.service.Service;
import org.apache.hadoop.service.Service.STATE;
+import org.apache.hadoop.service.ServiceStateChangeListener;
import org.apache.hadoop.tools.GetGroupsTestBase;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -42,30 +47,37 @@ public class TestGetGroups extends GetGroupsTestBase {
private static Configuration conf;
@BeforeClass
- public static void setUpResourceManager() throws IOException,
InterruptedException {
+ public static void setUpResourceManager() throws InterruptedException {
conf = new YarnConfiguration();
resourceManager = new ResourceManager() {
@Override
protected void doSecureLogin() throws IOException {
};
};
+
+// a reliable way to wait for resource manager to start
+CountDownLatch rmStartedSignal = new CountDownLatch(1);
+ServiceStateChangeListener rmStateChangeListener =
+new ServiceStateChangeListener() {
+ @Override
+ public void stateChanged(Service service) {
+if (service.getServiceState() == STATE.STARTED) {
+ rmStartedSignal.countDown();
+}
+ }
+};
+resourceManager.registerServiceListener(rmStateChangeListener);
+
resourceManager.init(conf);
new Thread() {
public void run() {
resourceManager.start();
};
}.start();
-int waitCount = 0;
-while (resourceManager.getServiceState() == STATE.INITED
-&& waitCount++ < 10) {
- LOG.info("Waiting for RM to start...");
- Thread.sleep(1000);
-}
-if (resourceManager.getServiceState() != STATE.STARTED) {
- throw new IOException(
- "ResourceManager failed to start. Final state is "
- + resourceManager.getServiceState());
-}
+
+boolean rmStarted = rmStartedSignal.await(60000L, TimeUnit.MILLISECONDS);
+Assert.assertTrue("ResourceManager failed to start up.", rmStarted);
+
LOG.info("ResourceManager RMAdmin address: " +
conf.get(YarnConfiguration.RM_ADMIN_ADDRESS));
}
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
Repository: hadoop
Updated Branches:
refs/heads/trunk 96c574927 -> 19f373a46
HDFS-11132. Allow AccessControlException in contract tests when getFileStatus
on subdirectory of existing files. Contributed by Vishwajeet Dusane
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19f373a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19f373a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19f373a4
Branch: refs/heads/trunk
Commit: 19f373a46b2abb7a575f7884a9c7443b8ed67cd3
Parents: 96c5749
Author: Mingliang Liu
Authored: Thu Dec 1 12:54:03 2016 -0800
Committer: Mingliang Liu
Committed: Thu Dec 1 12:54:28 2016 -0800
--
.../fs/FileContextMainOperationsBaseTest.java | 21
.../hadoop/fs/FileSystemContractBaseTest.java | 17 ++--
2 files changed, 32 insertions(+), 6 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index 5f9151a..2b3ab2a 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
@@ -251,8 +252,14 @@ public abstract class FileContextMainOperationsBaseTest {
} catch (IOException e) {
// expected
}
-Assert.assertFalse(exists(fc, testSubDir));
-
+
+try {
+ Assert.assertFalse(exists(fc, testSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
+
Path testDeepSubDir = getTestRootPath(fc, "test/hadoop/file/deep/sub/dir");
try {
fc.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
@@ -260,8 +267,14 @@ public abstract class FileContextMainOperationsBaseTest {
} catch (IOException e) {
// expected
}
-Assert.assertFalse(exists(fc, testDeepSubDir));
-
+
+try {
+ Assert.assertFalse(exists(fc, testDeepSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
+
}
@Test
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19f373a4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
index bbd7336..6247959 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.StringUtils;
/**
@@ -158,7 +159,13 @@ public abstract class FileSystemContractBaseTest extends
TestCase {
} catch (IOException e) {
// expected
}
-assertFalse(fs.exists(testSubDir));
+
+try {
+ assertFalse(fs.exists(testSubDir));
+} catch (AccessControlException e) {
+ // Expected : HDFS-11132 Checks on paths under file may be rejected by
+ // file missing execute permission.
+}
Path testDeepSubDir = path("/test/hadoop/file/deep/sub/dir");
try {
@@ -167,7 +174,13 @@ public abstract class FileSystemContractBaseTest extends
TestCase {
} catch (IOException e) {
// expected
}
-assertFalse(fs.exists(testDeepSubDir));
+
+try {
+
Repository: hadoop
Updated Branches:
refs/heads/branch-2.8 afad13f8d -> dd4acebb4
HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn
Sharp.
(cherry picked from commit 0d8a35bd6de5d2a5a9b816ca98f31975e94bd7c6)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd4acebb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd4acebb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd4acebb
Branch: refs/heads/branch-2.8
Commit: dd4acebb4146c7629ce914df6642c5d69b8172d9
Parents: afad13f
Author: Kihwal Lee
Authored: Thu Dec 1 12:20:30 2016 -0600
Committer: Kihwal Lee
Committed: Thu Dec 1 12:20:30 2016 -0600
--
.../server/blockmanagement/BlockManager.java| 75 +++-
1 file changed, 24 insertions(+), 51 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd4acebb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0ba01aa..a929c43 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -143,7 +144,6 @@ public class BlockManager implements BlockStatsMXBean {
private boolean initializedReplQueues;
private final AtomicLong excessBlocksCount = new AtomicLong(0L);
- private final AtomicLong postponedMisreplicatedBlocksCount = new
AtomicLong(0L);
private final long startupDelayBlockDeletionInMs;
private final BlockReportLeaseManager blockReportLeaseManager;
private ObjectName mxBeanName;
@@ -178,7 +178,7 @@ public class BlockManager implements BlockStatsMXBean {
}
/** Used by metrics */
public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
}
/** Used by metrics */
public int getPendingDataNodeMessageCount() {
@@ -218,8 +218,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
- private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
- new LightWeightHashSet<>();
+ private final LinkedHashSet<Block> postponedMisreplicatedBlocks =
+ new LinkedHashSet<Block>();
+ private final int blocksPerPostpondedRescan;
+ private final ArrayList<Block> rescannedMisreplicatedBlocks;
/**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -316,6 +318,10 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
startupDelayBlockDeletionInMs = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) *
1000L;
@@ -1424,9 +1430,7 @@ public class BlockManager implements BlockStatsMXBean {
private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
- postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
}
@@ -2050,39 +2054,14 @@ public class BlockManager implements BlockStatsMXBean {
if (getPostponedMisreplicatedBlocksCount() == 0) {
return;
}
-long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow();
namesystem.writeLock();
-long startPostponedMisReplicatedBlocksCount =
-getPostponedMisreplicatedBlocksCount();
+long startTime = Time.monotonicNow();
+long startSize = postponedMisreplicatedBlocks.size();
try {
- // blocksPerRescan is the configured number of blocks per rescan.
- // Randomly
Repository: hadoop
Updated Branches:
refs/heads/branch-2 8ffe86f78 -> 0d8a35bd6
HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn
Sharp.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d8a35bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d8a35bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d8a35bd
Branch: refs/heads/branch-2
Commit: 0d8a35bd6de5d2a5a9b816ca98f31975e94bd7c6
Parents: 8ffe86f
Author: Kihwal Lee
Authored: Thu Dec 1 12:15:15 2016 -0600
Committer: Kihwal Lee
Committed: Thu Dec 1 12:15:15 2016 -0600
--
.../server/blockmanagement/BlockManager.java| 76 +++-
1 file changed, 24 insertions(+), 52 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d8a35bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 954b297..f2805e2 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -29,6 +29,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -170,7 +171,6 @@ public class BlockManager implements BlockStatsMXBean {
private boolean initializedReplQueues;
private final AtomicLong excessBlocksCount = new AtomicLong(0L);
- private final AtomicLong postponedMisreplicatedBlocksCount = new
AtomicLong(0L);
private final long startupDelayBlockDeletionInMs;
private final BlockReportLeaseManager blockReportLeaseManager;
private ObjectName mxBeanName;
@@ -205,7 +205,7 @@ public class BlockManager implements BlockStatsMXBean {
}
/** Used by metrics */
public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
}
/** Used by metrics */
public int getPendingDataNodeMessageCount() {
@@ -245,8 +245,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
- private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
- new LightWeightHashSet<>();
+ private final LinkedHashSet<Block> postponedMisreplicatedBlocks =
+ new LinkedHashSet<Block>();
+ private final int blocksPerPostpondedRescan;
+ private final ArrayList<Block> rescannedMisreplicatedBlocks;
/**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -345,7 +347,10 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
this.blockIdManager = new BlockIdManager(this);
-
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
startupDelayBlockDeletionInMs = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) *
1000L;
@@ -1455,9 +1460,7 @@ public class BlockManager implements BlockStatsMXBean {
private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
- postponedMisreplicatedBlocksCount.incrementAndGet();
-}
+postponedMisreplicatedBlocks.add(blk);
}
@@ -2164,39 +2167,14 @@ public class BlockManager implements BlockStatsMXBean {
if (getPostponedMisreplicatedBlocksCount() == 0) {
return;
}
-long startTimeRescanPostponedMisReplicatedBlocks = Time.monotonicNow();
namesystem.writeLock();
-long startPostponedMisReplicatedBlocksCount =
-getPostponedMisreplicatedBlocksCount();
+long startTime = Time.monotonicNow();
+long startSize = postponedMisreplicatedBlocks.size();
try {
- // blocksPerRescan is the configured number of blocks per rescan.
- // Randomly select blocksPerRescan consecutive blocks from the HashSet
- // when the number of blocks remaining is larger than blocksPerRescan.
- // The
Repository: hadoop
Updated Branches:
refs/heads/trunk e0fa49234 -> 96c574927
HDFS-8674. Improve performance of postponed block scans. Contributed by Daryn
Sharp.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96c57492
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96c57492
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96c57492
Branch: refs/heads/trunk
Commit: 96c574927a600d15fab919df1fdc9e07887af6c5
Parents: e0fa492
Author: Kihwal Lee
Authored: Thu Dec 1 12:11:27 2016 -0600
Committer: Kihwal Lee
Committed: Thu Dec 1 12:11:27 2016 -0600
--
.../server/blockmanagement/BlockManager.java| 79 ++--
1 file changed, 24 insertions(+), 55 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/96c57492/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1b744e7..e60703b 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -30,6 +30,7 @@ import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -43,8 +44,6 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
import javax.management.ObjectName;
import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -101,7 +100,6 @@ import
org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.util.FoldedTreeSet;
-import org.apache.hadoop.hdfs.util.LightWeightHashSet;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.CacheManager;
@@ -184,7 +182,6 @@ public class BlockManager implements BlockStatsMXBean {
/** flag indicating whether replication queues have been initialized */
private boolean initializedReplQueues;
- private final AtomicLong postponedMisreplicatedBlocksCount = new
AtomicLong(0L);
private final long startupDelayBlockDeletionInMs;
private final BlockReportLeaseManager blockReportLeaseManager;
private ObjectName mxBeanName;
@@ -219,7 +216,7 @@ public class BlockManager implements BlockStatsMXBean {
}
/** Used by metrics */
public long getPostponedMisreplicatedBlocksCount() {
-return postponedMisreplicatedBlocksCount.get();
+return postponedMisreplicatedBlocks.size();
}
/** Used by metrics */
public int getPendingDataNodeMessageCount() {
@@ -275,8 +272,10 @@ public class BlockManager implements BlockStatsMXBean {
* notified of all block deletions that might have been pending
* when the failover happened.
*/
- private final LightWeightHashSet<Block> postponedMisreplicatedBlocks =
- new LightWeightHashSet<>();
+ private final Set<Block> postponedMisreplicatedBlocks =
+ new LinkedHashSet<Block>();
+ private final int blocksPerPostpondedRescan;
+ private final ArrayList<Block> rescannedMisreplicatedBlocks;
/**
* Maps a StorageID to the set of blocks that are "extra" for this
@@ -378,7 +377,10 @@ public class BlockManager implements BlockStatsMXBean {
datanodeManager = new DatanodeManager(this, namesystem, conf);
heartbeatManager = datanodeManager.getHeartbeatManager();
this.blockIdManager = new BlockIdManager(this);
-
+blocksPerPostpondedRescan = (int)Math.min(Integer.MAX_VALUE,
+datanodeManager.getBlocksPerPostponedMisreplicatedBlocksRescan());
+rescannedMisreplicatedBlocks =
+new ArrayList<Block>(blocksPerPostpondedRescan);
startupDelayBlockDeletionInMs = conf.getLong(
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) *
1000L;
@@ -1613,9 +1615,7 @@ public class BlockManager implements BlockStatsMXBean {
private void postponeBlock(Block blk) {
-if (postponedMisreplicatedBlocks.add(blk)) {
-
Repository: hadoop
Updated Branches:
refs/heads/trunk 1f7613be9 -> e0fa49234
HDFS-11180. Intermittent deadlock in NameNode when failover happens.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0fa4923
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0fa4923
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0fa4923
Branch: refs/heads/trunk
Commit: e0fa49234fd37aca88e1caa95bac77bca192bae4
Parents: 1f7613b
Author: Akira Ajisaka
Authored: Thu Dec 1 23:08:59 2016 +0900
Committer: Akira Ajisaka
Committed: Thu Dec 1 23:08:59 2016 +0900
--
.../dev-support/findbugsExcludeFile.xml | 27
.../hadoop/hdfs/server/namenode/FSEditLog.java | 72 +---
.../hadoop/hdfs/server/namenode/FSImage.java| 15 +++-
.../hdfs/server/namenode/FSNamesystem.java | 27 ++--
.../hdfs/server/namenode/NameNodeRpcServer.java | 2 +-
.../server/namenode/ha/StandbyCheckpointer.java | 4 +-
.../server/namenode/TestFSNamesystemMBean.java | 24 +++
7 files changed, 148 insertions(+), 23 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 426fb72..e6e4057 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -109,6 +109,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0fa4923/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index ef9eb68..c9ee32b 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -155,14 +155,16 @@ public class FSEditLog implements LogsPurgeable {
private EditLogOutputStream editLogStream = null;
// a monotonically increasing counter that represents transactionIds.
- private long txid = 0;
+ // All of the threads which update/increment txid are synchronized,
+ // so make txid volatile instead of AtomicLong.
+ private volatile long txid = 0;
// stores the last synced transactionId.
private long synctxid = 0;
// the first txid of the log that's currently open for writing.
// If this value is N, we are currently writing to edits_inprogress_N
- private long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
+ private volatile long curSegmentTxId = HdfsServerConstants.INVALID_TXID;
// the time of printing the statistics to the log file.
private long lastPrintTime;
@@ -338,7 +340,18 @@ public class FSEditLog implements LogsPurgeable {
return state == State.IN_SEGMENT ||
state == State.BETWEEN_LOG_SEGMENTS;
}
-
+
+ /**
+ * Return true if the log is currently open in write mode.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is currently open in write mode, regardless
+ * of whether it actually has an open segment.
+ */
+ boolean isOpenForWriteWithoutLock() {
+return state == State.IN_SEGMENT ||
+state == State.BETWEEN_LOG_SEGMENTS;
+ }
+
/**
* @return true if the log is open in write mode and has a segment open
* ready to take edits.
@@ -348,6 +361,16 @@ public class FSEditLog implements LogsPurgeable {
}
/**
+ * Return true the state is IN_SEGMENT.
+ * This method is not synchronized and must be used only for metrics.
+ * @return true if the log is open in write mode and has a segment open
+ * ready to take edits.
+ */
+ boolean isSegmentOpenWithoutLock() {
+return state == State.IN_SEGMENT;
+ }
+
+ /**
* @return true if the log is open in read mode.
*/
public synchronized boolean isOpenForRead() {
@@ -522,7 +545,16 @@ public class FSEditLog implements LogsPurgeable {
public synchronized long getLastWrittenTxId() {
return txid;
}
-
+
+ /**
+ * Return the transaction ID of the last transaction written to the log.
+ * This method
Repository: hadoop
Updated Branches:
refs/heads/HADOOP-13345 5e93093e6 -> cfd0fbf13
HADOOP-13850 s3guard to log choice of metadata store at debug. Contributed by
Mingliang Liu
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfd0fbf1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfd0fbf1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfd0fbf1
Branch: refs/heads/HADOOP-13345
Commit: cfd0fbf13b7e901d991456f14ff7d4b89301f388
Parents: 5e93093
Author: Steve Loughran
Authored: Thu Dec 1 11:26:59 2016 +
Committer: Steve Loughran
Committed: Thu Dec 1 11:26:59 2016 +
--
.../src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfd0fbf1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
--
diff --git
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
index c998072..904a1c3 100644
---
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
+++
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java
@@ -66,7 +66,6 @@ final public class S3Guard {
* @return Reference to new MetadataStore.
*/
public static MetadataStore getMetadataStore(FileSystem fs) {
-
Preconditions.checkNotNull(fs);
Configuration conf = fs.getConf();
Preconditions.checkNotNull(conf);
@@ -74,8 +73,8 @@ final public class S3Guard {
try {
Class msClass = getMetadataStoreClass(conf);
msInstance = ReflectionUtils.newInstance(msClass, conf);
- LOG.info("Using {} for {} filesystem", msClass.getSimpleName(),
- fs.getScheme());
+ LOG.debug("Using {} metadata store for {} filesystem",
+ msClass.getSimpleName(), fs.getScheme());
} catch (RuntimeException e) {
LOG.error("Failed to instantiate {}, using NullMetadataStore:",
conf.get(S3_METADATA_STORE_IMPL), e);
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
Repository: hadoop
Updated Branches:
refs/heads/yarn-native-services f32cf8f3b -> 165e50b00
YARN-5944. Native services AM should remain up if RM is down. Contributed by
Billie Rinaldi
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/165e50b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/165e50b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/165e50b0
Branch: refs/heads/yarn-native-services
Commit: 165e50b0094fe83774b205e28a5ecce9139c8694
Parents: f32cf8f
Author: Gour Saha
Authored: Thu Dec 1 00:30:01 2016 -0800
Committer: Gour Saha
Committed: Thu Dec 1 00:30:01 2016 -0800
--
.../org/apache/slider/server/appmaster/SliderAppMaster.java | 5 +
1 file changed, 5 insertions(+)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/165e50b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
index 34b6a7d..8c39343 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/appmaster/SliderAppMaster.java
@@ -705,6 +705,11 @@ public class SliderAppMaster extends
AbstractSliderLaunchedService
synchronized (appState) {
int heartbeatInterval = HEARTBEAT_INTERVAL;
+ // configure AM to wait forever for RM
+
getConfig().setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
+ -1);
+ getConfig().unset(YarnConfiguration.CLIENT_FAILOVER_MAX_ATTEMPTS);
+
// add the RM client -this brings the callbacks in
asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval,
this);
addService(asyncRMClient);
-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
37 matches
Mail list logo