Repository: hadoop
Updated Branches:
refs/heads/branch-2.9 60feb43b7 -> 6e57ca602
HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.
(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e57ca60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e57ca60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e57ca60
Branch: refs/heads/branch-2.9
Commit: 6e57ca602904f5a4a9862df377ea8d0f31ebaa21
Parents: 60feb43
Author: Yiqun Lin
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin
Committed: Wed Mar 14 11:32:50 2018 +0800
--
.../federation/metrics/StateStoreMetrics.java | 5 +
.../driver/StateStoreRecordOperations.java | 15 -
.../driver/impl/StateStoreFileBaseImpl.java | 433 ++-
.../store/driver/impl/StateStoreFileImpl.java | 109 ++---
.../driver/impl/StateStoreFileSystemImpl.java | 128 +++---
.../driver/impl/StateStoreZooKeeperImpl.java | 6 -
.../store/driver/TestStateStoreDriverBase.java | 9 +
.../store/driver/TestStateStoreFile.java | 12 +
.../store/driver/TestStateStoreFileBase.java | 47 ++
.../store/driver/TestStateStoreFileSystem.java | 14 +-
10 files changed, 428 insertions(+), 350 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
index 40dcd40..09253a2 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/StateStoreMetrics.java
@@ -140,5 +140,10 @@ public final class StateStoreMetrics implements StateStoreMBean {
     writes.resetMinMax();
     removes.resetMinMax();
     failures.resetMinMax();
+
+    reads.lastStat().reset();
+    writes.lastStat().reset();
+    removes.lastStat().reset();
+    failures.lastStat().reset();
   }
 }
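For readers skimming the digest: a minimal, self-contained sketch of what the added calls do, assuming the hypothetical registry and metric names below. resetMinMax() clears only the min/max trackers of a MutableRate, while lastStat().reset() also zeroes the interval's running sample statistics, so the two together give a full reset.

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;

public class MetricResetSketch {
  public static void main(String[] args) {
    // "StateStoreSketch" and "reads" are illustrative names, not the
    // identifiers used by StateStoreMetrics itself.
    MetricsRegistry registry = new MetricsRegistry("StateStoreSketch");
    MutableRate reads = registry.newRate("reads");
    reads.add(42);            // record one operation taking 42 time units
    reads.resetMinMax();      // clears only the min/max trackers
    reads.lastStat().reset(); // clears the running sample statistics too
  }
}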
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
index e76a733..443d46e 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
@@ -49,21 +49,6 @@ public interface StateStoreRecordOperations {
   <T extends BaseRecord> QueryResult<T> get(Class<T> clazz) throws IOException;
 
   /**
-   * Get all records of the requested record class from the data store. To use
-   * the default implementations in this class, getAll must return new instances
-   * of the records on each call. It is recommended to override the default
-   * implementations for better performance.
-   *
-   * @param clazz Class of record to fetch.
-   * @param sub Sub path.
-   * @return List of all records that match the clazz and the sub path.
-   * @throws IOException
-   */
-  @Idempotent
-  <T extends BaseRecord> QueryResult<T> get(Class<T> clazz, String sub)
-      throws IOException;
-
-  /**
    * Get a single record from the store that matches the query.
    *
    * @param clazz Class of record to fetch.
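With the sub-path overload gone, callers are left with the single-argument get(). A hedged sketch of that surviving call path; the driver parameter and the MountTable record type are placeholders for illustration, not code from this patch:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;

public class GetRecordsSketch {
  // Fetch every MountTable record through the remaining overload.
  static List<MountTable> fetchAll(StateStoreDriver driver) throws IOException {
    QueryResult<MountTable> result = driver.get(MountTable.class);
    return result.getRecords();
  }
}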
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57ca60/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
index a0cd878f..6638d1c 100644
---
Repository: hadoop
Updated Branches:
refs/heads/branch-3.0 95a4665ad -> a10506972
HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.
(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1050697
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1050697
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1050697
Branch: refs/heads/branch-3.0
Commit: a105069729f35f3c8ff7691fa3d51162677734f9
Parents: 95a4665
Author: Yiqun Lin
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin
Committed: Wed Mar 14 11:31:35 2018 +0800
--
Repository: hadoop
Updated Branches:
refs/heads/branch-2 36451f2d5 -> b3d56cb83
HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.
(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3d56cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3d56cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3d56cb8
Branch: refs/heads/branch-2
Commit: b3d56cb83558c797403cb4538d0c21bf097263ce
Parents: 36451f2
Author: Yiqun Lin
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin
Committed: Wed Mar 14 11:25:08 2018 +0800
--
Repository: hadoop
Updated Branches:
refs/heads/branch-3.1 d7aa93b2f -> 19521f71d
HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.
(cherry picked from commit 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19521f71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19521f71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19521f71
Branch: refs/heads/branch-3.1
Commit: 19521f71d445d61434af380840a285e06503eed1
Parents: d7aa93b
Author: Yiqun Lin
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin
Committed: Wed Mar 14 11:23:01 2018 +0800
--
Repository: hadoop
Updated Branches:
refs/heads/trunk 427fd027a -> 76be6cbf6
HDFS-12773. RBF: Improve State Store FS implementation. Contributed by Inigo Goiri.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76be6cbf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76be6cbf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76be6cbf
Branch: refs/heads/trunk
Commit: 76be6cbf6c33f866794f27ca2560ca7c7b2fa0e7
Parents: 427fd02
Author: Yiqun Lin
Authored: Wed Mar 14 11:20:59 2018 +0800
Committer: Yiqun Lin
Committed: Wed Mar 14 11:20:59 2018 +0800
--
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84c10955
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84c10955
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84c10955
Branch: refs/heads/HDFS-7240
Commit: 84c10955863eca1e300aeeac1d9cd7a1186144b6
Parents: b2b9ce5
Author: Xiao Chen
Authored: Tue Mar 13 09:57:20 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 09:58:03 2018 -0700
--
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 --
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49
2 files changed, 59 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index 077cf3a..a5a 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,7 +34,10 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-public class TestFileStatusWithECPolicy {
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
@@ -50,8 +53,7 @@ public class TestFileStatusWithECPolicy {
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
}
@After
@@ -62,6 +64,10 @@ public class TestFileStatusWithECPolicy {
}
}
+ public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+ }
+
@Test
public void testFileStatusWithECPolicy() throws Exception {
// test directory doesn't have an EC policy
@@ -76,8 +82,7 @@ public class TestFileStatusWithECPolicy {
ContractTestUtils.assertNotErasureCoded(fs, file);
fs.delete(file, true);
-final ErasureCodingPolicy ecPolicy1 =
-StripedFileTestUtil.getDefaultECPolicy();
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
ContractTestUtils.assertErasureCoded(fs, dir);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
new file mode 100644
index 000..18902a7
--- /dev/null
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestFileStatusWithDefaultECPolicy to use a random
+ * (non-default) EC policy.
+ */
+public class TestFileStatusWithRandomECPolicy extends
+TestFileStatusWithDefaultECPolicy {
+ private static final Logger LOG =
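The message is cut off here. Judging from the getEcPolicy() hook added above, the new subclass presumably completes along these lines; the random-policy helper named below is an assumption, not text from this mail:

  private static final Logger LOG = LoggerFactory.getLogger(
      TestFileStatusWithRandomECPolicy.class);

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Assumed helper: some StripedFileTestUtil method that picks a random
    // non-default erasure coding policy.
    ErasureCodingPolicy policy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
    LOG.info("run this test with {}", policy.getName());
    return policy;
  }
}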
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8211a3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8211a3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8211a3d4
Branch: refs/heads/HDFS-7240
Commit: 8211a3d4693fea46cff11c5883c16a9b4df7b4de
Parents: f82d38d
Author: Xiao Chen
Authored: Tue Mar 13 10:48:35 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 10:48:45 2018 -0700
--
.../hdfs/TestFileStatusWithDefaultECPolicy.java | 107 +++
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 102 --
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49 +
3 files changed, 156 insertions(+), 102 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8211a3d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
new file mode 100644
index 000..a5a
--- /dev/null
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
+ private MiniDFSCluster cluster;
+ private DistributedFileSystem fs;
+ private DFSClient client;
+
+ @Rule
+ public Timeout globalTimeout = new Timeout(30);
+
+ @Before
+ public void before() throws IOException {
+HdfsConfiguration conf = new HdfsConfiguration();
+cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+client = fs.getClient();
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
+ }
+
+ @After
+ public void after() {
+if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+}
+ }
+
+ public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+ }
+
+ @Test
+ public void testFileStatusWithECPolicy() throws Exception {
+// test directory doesn't have an EC policy
+final Path dir = new Path("/foo");
+assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
+ContractTestUtils.assertNotErasureCoded(fs, dir);
+assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
+// test file doesn't have an EC policy
+final Path file = new Path(dir, "foo");
+fs.create(file).close();
+assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
+ContractTestUtils.assertNotErasureCoded(fs, file);
+fs.delete(file, true);
+
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+// set EC policy on dir
+fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
+ContractTestUtils.assertErasureCoded(fs, dir);
+final ErasureCodingPolicy ecPolicy2 =
+client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy2);
+
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count.
Contributed by Erik Krogen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6994da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6994da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6994da
Branch: refs/heads/HDFS-7240
Commit: 9d6994da1964c1125a33b3a65e7a7747e2d0bc59
Parents: a82d4a2
Author: Chris Douglas
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas
Committed: Tue Mar 13 13:55:18 2018 -0700
--
.../org/apache/hadoop/http/HttpServer2.java | 12 +-
.../org/apache/hadoop/http/TestHttpServer.java | 23 +++-
2 files changed, 33 insertions(+), 2 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
"hadoop.http.socket.backlog.size";
public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+ public static final String HTTP_ACCEPTOR_COUNT_KEY =
+ "hadoop.http.acceptor.count";
+ // -1 to use default behavior of setting count based on CPU core count
+ public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+ public static final String HTTP_SELECTOR_COUNT_KEY =
+ "hadoop.http.selector.count";
+ // -1 to use default behavior of setting count based on CPU core count
+ public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
private ServerConnector createHttpChannelConnector(
Server server, HttpConfiguration httpConfig) {
- ServerConnector conn = new ServerConnector(server);
+ ServerConnector conn = new ServerConnector(server,
+ conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+ conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
conn.addConnectionFactory(connFactory);
configureChannelConnector(conn);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
@BeforeClass public static void setup() throws Exception {
Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
server = createTestServer(conf);
server.addServlet("echo", "/echo", EchoServlet.class);
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends HttpServerFunctionalTest {
ready.await();
start.countDown();
}
+
+ /**
+ * Test that the number of acceptors and selectors can be configured by
+ * trying to configure more of them than would be allowed based on the
+ * maximum thread count.
+ */
+ @Test
+ public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, MAX_THREADS - 2);
+HttpServer2 badserver = createTestServer(conf);
+try {
+
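The mail truncates the new test here. As a hedged usage sketch of the two keys this patch introduces (server name, port, and counts below are arbitrary):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class AcceptorSelectorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 16);
    // Pin the Jetty acceptor/selector counts instead of letting them be
    // derived from the CPU core count (-1, the default).
    conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, 1);
    conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, 2);
    HttpServer2 server = new HttpServer2.Builder()
        .setName("sketch")
        .addEndpoint(new URI("http://localhost:0"))
        .setConf(conf)
        .build();
    server.start();
    server.stop();
  }
}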
Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma."
This reverts commit 84c10955863eca1e300aeeac1d9cd7a1186144b6.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82d38dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82d38dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82d38dc
Branch: refs/heads/HDFS-7240
Commit: f82d38dcb3259dda6275c75765738fb9b249ee73
Parents: 3b8dbc2
Author: Xiao Chen
Authored: Tue Mar 13 10:30:07 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 10:36:16 2018 -0700
--
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 ++
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49
2 files changed, 5 insertions(+), 59 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index a5a..077cf3a 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,10 +34,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-/**
- * This test ensures the statuses of EC files with the default policy.
- */
-public class TestFileStatusWithDefaultECPolicy {
+public class TestFileStatusWithECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
@@ -53,7 +50,8 @@ public class TestFileStatusWithDefaultECPolicy {
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
-fs.enableErasureCodingPolicy(getEcPolicy().getName());
+fs.enableErasureCodingPolicy(
+StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
@@ -64,10 +62,6 @@ public class TestFileStatusWithDefaultECPolicy {
}
}
- public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
- }
-
@Test
public void testFileStatusWithECPolicy() throws Exception {
// test directory doesn't have an EC policy
@@ -82,7 +76,8 @@ public class TestFileStatusWithDefaultECPolicy {
ContractTestUtils.assertNotErasureCoded(fs, file);
fs.delete(file, true);
-final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+final ErasureCodingPolicy ecPolicy1 =
+StripedFileTestUtil.getDefaultECPolicy();
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
ContractTestUtils.assertErasureCoded(fs, dir);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
deleted file mode 100644
index 18902a7..000
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This test extends TestFileStatusWithDefaultECPolicy to use a random
- * (non-default) EC policy.
- */
-public class TestFileStatusWithRandomECPolicy
HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei Jiang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cccadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cccadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cccadd
Branch: refs/heads/HDFS-7240
Commit: 45cccadd2e84b99ec56f1cc0e2248dc8fc844f38
Parents: 8211a3d
Author: Chris Douglas
Authored: Tue Mar 13 11:08:11 2018 -0700
Committer: Chris Douglas
Committed: Tue Mar 13 11:08:11 2018 -0700
--
.../src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java| 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cccadd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 2b3b529..eba4bee 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -527,7 +527,7 @@ public class DistCpUtils {
/**
* Utility to compare checksums for the paths specified.
*
- * If checksums's can't be retrieved, it doesn't fail the test
+ * If checksums can't be retrieved, it doesn't fail the test
* Only time the comparison would fail is when checksums are
* available and they don't match
*
-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
HDFS-13235. DiskBalancer: Update Documentation to add newly added options.
Contributed by Bharat Viswanadham.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39537b7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39537b7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39537b7c
Branch: refs/heads/HDFS-7240
Commit: 39537b7c84dddfa8084308459565ab77fd24abd3
Parents: 9d6994d
Author: Arpit Agarwal
Authored: Tue Mar 13 16:35:51 2018 -0700
Committer: Arpit Agarwal
Committed: Tue Mar 13 16:35:51 2018 -0700
--
.../hadoop-hdfs/src/main/resources/hdfs-default.xml| 6 +++---
.../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md | 6 +-
2 files changed, 8 insertions(+), 4 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2d3c5e7..f90daba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4651,9 +4651,9 @@
   <name>dfs.disk.balancer.plan.valid.interval</name>
   <value>1d</value>
   <description>
-    Maximum number of hours the disk balancer plan is valid.
-    This setting supports multiple time unit suffixes as described
-    in dfs.heartbeat.interval. If no suffix is specified then milliseconds
+    Maximum amount of time disk balancer plan is valid. This setting
+    supports multiple time unit suffixes as described in
+    dfs.heartbeat.interval. If no suffix is specified then milliseconds
     is assumed.
   </description>
 </property>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 6e1bd41..ed0233a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -79,6 +79,10 @@ Execute command takes a plan command executes it against the datanode that plan
This executes the plan by reading datanode’s address from the plan file.
+| COMMAND\_OPTION| Description |
+|:---- |:---- |
+| `-skipDateCheck` | Skip date check and force execute the plan.|
+
### Query
Query command gets the current status of the diskbalancer from a datanode.
@@ -122,7 +126,7 @@ There is a set of diskbalancer settings that can be controlled via hdfs-site.xml
|`dfs.disk.balancer.max.disk.errors`| sets the value of maximum number of errors we can ignore for a specific move between two disks before it is abandoned. For example, if a plan has 3 pair of disks to copy between , and the first disk set encounters more than 5 errors, then we abandon the first copy and start the second copy in the plan. The default value of max errors is set to 5.|
|`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies when we have reached a good enough value for any copy step. For example, if you specify 10% then getting close to 10% of the target value is good enough.|
|`dfs.disk.balancer.plan.threshold.percent`| The percentage threshold value for volume Data Density in a plan. If the absolute value of volume Data Density which is out of threshold value in a node, it means that the volumes corresponding to the disks should do the balancing in the plan. The default value is 10.|
-
+|`dfs.disk.balancer.plan.valid.interval`| Maximum amount of time disk balancer plan is valid. Supports the following suffixes (case insensitive): ms(millis), s(sec), m(min), h(hour), d(day) to specify the time (such as 2s, 2m, 1h, etc.). If no suffix is specified then milliseconds is assumed. Default value is 1d|
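For illustration, the interval from the new table row can also be set programmatically; the two-day value below is arbitrary:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;

public class PlanValidIntervalSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Either a suffixed string ("2d") or the typed setter works.
    conf.set("dfs.disk.balancer.plan.valid.interval", "2d");
    conf.setTimeDuration("dfs.disk.balancer.plan.valid.interval",
        2, TimeUnit.DAYS);
    long millis = conf.getTimeDuration(
        "dfs.disk.balancer.plan.valid.interval",
        86400000L, TimeUnit.MILLISECONDS);
    System.out.println("plan valid for " + millis + " ms");
  }
}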
Debugging
---------
-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org
HDFS-13226. RBF: Throw the exception if mount table entry validated failed.
Contributed by maobaolong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19292bc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19292bc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19292bc2
Branch: refs/heads/HDFS-7240
Commit: 19292bc264cada5117ec76063d36cc88159afdf4
Parents: 7fab787
Author: Yiqun Lin
Authored: Tue Mar 13 11:03:31 2018 +0800
Committer: Yiqun Lin
Committed: Tue Mar 13 11:03:31 2018 +0800
--
.../federation/store/records/BaseRecord.java | 16 ++--
.../store/records/MembershipState.java | 29 -
.../federation/store/records/MountTable.java | 42 +++
.../federation/store/records/RouterState.java | 9 ++--
.../federation/router/TestRouterAdminCLI.java | 38 +++--
.../store/records/TestMountTable.java | 43
6 files changed, 137 insertions(+), 40 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index 79f99c8..d5e60ce 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.util.Time;
*
*/
public abstract class BaseRecord implements Comparable<BaseRecord> {
+ public static final String ERROR_MSG_CREATION_TIME_NEGATIVE =
+ "The creation time for the record cannot be negative.";
+ public static final String ERROR_MSG_MODIFICATION_TIME_NEGATIVE =
+ "The modification time for the record cannot be negative.";
/**
* Set the modification time for the record.
@@ -193,11 +197,15 @@ public abstract class BaseRecord implements Comparable<BaseRecord> {
/**
* Validates the record. Called when the record is created, populated from the
- * state store, and before committing to the state store.
- * @return If the record is valid.
+ * state store, and before committing to the state store. If validate failed,
+ * there throws an exception.
*/
- public boolean validate() {
-return getDateCreated() > 0 && getDateModified() > 0;
+ public void validate() {
+if (getDateCreated() <= 0) {
+ throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE);
+} else if (getDateModified() <= 0) {
+ throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);
+}
}
@Override
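A short sketch of the behavioral change for callers, under the assumption that they previously branched on the boolean; the helper below is illustrative, not part of the patch:

import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;

public class ValidateSketch {
  // Old style: if (!record.validate()) { skip }. New style: catch the
  // unchecked exception that validate() now throws for bad records.
  static boolean isUsable(BaseRecord record) {
    try {
      record.validate();
      return true;
    } catch (IllegalArgumentException e) {
      System.err.println("rejected record: " + e.getMessage());
      return false;
    }
  }
}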
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
index ac0b22e..e33dedf 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
@@ -37,6 +37,14 @@ import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer;
*/
public abstract class MembershipState extends BaseRecord
implements FederationNamenodeContext {
+ public static final String ERROR_MSG_NO_NS_SPECIFIED =
+ "Invalid registration, no nameservice specified ";
+ public static final String ERROR_MSG_NO_WEB_ADDR_SPECIFIED =
+ "Invalid registration, no web address specified ";
+ public static final String ERROR_MSG_NO_RPC_ADDR_SPECIFIED =
+ "Invalid registration, no rpc address specified ";
+ public static final String ERROR_MSG_NO_BP_SPECIFIED =
+ "Invalid registration, no block pool specified ";
/** Expiration time in ms for this entry. */
private static long expirationMs;
@@ -226,26 +234,25 @@ public abstract class MembershipState extends BaseRecord
* is missing required information.
*/
@Override
- public boolean validate() {
-boolean ret =
HADOOP-14696. parallel tests don't work for Windows. Contributed by Allen Wittenauer
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45d1b0fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45d1b0fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45d1b0fd
Branch: refs/heads/HDFS-7240
Commit: 45d1b0fdcc04a86be91a9b72073cdc30bec04d3b
Parents: 19292bc
Author: Chris Douglas
Authored: Mon Mar 12 19:47:42 2018 -0700
Committer: Chris Douglas
Committed: Mon Mar 12 20:05:39 2018 -0700
--
hadoop-common-project/hadoop-common/pom.xml | 26 +
.../apache/hadoop/test/GenericTestUtils.java | 68 +
hadoop-hdfs-project/hadoop-hdfs/pom.xml | 26 +
.../plugin/paralleltests/CreateDirsMojo.java| 100 +++
hadoop-tools/hadoop-aws/pom.xml | 26 +
5 files changed, 161 insertions(+), 85 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml
b/hadoop-common-project/hadoop-common/pom.xml
index 078a943..49d3575 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -979,30 +979,13 @@
-        <artifactId>maven-antrun-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
-            <id>create-parallel-tests-dirs</id>
-            <phase>test-compile</phase>
-
-
-
-
-
+            <id>parallel-tests-createdir</id>
-              <goal>run</goal>
+              <goal>parallel-tests-createdir</goal>
@@ -1015,6 +998,7 @@
             <reuseForks>false</reuseForks>
             <argLine>${maven-surefire-plugin.argLine}
               -DminiClusterDedicatedDirs=true</argLine>
+            <testsThreadCount>${testsThreadCount}</testsThreadCount>
             <systemPropertyVariables>
               <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
               <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
               <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45d1b0fd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index cdde48c..61b0271 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -286,7 +286,7 @@ public abstract class GenericTestUtils {
public static void assertExists(File f) {
Assert.assertTrue("File " + f + " should exist", f.exists());
}
-
+
/**
* List all of the files in 'dir' that match the regex 'pattern'.
* Then check that this list is identical to 'expectedMatches'.
@@ -294,7 +294,7 @@ public abstract class GenericTestUtils {
*/
public static void assertGlobEquals(File dir, String pattern,
String ... expectedMatches) throws IOException {
-
+
Set<String> found = Sets.newTreeSet();
for (File f : FileUtil.listFiles(dir)) {
if (f.getName().matches(pattern)) {
@@ -349,7 +349,7 @@ public abstract class GenericTestUtils {
StringUtils.stringifyException(t)),
t);
}
- }
+ }
/**
* Wait for the specified test to return true. The test will be performed
@@ -499,18 +499,18 @@ public abstract class GenericTestUtils {
*/
public static class DelayAnswer implements Answer<Object> {
private final Log LOG;
-
+
private final CountDownLatch fireLatch = new CountDownLatch(1);
private final CountDownLatch waitLatch = new CountDownLatch(1);
private final CountDownLatch resultLatch = new CountDownLatch(1);
-
+
private final AtomicInteger fireCounter = new
HADOOP-15297. Make S3A etag => checksum feature optional.
Contributed by Steve Loughran.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd05871b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd05871b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd05871b
Branch: refs/heads/HDFS-7240
Commit: dd05871b8b57303fe0b0c652e03257b59c191802
Parents: e1f5251
Author: Steve Loughran
Authored: Mon Mar 12 14:01:42 2018 +
Committer: Steve Loughran
Committed: Mon Mar 12 14:01:42 2018 +
--
.../src/main/resources/core-default.xml | 11
.../org/apache/hadoop/fs/s3a/Constants.java | 11
.../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 38 +-
.../hadoop/fs/s3a/S3AInstrumentation.java | 1 +
.../org/apache/hadoop/fs/s3a/Statistic.java | 2 +
.../src/site/markdown/tools/hadoop-aws/index.md | 41 ++-
.../hadoop/fs/s3a/ITestS3AMiscOperations.java | 53 +---
7 files changed, 136 insertions(+), 21 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 6d6ed42..9074300 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1547,6 +1547,17 @@
+
+<property>
+  <name>fs.s3a.etag.checksum.enabled</name>
+  <value>false</value>
+  <description>
+    Should calls to getFileChecksum() return the etag value of the remote
+    object.
+    WARNING: if enabled, distcp operations between HDFS and S3 will fail unless
+    -skipcrccheck is set.
+  </description>
+</property>
   <name>fs.wasb.impl</name>
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
--
diff --git
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
index faec784..4c95843 100644
---
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
+++
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java
@@ -542,4 +542,15 @@ public final class Constants {
*/
public static final String RETRY_THROTTLE_INTERVAL_DEFAULT = "500ms";
+ /**
+ * Should etags be exposed as checksums?
+ */
+ public static final String ETAG_CHECKSUM_ENABLED =
+ "fs.s3a.etag.checksum.enabled";
+
+ /**
+ * Default value: false.
+ */
+ public static final boolean ETAG_CHECKSUM_ENABLED_DEFAULT = false;
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd05871b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index eb65cfa..4b0c208 100644
---
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2993,17 +2993,21 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities {
   }
/**
- * Get the etag of a object at the path via HEAD request and return it
- * as a checksum object. This has the whatever guarantees about equivalence
- * the S3 implementation offers.
+ * When enabled, get the etag of a object at the path via HEAD request and
+ * return it as a checksum object.
*
* If a tag has not changed, consider the object unchanged.
* Two tags being different does not imply the data is different.
*
* Different S3 implementations may offer different guarantees.
+ *
+ * This check is (currently) only made if
+ * {@link Constants#ETAG_CHECKSUM_ENABLED} is set; turning it on
+ * has caused problems with Distcp (HADOOP-15273).
+ *
* @param f The file path
* @param length The length of the file range for checksum calculation
- * @return The EtagChecksum or null if checksums are not supported.
+ * @return The EtagChecksum or null if checksums are not enabled or
supported.
* @throws IOException IO failure
* @see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html">Common
* Response Headers</a>
*/
@@ -3012,15 +3016,23 @@ public class S3AFileSystem extends
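As a sketch of how the new option is meant to be used (illustrative only;
the bucket and path below are hypothetical):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EtagChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Opt in: getFileChecksum() returns null for S3A unless this is set.
    conf.setBoolean("fs.s3a.etag.checksum.enabled", true);
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    FileChecksum sum =
        fs.getFileChecksum(new Path("s3a://example-bucket/data/part-0"));
    // Still null when the store or configuration offers no checksum.
    if (sum != null) {
      System.out.println(sum.getAlgorithmName() + ": " + sum);
    }
  }
}
```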
MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are
both present twice in mapred-default.xml. Contributed by Sen Zhao
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fa3a63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fa3a63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fa3a63
Branch: refs/heads/HDFS-7240
Commit: 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf
Parents: 3f7bd46
Author: Jason Lowe
Authored: Fri Mar 9 10:41:16 2018 -0600
Committer: Jason Lowe
Committed: Fri Mar 9 10:41:16 2018 -0600
--
.../src/main/resources/mapred-default.xml | 16
1 file changed, 16 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fa3a63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d0e5a2d..cf8be33 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -422,22 +422,6 @@
-<property>
-  <name>mapreduce.map.cpu.vcores</name>
-  <value>1</value>
-  <description>
-      The number of virtual cores required for each map task.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.cpu.vcores</name>
-  <value>1</value>
-  <description>
-      The number of virtual cores required for each reduce task.
-  </description>
-</property>
-
 <property>
   <name>mapreduce.reduce.merge.inmem.threshold</name>
   <value>1000</value>
   <description>The threshold, in terms of the number of files
HDFS-11399. Many tests fail in Windows due to injecting disk failures.
Contributed by Yiqun Lin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac627f56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac627f56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac627f56
Branch: refs/heads/HDFS-7240
Commit: ac627f561f0946e98a650850fb507536cbd2f2c4
Parents: dd05871
Author: Inigo Goiri
Authored: Mon Mar 12 09:58:56 2018 -0700
Committer: Inigo Goiri
Committed: Mon Mar 12 09:58:56 2018 -0700
--
.../server/blockmanagement/TestBlockStatsMXBean.java| 5 +
.../hdfs/server/datanode/TestDataNodeVolumeFailure.java | 12
2 files changed, 17 insertions(+)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac627f56/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
index 64364cb..11bfff8 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
@@ -160,6 +161,10 @@ public class TestBlockStatsMXBean {
@Test
public void testStorageTypeStatsWhenStorageFailed() throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
DFSTestUtil.createFile(cluster.getFileSystem(),
new Path("/blockStatsFile1"), 1024, (short) 1, 0L);
Map<StorageType, StorageTypeStats> storageTypeStatsMap = cluster
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac627f56/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index e73337b..6385367 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -293,6 +293,10 @@ public class TestDataNodeVolumeFailure {
@Test(timeout=10000)
public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
// make both data directories to fail on dn0
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
@@ -314,6 +318,10 @@ public class TestDataNodeVolumeFailure {
@Test
public void testVolumeFailureRecoveredByHotSwappingVolume()
throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final DataNode dn0 = cluster.getDataNodes().get(0);
@@ -354,6 +362,10 @@ public class TestDataNodeVolumeFailure {
@Test
public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
throws Exception {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeNotWindows();
+
final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
final File dn0VolNew = new File(dataDir, "data_new");
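The skip pattern added in both test classes is small enough to show on its
own; a sketch (class and test names hypothetical):

```java
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;

import org.junit.Test;

public class WindowsSkipSketch {
  @Test
  public void testInjectsDataDirFailure() {
    // On Windows this throws JUnit's AssumptionViolatedException, so the
    // test is reported as skipped rather than failed.
    assumeNotWindows();
    // ... body that calls DataNodeTestUtils#injectDataDirFailure() ...
  }
}
```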
YARN-8011.
TestOpportunisticContainerAllocatorAMService#testContainerPromoteAndDemoteBeforeContainerStart
fails intermittently. Contributed by Tao Yang.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b451889e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b451889e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b451889e
Branch: refs/heads/HDFS-7240
Commit: b451889e8e83f7977f2b76789c61e823e2d40487
Parents: 4cc9a6d
Author: Weiwei Yang
Authored: Thu Mar 8 18:13:36 2018 +0800
Committer: Weiwei Yang
Committed: Thu Mar 8 18:13:36 2018 +0800
--
...pportunisticContainerAllocatorAMService.java | 29 ++--
1 file changed, 15 insertions(+), 14 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b451889e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
--
diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
index 1af930f..efa76bc 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
+++
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java
@@ -243,13 +243,13 @@ public class TestOpportunisticContainerAllocatorAMService
{
null, ExecutionType.GUARANTEED)));
// Node on same host should not result in allocation
sameHostDiffNode.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
// Wait for scheduler to process all events
dispatcher.waitForEventThreadToWait();
-Thread.sleep(1000);
+rm.drainEvents();
// Verify Metrics After OPP allocation (Nothing should change again)
verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
@@ -286,7 +286,7 @@ public class TestOpportunisticContainerAllocatorAMService {
// Ensure after correct node heartbeats, we should get the allocation
allocNode.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
Container uc =
@@ -303,7 +303,7 @@ public class TestOpportunisticContainerAllocatorAMService {
nm2.nodeHeartbeat(true);
nm3.nodeHeartbeat(true);
nm4.nodeHeartbeat(true);
-Thread.sleep(200);
+rm.drainEvents();
// Verify that the container is still in ACQUIRED state wrt the RM.
RMContainer rmContainer = ((CapacityScheduler) scheduler)
@@ -325,6 +325,7 @@ public class TestOpportunisticContainerAllocatorAMService {
// Wait for scheduler to finish processing events
dispatcher.waitForEventThreadToWait();
+rm.drainEvents();
// Verify Metrics After OPP allocation :
// Everything should have reverted to what it was
verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
@@ -396,7 +397,7 @@ public class TestOpportunisticContainerAllocatorAMService {
ContainerStatus.newInstance(container.getId(),
ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
true);
-Thread.sleep(200);
+rm.drainEvents();
// Verify that container is actually running wrt the RM..
RMContainer rmContainer = ((CapacityScheduler) scheduler)
@@ -434,7 +435,7 @@ public class TestOpportunisticContainerAllocatorAMService {
ContainerStatus.newInstance(container.getId(),
ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
true);
-Thread.sleep(200);
+rm.drainEvents();
allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
@@ -521,7 +522,7 @@ public class TestOpportunisticContainerAllocatorAMService {
ContainerStatus.newInstance(container.getId(),
ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
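The substitution above follows one pattern throughout the test; a condensed
sketch of it (node address and memory are placeholders):

```java
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class DrainEventsSketch {
  public static void main(String[] args) throws Exception {
    MockRM rm = new MockRM(new YarnConfiguration());
    rm.start();
    MockNM nm = rm.registerNode("127.0.0.1:1234", 8 * 1024);
    nm.nodeHeartbeat(true);
    // Instead of Thread.sleep(200): block until the RM dispatcher has
    // delivered every queued event, removing timing-based flakiness.
    rm.drainEvents();
    rm.stop();
  }
}
```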
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.
Contribute by Yuanbo Liu."
This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.
Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa6a8b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa6a8b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa6a8b78
Branch: refs/heads/HDFS-7240
Commit: fa6a8b78d481d3b4d355e1bf078f30dd5e09850d
Parents: 3a8dade
Author: Owen O'Malley
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Wangda Tan
Committed: Fri Mar 9 22:46:41 2018 -0800
--
.../AuthenticationFilterInitializer.java| 9 +-
.../AuthenticationWithProxyUserFilter.java | 119 -
.../hadoop/http/TestHttpServerWithSpengo.java | 481 ---
.../security/TestAuthenticationFilter.java | 13 +-
.../TestAuthenticationWithProxyUserFilter.java | 79 ---
5 files changed, 13 insertions(+), 688 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
import java.util.Map;
/**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
*
* It enables anonymous access, simple/pseudo and Kerberos HTTP SPNEGO
* authentication for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends
FilterInitializer {
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
filterConfig);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..0000000
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
-import
HADOOP-14742. Document multi-URI replication Inode for ViewFS. Contributed by
Gera Shegalov
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ddb67ca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ddb67ca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ddb67ca7
Branch: refs/heads/HDFS-7240
Commit: ddb67ca707de896cd0ba5cda3c0d1a2d9edca968
Parents: cceb68f
Author: Chris Douglas
Authored: Mon Mar 12 13:42:38 2018 -0700
Committer: Chris Douglas
Committed: Mon Mar 12 13:43:27 2018 -0700
--
.../hadoop-hdfs/src/site/markdown/ViewFs.md | 139 +++
1 file changed, 139 insertions(+)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ddb67ca7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 1008583..f851ef6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -180,6 +180,145 @@ Recall that one cannot rename files or directories across
namenodes or clusters
This will NOT work in the new world if `/user` and `/data` are actually stored
on different namenodes within a cluster.
+Multi-Filesystem I/O with Nfly Mount Points
+-------------------------------------------
+
+HDFS and other distributed filesystems provide data resilience via some sort
+of redundancy, such as block replication or more sophisticated distributed
+encoding. However, modern setups may comprise multiple Hadoop clusters and
+enterprise filers, hosted on and off premises. Nfly mount points make it
+possible for a single logical file to be synchronously replicated to multiple
+filesystems. It is designed for relatively small files, up to a gigabyte. In
+general, throughput is bounded by a single core and a single network link,
+since the logic resides in a single client JVM using ViewFs, such as in
+FsShell or a MapReduce task.
+
+### Basic Configuration
+
+Consider the following example to understand the basic configuration of Nfly.
+Suppose we want to keep the directory `ads` replicated on three filesystems
+represented by URIs: `uri1`, `uri2` and `uri3`.
+
+```xml
+
+<property>
+  <name>fs.viewfs.mounttable.global.linkNfly../ads</name>
+  <value>uri1,uri2,uri3</value>
+</property>
+
+```
+Note the two consecutive `..` in the property name. They arise because of
+empty settings for advanced tweaking of the mount point, which we will show
+in subsequent sections. The property value is a comma-separated list of URIs.
+
+URIs may point to different clusters in different regions, e.g.
+`hdfs://datacenter-east/ads`, `s3a://models-us-west/ads`,
+`hdfs://datacenter-west/ads`, or in the simplest case to different
+directories under the same filesystem, e.g. `file:/tmp/ads1`,
+`file:/tmp/ads2`, `file:/tmp/ads3`.
+
+All *modifications* performed under the global path `viewfs://global/ads` are
+propagated to all destination URIs if the underlying system is available.
+
+For instance, if we create a file via the hadoop shell
+```bash
+hadoop fs -touchz viewfs://global/ads/z1
+```
+
+We will find it via the local filesystem in the latter configuration:
+```bash
+ls -al /tmp/ads*/z1
+-rw-r--r-- 1 user wheel 0 Mar 11 12:17 /tmp/ads1/z1
+-rw-r--r-- 1 user wheel 0 Mar 11 12:17 /tmp/ads2/z1
+-rw-r--r-- 1 user wheel 0 Mar 11 12:17 /tmp/ads3/z1
+```
+
+A read from the global path is processed by the first filesystem that does
+not result in an exception. The order in which filesystems are accessed
+depends on whether they are available at the moment and whether a
+topological order exists.
+
+### Advanced Configuration
+
+`linkNfly` mount points can be further configured using parameters passed as
+a comma-separated list of key=value pairs. The following parameters are
+currently supported:
+
+`minReplication=int` determines the minimum number of destinations that have
+to process a write modification without exceptions; if fewer succeed, the
+nfly write fails. It is a configuration error to set minReplication higher
+than the number of target URIs. The default is 2.
+
+If minReplication is lower than the number of target URIs, some target URIs
+may miss the latest writes. This can be compensated for by employing more
+expensive read operations controlled by the following settings:
+
+`readMostRecent=boolean` if set to `true` causes the Nfly client to check the
+path under all target URIs instead of just the first one in the topology
+order. Among all replicas available at the moment, the one with the most
+recent modification time is processed.
+
+`repairOnRead=boolean` if set to `true` causes Nfly to copy the most recent
+replica to stale targets such that
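For illustration (not part of the patch above), the advanced options combine
into the mount link name itself as key=value pairs; a sketch, assuming the
syntax described in this section:

```xml
<property>
  <name>fs.viewfs.mounttable.global.linkNfly.minReplication=3,readMostRecent=true,repairOnRead=false./ads</name>
  <value>uri1,uri2,uri3</value>
</property>
```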
HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by
Yiqun Lin.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4743d4a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4743d4a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4743d4a2
Branch: refs/heads/HDFS-7240
Commit: 4743d4a2c70a213a41804a24c776e6db00e1b90d
Parents: 8133cd5
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:28:55 2018 +0800
--
.../src/site/markdown/HDFSRouterFederation.md | 12 +++-
1 file changed, 7 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4743d4a2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5412aae..fdaaa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
A natural extension to this partitioned federation is to add a layer of
software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets
subclusters manage their own block pools independently, and supports
rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets
+subclusters manage their own block pools independently, and will support
+rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)).
+The subclusters in RBF are not required to be independent HDFS clusters; a
+normal federation cluster (with multiple block pools) or a mixed cluster of
+federated and independent clusters is also allowed.
To accomplish these goals, the federation layer directs block accesses to the
proper subcluster, maintains the state of the namespaces, and provides
mechanisms for data rebalancing.
This layer must be scalable, highly available, and fault tolerant.
@@ -324,8 +326,8 @@ The connection to the State Store and the internal caching
at the Router.
| Property | Default | Description|
|:---- |:---- |:---- |
| dfs.federation.router.store.enable | `true` | If `true`, the Router connects
to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` |
Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class
to implement the State Store. |
+| dfs.federation.router.store.serializer |
`org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl`
| Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class |
`org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl`
| Class to implement the State Store. |
| dfs.federation.router.store.connection.test | 60000 | How often to check for
the connection to the State Store in milliseconds. |
| dfs.federation.router.cache.ttl | 60000 | How often to refresh the State
Store caches in milliseconds. |
| dfs.federation.router.store.membership.expiration | 300000 | Expiration time
in milliseconds for a membership record. |
@@ -336,8 +338,8 @@ Forwarding client requests to the right subcluster.
| Property | Default | Description|
|:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver |
Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class |
MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class |
`org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class
to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class |
`org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver`
| Class to resolve the namenode for a subcluster. |
### Namenode monitoring
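For illustration (not part of the patch), overriding one of the corrected
defaults in hdfs-site.xml now means spelling out the fully qualified class
name shown in the tables above:

```xml
<property>
  <name>dfs.federation.router.file.resolver.client.class</name>
  <value>org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver</value>
</property>
```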
HADOOP-15273. distcp can't handle remote stores with different checksum
algorithms.
Contributed by Steve Loughran.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef4d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef4d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef4d942
Branch: refs/heads/HDFS-7240
Commit: 7ef4d942dd96232b0743a40ed25f77065254f94d
Parents: 3bd6b1f
Author: Steve Loughran
Authored: Thu Mar 8 11:24:06 2018 +0000
Committer: Steve Loughran
Committed: Thu Mar 8 11:24:06 2018 +0000
--
.../org/apache/hadoop/tools/DistCpOptions.java | 5
.../tools/mapred/RetriableFileCopyCommand.java | 29 +++-
.../hadoop/tools/mapred/TestCopyMapper.java | 14 +-
3 files changed, 29 insertions(+), 19 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ece1a94..f33f7fd 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -534,11 +534,6 @@ public final class DistCpOptions {
+ "mutually exclusive");
}
- if (!syncFolder && skipCRC) {
-throw new IllegalArgumentException(
-"Skip CRC is valid only with update options");
- }
-
if (!syncFolder && append) {
throw new IllegalArgumentException(
"Append is valid only with update options");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 0311061..55f90d0 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -210,15 +210,30 @@ public class RetriableFileCopyCommand extends
RetriableCommand {
throws IOException {
if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
targetFS, target)) {
- StringBuilder errorMessage = new StringBuilder("Check-sum mismatch
between ")
- .append(source).append(" and ").append(target).append(".");
- if (sourceFS.getFileStatus(source).getBlockSize() !=
+ StringBuilder errorMessage =
+ new StringBuilder("Checksum mismatch between ")
+ .append(source).append(" and ").append(target).append(".");
+ boolean addSkipHint = false;
+ String srcScheme = sourceFS.getScheme();
+ String targetScheme = targetFS.getScheme();
+ if (!srcScheme.equals(targetScheme)
+ && !(srcScheme.contains("hdfs") && targetScheme.contains("hdfs"))) {
+// the filesystems are different and they aren't both hdfs connectors
+errorMessage.append("Source and destination filesystems are of"
++ " different types\n")
+.append("Their checksum algorithms may be incompatible");
+addSkipHint = true;
+ } else if (sourceFS.getFileStatus(source).getBlockSize() !=
targetFS.getFileStatus(target).getBlockSize()) {
-errorMessage.append(" Source and target differ in block-size.")
-.append(" Use -pb to preserve block-sizes during copy.")
-.append(" Alternatively, skip checksum-checks altogether, using
-skipCrc.")
+errorMessage.append(" Source and target differ in block-size.\n")
+.append(" Use -pb to preserve block-sizes during copy.");
+addSkipHint = true;
+ }
+ if (addSkipHint) {
+errorMessage.append(" You can skip checksum-checks altogether "
++ " with -skipcrccheck.\n")
.append(" (NOTE: By skipping checksums, one runs the risk of " +
-"masking data-corruption during file-transfer.)");
+"masking data-corruption during file-transfer.)\n");
}
throw new IOException(errorMessage.toString());
}
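For context, the hint appended above refers to existing distcp flags; a
hypothetical invocation between stores whose checksum algorithms differ
(paths are placeholders):

```bash
# Skip the post-copy checksum comparison entirely. With this patch,
# -skipcrccheck no longer requires -update.
hadoop distcp -skipcrccheck hdfs://nn1:8020/data s3a://example-bucket/data
```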
HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu
Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9a6d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9a6d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9a6d9
Branch: refs/heads/HDFS-7240
Commit: 4cc9a6d9bb34329d6de30706d5432c7cb675bb88
Parents: 583f459
Author: Yiqun Lin
Authored: Thu Mar 8 16:02:34 2018 +0800
Committer: Yiqun Lin
Committed: Thu Mar 8 16:02:34 2018 +0800
--
hadoop-project/src/site/markdown/index.md.vm | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9a6d9/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm
b/hadoop-project/src/site/markdown/index.md.vm
index 9b2d9de..8b9cfda 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -223,7 +223,7 @@ functionality, except the mount table is managed on the
server-side by the
routing layer rather than on the client. This simplifies access to a federated
cluster for existing HDFS clients.
-See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
HDFS Router-based Federation
[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html)
for
more details.
Repository: hadoop
Updated Branches:
refs/heads/trunk 9d6994da1 -> 39537b7c8
HDFS-13235. DiskBalancer: Update Documentation to add newly added options.
Contributed by Bharat Viswanadham.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39537b7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39537b7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39537b7c
Branch: refs/heads/trunk
Commit: 39537b7c84dddfa8084308459565ab77fd24abd3
Parents: 9d6994d
Author: Arpit Agarwal
Authored: Tue Mar 13 16:35:51 2018 -0700
Committer: Arpit Agarwal
Committed: Tue Mar 13 16:35:51 2018 -0700
--
.../hadoop-hdfs/src/main/resources/hdfs-default.xml| 6 +++---
.../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md | 6 +-
2 files changed, 8 insertions(+), 4 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2d3c5e7..f90daba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4651,9 +4651,9 @@
  <name>dfs.disk.balancer.plan.valid.interval</name>
  <value>1d</value>
  <description>
-    Maximum number of hours the disk balancer plan is valid.
-    This setting supports multiple time unit suffixes as described
-    in dfs.heartbeat.interval. If no suffix is specified then milliseconds
+    Maximum amount of time disk balancer plan is valid. This setting
+    supports multiple time unit suffixes as described in
+    dfs.heartbeat.interval. If no suffix is specified then milliseconds
     is assumed.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39537b7c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 6e1bd41..ed0233a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -79,6 +79,10 @@ Execute command takes a plan command executes it against the
datanode that plan
This executes the plan by reading datanode’s address from the plan file.
+| COMMAND\_OPTION| Description |
+|: |: |
+| `-skipDateCheck` | Skip date check and force execute the plan.|
+
### Query
Query command gets the current status of the diskbalancer from a datanode.
@@ -122,7 +126,7 @@ There is a set of diskbalancer settings that can be
controlled via hdfs-site.xml
|`dfs.disk.balancer.max.disk.errors`| Sets the maximum number of errors we can
ignore for a specific move between two disks before it is abandoned. For
example, if a plan has 3 pairs of disks to copy between, and the first disk set
encounters more than 5 errors, then we abandon the first copy and start the
second copy in the plan. The default value of max errors is set to 5.|
|`dfs.disk.balancer.block.tolerance.percent`| The tolerance percent specifies
when we have reached a good enough value for any copy step. For example, if you
specify 10% then getting close to 10% of the target value is good enough.|
|`dfs.disk.balancer.plan.threshold.percent`| The percentage threshold value
for volume Data Density in a plan. If the absolute value of a volume's Data
Density in a node exceeds the threshold, the volumes corresponding to those
disks should be balanced in the plan. The default value is 10.|
-
+|`dfs.disk.balancer.plan.valid.interval`| Maximum amount of time disk balancer
plan is valid. Supports the following suffixes (case insensitive): ms(millis),
s(sec), m(min), h(hour), d(day) to specify the time (such as 2s, 2m, 1h, etc.).
If no suffix is specified then milliseconds is assumed. Default value is 1d|
Debugging
---------
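Illustrative usage of the documented option (plan path hypothetical):

```bash
# Execute a previously generated plan even if it is older than
# dfs.disk.balancer.plan.valid.interval allows.
hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json -skipDateCheck
```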
HADOOP-15273. distcp can't handle remote stores with different checksum
algorithms.
Contributed by Steve Loughran.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef4d942
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef4d942
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef4d942
Branch: refs/heads/HDFS-12996
Commit: 7ef4d942dd96232b0743a40ed25f77065254f94d
Parents: 3bd6b1f
Author: Steve Loughran
Authored: Thu Mar 8 11:24:06 2018 +0000
Committer: Steve Loughran
Committed: Thu Mar 8 11:24:06 2018 +0000
--
.../org/apache/hadoop/tools/DistCpOptions.java | 5
.../tools/mapred/RetriableFileCopyCommand.java | 29 +++-
.../hadoop/tools/mapred/TestCopyMapper.java | 14 +-
3 files changed, 29 insertions(+), 19 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
index ece1a94..f33f7fd 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
@@ -534,11 +534,6 @@ public final class DistCpOptions {
+ "mutually exclusive");
}
- if (!syncFolder && skipCRC) {
-throw new IllegalArgumentException(
-"Skip CRC is valid only with update options");
- }
-
if (!syncFolder && append) {
throw new IllegalArgumentException(
"Append is valid only with update options");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4d942/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
index 0311061..55f90d0 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
@@ -210,15 +210,30 @@ public class RetriableFileCopyCommand extends
RetriableCommand {
throws IOException {
if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
targetFS, target)) {
- StringBuilder errorMessage = new StringBuilder("Check-sum mismatch
between ")
- .append(source).append(" and ").append(target).append(".");
- if (sourceFS.getFileStatus(source).getBlockSize() !=
+ StringBuilder errorMessage =
+ new StringBuilder("Checksum mismatch between ")
+ .append(source).append(" and ").append(target).append(".");
+ boolean addSkipHint = false;
+ String srcScheme = sourceFS.getScheme();
+ String targetScheme = targetFS.getScheme();
+ if (!srcScheme.equals(targetScheme)
+ && !(srcScheme.contains("hdfs") && targetScheme.contains("hdfs"))) {
+// the filesystems are different and they aren't both hdfs connectors
+errorMessage.append("Source and destination filesystems are of"
++ " different types\n")
+.append("Their checksum algorithms may be incompatible");
+addSkipHint = true;
+ } else if (sourceFS.getFileStatus(source).getBlockSize() !=
targetFS.getFileStatus(target).getBlockSize()) {
-errorMessage.append(" Source and target differ in block-size.")
-.append(" Use -pb to preserve block-sizes during copy.")
-.append(" Alternatively, skip checksum-checks altogether, using
-skipCrc.")
+errorMessage.append(" Source and target differ in block-size.\n")
+.append(" Use -pb to preserve block-sizes during copy.");
+addSkipHint = true;
+ }
+ if (addSkipHint) {
+errorMessage.append(" You can skip checksum-checks altogether "
++ " with -skipcrccheck.\n")
.append(" (NOTE: By skipping checksums, one runs the risk of " +
-"masking data-corruption during file-transfer.)");
+"masking data-corruption during file-transfer.)\n");
}
throw new IOException(errorMessage.toString());
}
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users.
Contribute by Yuanbo Liu."
This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3.
Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae
(cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa6a8b78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa6a8b78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa6a8b78
Branch: refs/heads/HDFS-12996
Commit: fa6a8b78d481d3b4d355e1bf078f30dd5e09850d
Parents: 3a8dade
Author: Owen O'Malley
Authored: Thu Mar 1 10:15:22 2018 -0800
Committer: Wangda Tan
Committed: Fri Mar 9 22:46:41 2018 -0800
--
.../AuthenticationFilterInitializer.java| 9 +-
.../AuthenticationWithProxyUserFilter.java | 119 -
.../hadoop/http/TestHttpServerWithSpengo.java | 481 ---
.../security/TestAuthenticationFilter.java | 13 +-
.../TestAuthenticationWithProxyUserFilter.java | 79 ---
5 files changed, 13 insertions(+), 688 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 65d2211..ca221f5 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -29,9 +29,8 @@ import java.util.HashMap;
import java.util.Map;
/**
- * Initializes {@link AuthenticationWithProxyUserFilter}
- * which provides support for Kerberos HTTP SPNEGO authentication
- * and proxy user authentication.
+ * Initializes hadoop-auth AuthenticationFilter which provides support for
+ * Kerberos HTTP SPNEGO authentication.
*
* It enables anonymous access, simple/pseudo and Kerberos HTTP SPNEGO
* authentication for Hadoop JobTracker, NameNode, DataNodes and
@@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends
FilterInitializer {
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> filterConfig = getFilterConfigMap(conf, PREFIX);
-// extend AuthenticationFilter's feature to
-// support proxy user operation.
container.addFilter("authentication",
-AuthenticationWithProxyUserFilter.class.getName(),
+AuthenticationFilter.class.getName(),
filterConfig);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
deleted file mode 100644
index ea9b282..0000000
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.util.HttpExceptionUtils;
-import
MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are
both present twice in mapred-default.xml. Contributed by Sen Zhao
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fa3a63
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fa3a63
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fa3a63
Branch: refs/heads/HDFS-12996
Commit: 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf
Parents: 3f7bd46
Author: Jason Lowe
Authored: Fri Mar 9 10:41:16 2018 -0600
Committer: Jason Lowe
Committed: Fri Mar 9 10:41:16 2018 -0600
--
.../src/main/resources/mapred-default.xml | 16
1 file changed, 16 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fa3a63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
--
diff --git
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index d0e5a2d..cf8be33 100644
---
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -422,22 +422,6 @@
-<property>
-  <name>mapreduce.map.cpu.vcores</name>
-  <value>1</value>
-  <description>
-      The number of virtual cores required for each map task.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.reduce.cpu.vcores</name>
-  <value>1</value>
-  <description>
-      The number of virtual cores required for each reduce task.
-  </description>
-</property>
-
 <property>
   <name>mapreduce.reduce.merge.inmem.threshold</name>
   <value>1000</value>
   <description>The threshold, in terms of the number of files
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8211a3d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8211a3d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8211a3d4
Branch: refs/heads/HDFS-12996
Commit: 8211a3d4693fea46cff11c5883c16a9b4df7b4de
Parents: f82d38d
Author: Xiao Chen
Authored: Tue Mar 13 10:48:35 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 10:48:45 2018 -0700
--
.../hdfs/TestFileStatusWithDefaultECPolicy.java | 107 +++
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 102 --
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49 +
3 files changed, 156 insertions(+), 102 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8211a3d4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
new file mode 100644
index 0000000..a5a
--- /dev/null
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithDefaultECPolicy.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+/**
+ * This test ensures the statuses of EC files with the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
+ private MiniDFSCluster cluster;
+ private DistributedFileSystem fs;
+ private DFSClient client;
+
+ @Rule
+ public Timeout globalTimeout = new Timeout(300000);
+
+ @Before
+ public void before() throws IOException {
+HdfsConfiguration conf = new HdfsConfiguration();
+cluster =
+new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+cluster.waitActive();
+fs = cluster.getFileSystem();
+client = fs.getClient();
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
+ }
+
+ @After
+ public void after() {
+if (cluster != null) {
+ cluster.shutdown();
+ cluster = null;
+}
+ }
+
+ public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+ }
+
+ @Test
+ public void testFileStatusWithECPolicy() throws Exception {
+// test directory doesn't have an EC policy
+final Path dir = new Path("/foo");
+assertTrue(fs.mkdir(dir, FsPermission.getDirDefault()));
+ContractTestUtils.assertNotErasureCoded(fs, dir);
+assertNull(client.getFileInfo(dir.toString()).getErasureCodingPolicy());
+// test file doesn't have an EC policy
+final Path file = new Path(dir, "foo");
+fs.create(file).close();
+assertNull(client.getFileInfo(file.toString()).getErasureCodingPolicy());
+ContractTestUtils.assertNotErasureCoded(fs, file);
+fs.delete(file, true);
+
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+// set EC policy on dir
+fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
+ContractTestUtils.assertErasureCoded(fs, dir);
+final ErasureCodingPolicy ecPolicy2 =
+client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
+assertNotNull(ecPolicy2);
+
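The companion test added by this change is not shown in full above; a sketch
of its shape (assuming StripedFileTestUtil offers a random non-default policy
helper, as HDFS-12505 describes):

```java
package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

/**
 * Re-runs every case in TestFileStatusWithDefaultECPolicy against a
 * random (non-default) erasure coding policy.
 */
public class TestFileStatusWithRandomECPolicy
    extends TestFileStatusWithDefaultECPolicy {
  @Override
  public ErasureCodingPolicy getEcPolicy() {
    return StripedFileTestUtil.getRandomNonDefaultECPolicy();
  }
}
```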
Revert "HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma."
This reverts commit 84c10955863eca1e300aeeac1d9cd7a1186144b6.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f82d38dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f82d38dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f82d38dc
Branch: refs/heads/HDFS-12996
Commit: f82d38dcb3259dda6275c75765738fb9b249ee73
Parents: 3b8dbc2
Author: Xiao Chen
Authored: Tue Mar 13 10:30:07 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 10:36:16 2018 -0700
--
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 ++
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49
2 files changed, 5 insertions(+), 59 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index a5a..077cf3a 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,10 +34,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-/**
- * This test ensures the statuses of EC files with the default policy.
- */
-public class TestFileStatusWithDefaultECPolicy {
+public class TestFileStatusWithECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
@@ -53,7 +50,8 @@ public class TestFileStatusWithDefaultECPolicy {
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
-fs.enableErasureCodingPolicy(getEcPolicy().getName());
+fs.enableErasureCodingPolicy(
+StripedFileTestUtil.getDefaultECPolicy().getName());
}
@After
@@ -64,10 +62,6 @@ public class TestFileStatusWithDefaultECPolicy {
}
}
- public ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
- }
-
@Test
public void testFileStatusWithECPolicy() throws Exception {
// test directory doesn't have an EC policy
@@ -82,7 +76,8 @@ public class TestFileStatusWithDefaultECPolicy {
ContractTestUtils.assertNotErasureCoded(fs, file);
fs.delete(file, true);
-final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
+final ErasureCodingPolicy ecPolicy1 =
+StripedFileTestUtil.getDefaultECPolicy();
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
ContractTestUtils.assertErasureCoded(fs, dir);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f82d38dc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
deleted file mode 100644
index 18902a7..0000000
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This test extends TestFileStatusWithDefaultECPolicy to use a random
- * (non-default) EC policy.
- */
-public class TestFileStatusWithRandomECPolicy
HDFS-12780. Fix spelling mistake in DistCpUtils.java. Contributed by Jianfei
Jiang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/45cccadd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/45cccadd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/45cccadd
Branch: refs/heads/HDFS-12996
Commit: 45cccadd2e84b99ec56f1cc0e2248dc8fc844f38
Parents: 8211a3d
Author: Chris Douglas
Authored: Tue Mar 13 11:08:11 2018 -0700
Committer: Chris Douglas
Committed: Tue Mar 13 11:08:11 2018 -0700
--
.../src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java| 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/45cccadd/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
--
diff --git
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 2b3b529..eba4bee 100644
---
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -527,7 +527,7 @@ public class DistCpUtils {
/**
* Utility to compare checksums for the paths specified.
*
- * If checksums's can't be retrieved, it doesn't fail the test
+ * If checksums can't be retrieved, it doesn't fail the test
* Only time the comparison would fail is when checksums are
* available and they don't match
*
HADOOP-15296. Fix a wrong link for RBF in the top page. Contributed by Takanobu
Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc9a6d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc9a6d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc9a6d9
Branch: refs/heads/HDFS-12996
Commit: 4cc9a6d9bb34329d6de30706d5432c7cb675bb88
Parents: 583f459
Author: Yiqun Lin
Authored: Thu Mar 8 16:02:34 2018 +0800
Committer: Yiqun Lin
Committed: Thu Mar 8 16:02:34 2018 +0800
--
hadoop-project/src/site/markdown/index.md.vm | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc9a6d9/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm
b/hadoop-project/src/site/markdown/index.md.vm
index 9b2d9de..8b9cfda 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -223,7 +223,7 @@ functionality, except the mount table is managed on the
server-side by the
routing layer rather than on the client. This simplifies access to a federated
cluster for existing HDFS clients.
-See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the
+See [HDFS-10467](https://issues.apache.org/jira/browse/HDFS-10467) and the
HDFS Router-based Federation
[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html)
for
more details.
HDFS-13226. RBF: Throw an exception if a mount table entry fails validation.
Contributed by maobaolong.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19292bc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19292bc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19292bc2
Branch: refs/heads/HDFS-12996
Commit: 19292bc264cada5117ec76063d36cc88159afdf4
Parents: 7fab787
Author: Yiqun Lin
Authored: Tue Mar 13 11:03:31 2018 +0800
Committer: Yiqun Lin
Committed: Tue Mar 13 11:03:31 2018 +0800
--
.../federation/store/records/BaseRecord.java| 16 ++--
.../store/records/MembershipState.java | 29 -
.../federation/store/records/MountTable.java| 42 +++
.../federation/store/records/RouterState.java | 9 ++--
.../federation/router/TestRouterAdminCLI.java | 38 +++--
.../store/records/TestMountTable.java | 43
6 files changed, 137 insertions(+), 40 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
index 79f99c8..d5e60ce 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
@@ -32,6 +32,10 @@ import org.apache.hadoop.util.Time;
*
*/
public abstract class BaseRecord implements Comparable<BaseRecord> {
+ public static final String ERROR_MSG_CREATION_TIME_NEGATIVE =
+ "The creation time for the record cannot be negative.";
+ public static final String ERROR_MSG_MODIFICATION_TIME_NEGATIVE =
+ "The modification time for the record cannot be negative.";
/**
* Set the modification time for the record.
@@ -193,11 +197,15 @@ public abstract class BaseRecord implements
Comparable {
/**
* Validates the record. Called when the record is created, populated from
the
- * state store, and before committing to the state store.
- * @return If the record is valid.
+ * state store, and before committing to the state store. If validation
+ * fails, an exception is thrown.
*/
- public boolean validate() {
-return getDateCreated() > 0 && getDateModified() > 0;
+ public void validate() {
+if (getDateCreated() <= 0) {
+ throw new IllegalArgumentException(ERROR_MSG_CREATION_TIME_NEGATIVE);
+} else if (getDateModified() <= 0) {
+ throw new IllegalArgumentException(ERROR_MSG_MODIFICATION_TIME_NEGATIVE);
+}
}
@Override
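Because validate() now returns void and signals problems with an IllegalArgumentException, callers that used to branch on the boolean have to catch instead. A hedged sketch of the new calling pattern (the wrapper class and logging are illustrative, not part of this patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;

final class RecordCommitSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(RecordCommitSketch.class);

  /** Illustrative caller: validate before writing to the state store. */
  static void commitRecord(BaseRecord record) {
    try {
      record.validate(); // throws IllegalArgumentException on bad fields
    } catch (IllegalArgumentException e) {
      // The message now pinpoints the invalid field, e.g.
      // BaseRecord.ERROR_MSG_CREATION_TIME_NEGATIVE.
      LOG.error("Skipping invalid record {}: {}", record, e.getMessage());
      throw e;
    }
    // ... write the record to the state store ...
  }
}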
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19292bc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
index ac0b22e..e33dedf 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
@@ -37,6 +37,14 @@ import
org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerialize
*/
public abstract class MembershipState extends BaseRecord
implements FederationNamenodeContext {
+ public static final String ERROR_MSG_NO_NS_SPECIFIED =
+ "Invalid registration, no nameservice specified ";
+ public static final String ERROR_MSG_NO_WEB_ADDR_SPECIFIED =
+ "Invalid registration, no web address specified ";
+ public static final String ERROR_MSG_NO_RPC_ADDR_SPECIFIED =
+ "Invalid registration, no rpc address specified ";
+ public static final String ERROR_MSG_NO_BP_SPECIFIED =
+ "Invalid registration, no block pool specified ";
/** Expiration time in ms for this entry. */
private static long expirationMs;
@@ -226,26 +234,25 @@ public abstract class MembershipState extends BaseRecord
* is missing required information.
*/
@Override
- public boolean validate() {
-boolean ret =
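The MembershipState hunk is cut off at this point. Judging from the four error-message constants added above, the override presumably replaces the boolean checks with the same throw-on-failure pattern as BaseRecord, roughly as follows (a sketch, not the verbatim patch; the getters come from FederationNamenodeContext):

// Inside MembershipState (sketch):
@Override
public void validate() {
  super.validate();
  if (getNameserviceId() == null || getNameserviceId().isEmpty()) {
    throw new IllegalArgumentException(ERROR_MSG_NO_NS_SPECIFIED + this);
  }
  if (getWebAddress() == null || getWebAddress().isEmpty()) {
    throw new IllegalArgumentException(ERROR_MSG_NO_WEB_ADDR_SPECIFIED + this);
  }
  if (getRpcAddress() == null || getRpcAddress().isEmpty()) {
    throw new IllegalArgumentException(ERROR_MSG_NO_RPC_ADDR_SPECIFIED + this);
  }
  if (getBlockPoolId() == null || getBlockPoolId().isEmpty()) {
    throw new IllegalArgumentException(ERROR_MSG_NO_BP_SPECIFIED + this);
  }
}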
HDFS-12505. Extend TestFileStatusWithECPolicy with a random EC policy.
Contributed by Takanobu Asanuma.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84c10955
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84c10955
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84c10955
Branch: refs/heads/HDFS-12996
Commit: 84c10955863eca1e300aeeac1d9cd7a1186144b6
Parents: b2b9ce5
Author: Xiao Chen
Authored: Tue Mar 13 09:57:20 2018 -0700
Committer: Xiao Chen
Committed: Tue Mar 13 09:58:03 2018 -0700
--
.../hadoop/hdfs/TestFileStatusWithECPolicy.java | 15 --
.../hdfs/TestFileStatusWithRandomECPolicy.java | 49
2 files changed, 59 insertions(+), 5 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
index 077cf3a..a5a 100644
---
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
@@ -34,7 +34,10 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
-public class TestFileStatusWithECPolicy {
+/**
+ * This test verifies the statuses of EC files under the default policy.
+ */
+public class TestFileStatusWithDefaultECPolicy {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private DFSClient client;
@@ -50,8 +53,7 @@ public class TestFileStatusWithECPolicy {
cluster.waitActive();
fs = cluster.getFileSystem();
client = fs.getClient();
-fs.enableErasureCodingPolicy(
-StripedFileTestUtil.getDefaultECPolicy().getName());
+fs.enableErasureCodingPolicy(getEcPolicy().getName());
}
@After
@@ -62,6 +64,10 @@ public class TestFileStatusWithECPolicy {
}
}
+ public ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+ }
+
@Test
public void testFileStatusWithECPolicy() throws Exception {
// test directory doesn't have an EC policy
@@ -76,8 +82,7 @@ public class TestFileStatusWithECPolicy {
ContractTestUtils.assertNotErasureCoded(fs, file);
fs.delete(file, true);
-final ErasureCodingPolicy ecPolicy1 =
-StripedFileTestUtil.getDefaultECPolicy();
+final ErasureCodingPolicy ecPolicy1 = getEcPolicy();
// set EC policy on dir
fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
ContractTestUtils.assertErasureCoded(fs, dir);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/84c10955/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
--
diff --git
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
new file mode 100644
index 000..18902a7
--- /dev/null
+++
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithRandomECPolicy.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestFileStatusWithDefaultECPolicy to use a random
+ * (non-default) EC policy.
+ */
+public class TestFileStatusWithRandomECPolicy extends
+TestFileStatusWithDefaultECPolicy {
+ private static final Logger LOG =
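The new-file hunk is truncated here. Past the logger declaration, the subclass essentially only needs to override getEcPolicy(); a sketch of the likely remainder, assuming StripedFileTestUtil offers a random non-default-policy helper (the helper name is an assumption):

public class TestFileStatusWithRandomECPolicy extends
    TestFileStatusWithDefaultECPolicy {
  private static final Logger LOG = LoggerFactory.getLogger(
      TestFileStatusWithRandomECPolicy.class);

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // Assumed test utility: returns a random EC policy other than the
    // default one, so the parent's assertions run against it.
    ErasureCodingPolicy policy =
        StripedFileTestUtil.getRandomNonDefaultECPolicy();
    LOG.info("{} runs with EC policy {}.",
        getClass().getSimpleName(), policy.getName());
    return policy;
  }
}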
HADOOP-15311. HttpServer2 needs a way to configure the acceptor/selector count.
Contributed by Erik Krogen
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d6994da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d6994da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d6994da
Branch: refs/heads/HDFS-12996
Commit: 9d6994da1964c1125a33b3a65e7a7747e2d0bc59
Parents: a82d4a2
Author: Chris Douglas
Authored: Tue Mar 13 13:53:58 2018 -0700
Committer: Chris Douglas
Committed: Tue Mar 13 13:55:18 2018 -0700
--
.../org/apache/hadoop/http/HttpServer2.java | 12 +-
.../org/apache/hadoop/http/TestHttpServer.java | 23 +++-
2 files changed, 33 insertions(+), 2 deletions(-)
--
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 7e12640..8adb114 100644
---
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -134,6 +134,14 @@ public final class HttpServer2 implements FilterContainer {
"hadoop.http.socket.backlog.size";
public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+ public static final String HTTP_ACCEPTOR_COUNT_KEY =
+ "hadoop.http.acceptor.count";
+ // -1 to use default behavior of setting count based on CPU core count
+ public static final int HTTP_ACCEPTOR_COUNT_DEFAULT = -1;
+ public static final String HTTP_SELECTOR_COUNT_KEY =
+ "hadoop.http.selector.count";
+ // -1 to use default behavior of setting count based on CPU core count
+ public static final int HTTP_SELECTOR_COUNT_DEFAULT = -1;
public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
public static final String FILTER_INITIALIZER_PROPERTY
@@ -465,7 +473,9 @@ public final class HttpServer2 implements FilterContainer {
private ServerConnector createHttpChannelConnector(
Server server, HttpConfiguration httpConfig) {
- ServerConnector conn = new ServerConnector(server);
+ ServerConnector conn = new ServerConnector(server,
+ conf.getInt(HTTP_ACCEPTOR_COUNT_KEY, HTTP_ACCEPTOR_COUNT_DEFAULT),
+ conf.getInt(HTTP_SELECTOR_COUNT_KEY, HTTP_SELECTOR_COUNT_DEFAULT));
ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
conn.addConnectionFactory(connFactory);
configureChannelConnector(conn);
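Both new keys are ordinary Configuration integers, so pinning the counts rather than letting them be derived from the CPU core count (the -1 defaults) takes three setInt calls. A small sketch with illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

final class HttpServerTuningSketch {
  static Configuration tunedConf() {
    // Illustrative: 2 acceptors and 4 selectors per connector inside a
    // 16-thread pool. Leaving the keys unset (-1) keeps the Jetty
    // defaults based on the CPU core count.
    Configuration conf = new Configuration();
    conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 16);
    conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, 2);
    conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, 4);
    return conf;
  }
}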
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d6994da/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
--
diff --git
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index ca7e466..7350d09 100644
---
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -147,7 +147,7 @@ public class TestHttpServer extends
HttpServerFunctionalTest {
@BeforeClass public static void setup() throws Exception {
Configuration conf = new Configuration();
-conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10);
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
server = createTestServer(conf);
server.addServlet("echo", "/echo", EchoServlet.class);
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -195,6 +195,27 @@ public class TestHttpServer extends
HttpServerFunctionalTest {
ready.await();
start.countDown();
}
+
+ /**
+ * Test that the number of acceptors and selectors can be configured by
+ * trying to configure more of them than would be allowed based on the
+ * maximum thread count.
+ */
+ @Test
+ public void testAcceptorSelectorConfigurability() throws Exception {
+Configuration conf = new Configuration();
+conf.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, MAX_THREADS);
+conf.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, MAX_THREADS - 2);
+conf.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, MAX_THREADS - 2);
+HttpServer2 badserver = createTestServer(conf);
+try {
+
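The test is truncated inside the try block. Going by its Javadoc, it over-subscribes acceptors and selectors against the thread budget and expects startup to fail; the remainder presumably looks roughly like this (a sketch of the expected assertion, assuming JUnit's static fail/assertTrue imports and that Jetty's rejection surfaces as an IllegalStateException wrapped in an IOException):

  badserver.start();
  fail("Server should not start with more acceptors/selectors than "
      + "the thread pool can accommodate");
} catch (IOException ioe) {
  // Jetty rejects the over-subscribed connector configuration.
  assertTrue(ioe.getCause() instanceof IllegalStateException);
}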