[2/2] hadoop git commit: HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading mechanism. Contributed by Genmao Yu.

2018-04-10 Thread sammichen
HADOOP-14999. AliyunOSS: provide one asynchronous multi-part based uploading 
mechanism. Contributed by Genmao Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7de3cfa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7de3cfa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7de3cfa

Branch: refs/heads/branch-2.9.1
Commit: a7de3cfa712087b3a8476f9ad83c3b1118fa5394
Parents: b42f02c
Author: Sammi Chen 
Authored: Tue Apr 10 16:45:53 2018 +0800
Committer: Sammi Chen 
Committed: Tue Apr 10 16:45:53 2018 +0800

--
 .../aliyun/oss/AliyunCredentialsProvider.java   |   3 +-
 .../aliyun/oss/AliyunOSSBlockOutputStream.java  | 213 +++
 .../fs/aliyun/oss/AliyunOSSFileSystem.java  |  28 ++-
 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 167 ---
 .../fs/aliyun/oss/AliyunOSSOutputStream.java| 111 --
 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 117 +++---
 .../apache/hadoop/fs/aliyun/oss/Constants.java  |  23 +-
 .../oss/TestAliyunOSSBlockOutputStream.java | 115 ++
 .../fs/aliyun/oss/TestAliyunOSSInputStream.java |  10 +-
 .../aliyun/oss/TestAliyunOSSOutputStream.java   |  91 
 .../contract/TestAliyunOSSContractDistCp.java   |   2 +-
 11 files changed, 547 insertions(+), 333 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7de3cfa/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
index b46c67a..58c14a9 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunCredentialsProvider.java
@@ -35,8 +35,7 @@ import static org.apache.hadoop.fs.aliyun.oss.Constants.*;
 public class AliyunCredentialsProvider implements CredentialsProvider {
   private Credentials credentials = null;
 
-  public AliyunCredentialsProvider(Configuration conf)
-  throws IOException {
+  public AliyunCredentialsProvider(Configuration conf) throws IOException {
 String accessKeyId;
 String accessKeySecret;
 String securityToken;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7de3cfa/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
new file mode 100644
index 000..2d9a13b
--- /dev/null
+++ 
b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.aliyun.oss;
+
+import com.aliyun.oss.model.PartETag;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+
+/**
+ * Asynchronous multi-part based uploading 
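
The digest cuts the class javadoc off here, but the shape of the change is visible from the imports: AliyunOSSBlockOutputStream buffers writes into local block files and, as each block fills, uploads it as one multipart part on a background executor, then completes the multipart upload from the collected PartETags in close(). Below is a minimal sketch of that pattern, assuming a hypothetical BlockStore interface in place of the OSS SDK calls and plain java.util.concurrent in place of Guava's listenable futures; it omits the committed stream's error cleanup and its single-PUT path for small objects.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/** Hypothetical minimal store API standing in for the OSS multipart calls. */
interface BlockStore {
  String initMultipartUpload(String key);                 // returns an uploadId
  String uploadPart(String key, String uploadId, int partNumber, byte[] data);
  void completeMultipartUpload(String key, String uploadId, List<String> etags);
}

/** Sketch of a block-buffered stream that uploads parts asynchronously. */
class AsyncBlockOutputStream extends OutputStream {
  private final BlockStore store;
  private final String key;
  private final int blockSize;
  private final ExecutorService executor = Executors.newFixedThreadPool(4);
  private final List<Future<String>> pendingParts = new ArrayList<>();
  private ByteArrayOutputStream block = new ByteArrayOutputStream();
  private String uploadId;
  private int partNumber = 0;

  AsyncBlockOutputStream(BlockStore store, String key, int blockSize) {
    this.store = store;
    this.key = key;
    this.blockSize = blockSize;
  }

  @Override
  public void write(int b) throws IOException {
    block.write(b);
    if (block.size() >= blockSize) {
      uploadCurrentBlock();      // hand the full block to the executor, keep writing
    }
  }

  private void uploadCurrentBlock() {
    if (uploadId == null) {
      uploadId = store.initMultipartUpload(key);   // lazily start the upload
    }
    final byte[] data = block.toByteArray();
    final int part = ++partNumber;
    pendingParts.add(executor.submit(() -> store.uploadPart(key, uploadId, part, data)));
    block = new ByteArrayOutputStream();
  }

  @Override
  public void close() throws IOException {
    try {
      if (block.size() > 0 || partNumber == 0) {
        uploadCurrentBlock();    // flush the final (possibly only) part
      }
      List<String> etags = new ArrayList<>();
      for (Future<String> f : pendingParts) {
        etags.add(f.get());      // block until every part has landed
      }
      store.completeMultipartUpload(key, uploadId, etags);
    } catch (InterruptedException | ExecutionException e) {
      throw new IOException("multipart upload failed", e);
    } finally {
      executor.shutdown();
    }
  }
}

The design point is that write() never waits on the network: the caller keeps filling the next block while earlier parts upload in parallel, and only close() synchronizes on the outstanding futures.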

hadoop git commit: HADOOP-15374. Add links of the new features of 3.1.0 to the top page

2018-04-10 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk e87be8a2a -> 7623cc5a9


HADOOP-15374. Add links of the new features of 3.1.0 to the top page

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7623cc5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7623cc5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7623cc5a

Branch: refs/heads/trunk
Commit: 7623cc5a982219fff2bdd9a84650f45106cbdf47
Parents: e87be8a
Author: Takanobu Asanuma 
Authored: Tue Apr 10 18:59:40 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 18:59:40 2018 +0900

--
 hadoop-project/src/site/site.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7623cc5a/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index b5ecd73..fdf5583 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -106,6 +106,7 @@
   [context and one added <item> element elided: the XML markup was
    stripped by the mail archive]
@@ -147,6 +148,9 @@
   [context and three added <item> elements elided: the XML markup was
    stripped by the mail archive; the four insertions are the new menu
    links to the 3.1.0 feature docs]





[1/2] hadoop git commit: HDFS-11915. Sync rbw dir on the first hsync() to avoid file lost on power failure. Contributed by Vinayakumar B.

2018-04-10 Thread sammichen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9.1 91bb336d2 -> a7de3cfa7


HDFS-11915. Sync rbw dir on the first hsync() to avoid file lost on power 
failure. Contributed by Vinayakumar B.

(cherry picked from commit 2273499aef18ac2c7ffc435a61db8cea591e8b1f)
(cherry picked from commit f24d3b69b403f3a2c5af6b9c74a643fb9f4492e5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b42f02ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b42f02ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b42f02ca

Branch: refs/heads/branch-2.9.1
Commit: b42f02ca0c011f5998a12dbbc22e2674a22d
Parents: 91bb336
Author: Wei-Chiu Chuang 
Authored: Fri Jan 12 10:00:00 2018 -0800
Committer: Sammi Chen 
Committed: Tue Apr 10 11:41:48 2018 +0800

--
 .../hdfs/server/datanode/BlockReceiver.java   |  9 +
 .../hadoop/hdfs/server/datanode/DatanodeUtil.java | 18 ++
 .../datanode/fsdataset/impl/FsDatasetImpl.java| 15 ++-
 3 files changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index c8a33ca..7f381b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,6 +24,7 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
@@ -127,6 +128,7 @@ class BlockReceiver implements Closeable {
 
   private boolean syncOnClose;
   private volatile boolean dirSyncOnFinalize;
+  private boolean dirSyncOnHSyncDone = false;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -421,6 +423,13 @@ class BlockReceiver implements Closeable {
   }
   flushTotalNanos += flushEndNanos - flushStartNanos;
 }
+if (isSync && !dirSyncOnHSyncDone && replicaInfo instanceof ReplicaInfo) {
+  ReplicaInfo rInfo = (ReplicaInfo) replicaInfo;
+  File baseDir = rInfo.getBlockFile().getParentFile();
+  FileIoProvider fileIoProvider = datanode.getFileIoProvider();
+  DatanodeUtil.fsyncDirectory(fileIoProvider, rInfo.getVolume(), baseDir);
+  dirSyncOnHSyncDone = true;
+}
 if (checksumOut != null || streams.getDataOut() != null) {
   datanode.metrics.addFlushNanos(flushTotalNanos);
   if (isSync) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index c98ff54..e29a5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -142,4 +142,22 @@ public class DatanodeUtil {
 }
 return (FileInputStream)lin.getWrappedStream();
   }
+
+  /**
+   * Call fsync on specified directories to sync metadata changes.
+   * @param fileIoProvider
+   * @param volume
+   * @param dirs
+   * @throws IOException
+   */
+  public static void fsyncDirectory(FileIoProvider fileIoProvider,
+  FsVolumeSpi volume, File... dirs) throws IOException {
+for (File dir : dirs) {
+  try {
+fileIoProvider.dirSync(volume, dir);
+  } catch (IOException e) {
+throw new IOException("Failed to sync " + dir, e);
+  }
+}
+  }
 }
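
The reason a directory sync is needed at all: fsync on the replica's block and meta files makes their contents durable, but the rbw directory entry that names them can still be lost on power failure until the directory inode itself is synced, which fsyncDirectory() above does once per block via the FileIoProvider. On POSIX filesystems the same effect can be sketched with plain NIO; this is an illustration, not Hadoop's code path, and some platforms (notably Windows) refuse to open a directory this way:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public final class DirSync {
  /**
   * Fsync a directory so that recently created or renamed entries survive
   * a power failure. Works on Linux/ext4 and similar filesystems; expect
   * an IOException where directories cannot be opened as channels.
   */
  static void fsyncDirectory(Path dir) throws IOException {
    try (FileChannel ch = FileChannel.open(dir, StandardOpenOption.READ)) {
      ch.force(true);   // flush directory metadata (the new entry) to disk
    }
  }

  public static void main(String[] args) throws IOException {
    // e.g. after creating a new block file in the rbw directory:
    fsyncDirectory(Paths.get("/tmp"));
  }
}

The dirSyncOnHSyncDone flag in BlockReceiver exists because this only needs to happen once per replica: after the first hsync() has made the directory entry durable, later hsyncs need only flush file data.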

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b42f02ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 

hadoop git commit: YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to graph view. Contributed by Gergely Novák.

2018-04-10 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7623cc5a9 -> 7c1e77dda


YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to 
graph view. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c1e77dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c1e77dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c1e77dd

Branch: refs/heads/trunk
Commit: 7c1e77dda4cb3ba8952328d142aafcf0366b5903
Parents: 7623cc5
Author: Sunil G 
Authored: Tue Apr 10 16:09:09 2018 +0530
Committer: Sunil G 
Committed: Tue Apr 10 16:09:09 2018 +0530

--
 .../main/webapp/app/components/timeline-view.js | 35 ++--
 .../webapp/app/controllers/yarn-app-attempt.js  |  9 -
 .../webapp/app/controllers/yarn-app/attempts.js | 11 --
 .../app/templates/components/timeline-view.hbs  | 12 +++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  2 ++
 .../webapp/app/templates/yarn-app/attempts.hbs  |  2 ++
 6 files changed, 52 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index 65a8cb1..3588009 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -25,6 +25,13 @@ export default Ember.Component.extend({
   tableDefinition: TableDefinition.create({
 searchType: 'manual',
   }),
+  graphDrawn: false,
+
+  actions: {
+changeViewType(param) {
+  this.sendAction("changeViewType", param);
+}
+  },
 
   canvas: {
 svg: undefined,
@@ -235,12 +242,10 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-// init tooltip
-this.initTooltip();
+// init model
 this.modelArr = [];
 this.containerIdArr = [];
 
-// init model
 if (this.get("rmModel")) {
   this.get("rmModel").forEach(function(o) {
 if(!this.modelArr.contains(o)) {
@@ -258,16 +263,30 @@ export default Ember.Component.extend({
   }.bind(this));
 }
 
-if(this.modelArr.length === 0) {
+if (this.modelArr.length === 0) {
   return;
 }
 
 this.modelArr.sort(function(a, b) {
   var tsA = a.get("startTs");
   var tsB = b.get("startTs");
-
   return tsA - tsB;
 });
+
+if (this.get('attemptModel')) {
+  this.setAttemptsGridColumnsAndRows();
+} else {
+  this.setContainersGridColumnsAndRows();
+}
+  },
+
+  didUpdate: function() {
+if (this.get("viewType") === "grid" || this.graphDrawn) {
+  return;
+}
+
+this.initTooltip();
+
 var begin = 0;
 if (this.modelArr.length > 0) {
   begin = this.modelArr[0].get("startTs");
@@ -289,11 +308,7 @@ export default Ember.Component.extend({
   this.setSelected(this.modelArr[0]);
 }
 
-if (this.get('attemptModel')) {
-  this.setAttemptsGridColumnsAndRows();
-} else {
-  this.setContainersGridColumnsAndRows();
-}
+this.graphDrawn = true;
   },
 
   setAttemptsGridColumnsAndRows: function() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index 4c8b8a1..116920d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -19,8 +19,15 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["service"],
+  queryParams: ["service", "viewType"],
   service: undefined,
+  viewType: "graph",
+
+  actions: {
+changeViewType(param) {
+  this.set("viewType", param);
+}
+  },
 
   breadcrumbs: Ember.computed("model.attempt.appId", "model.attempt.id", 
function () {
 var appId = this.get("model.attempt.appId");


hadoop git commit: YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to graph view. Contributed by Gergely Novák.

2018-04-10 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 77cca3bef -> 5387b3f06


YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to 
graph view. Contributed by Gergely Novák.

(cherry picked from commit 7c1e77dda4cb3ba8952328d142aafcf0366b5903)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5387b3f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5387b3f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5387b3f0

Branch: refs/heads/branch-3.1
Commit: 5387b3f066e73cd9eedfdca6639cee59b514b448
Parents: 77cca3b
Author: Sunil G 
Authored: Tue Apr 10 16:09:09 2018 +0530
Committer: Sunil G 
Committed: Tue Apr 10 16:09:49 2018 +0530

--
 .../main/webapp/app/components/timeline-view.js | 35 ++--
 .../webapp/app/controllers/yarn-app-attempt.js  |  9 -
 .../webapp/app/controllers/yarn-app/attempts.js | 11 --
 .../app/templates/components/timeline-view.hbs  | 12 +++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  2 ++
 .../webapp/app/templates/yarn-app/attempts.hbs  |  2 ++
 6 files changed, 52 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5387b3f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index 65a8cb1..3588009 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -25,6 +25,13 @@ export default Ember.Component.extend({
   tableDefinition: TableDefinition.create({
 searchType: 'manual',
   }),
+  graphDrawn: false,
+
+  actions: {
+changeViewType(param) {
+  this.sendAction("changeViewType", param);
+}
+  },
 
   canvas: {
 svg: undefined,
@@ -235,12 +242,10 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-// init tooltip
-this.initTooltip();
+// init model
 this.modelArr = [];
 this.containerIdArr = [];
 
-// init model
 if (this.get("rmModel")) {
   this.get("rmModel").forEach(function(o) {
 if(!this.modelArr.contains(o)) {
@@ -258,16 +263,30 @@ export default Ember.Component.extend({
   }.bind(this));
 }
 
-if(this.modelArr.length === 0) {
+if (this.modelArr.length === 0) {
   return;
 }
 
 this.modelArr.sort(function(a, b) {
   var tsA = a.get("startTs");
   var tsB = b.get("startTs");
-
   return tsA - tsB;
 });
+
+if (this.get('attemptModel')) {
+  this.setAttemptsGridColumnsAndRows();
+} else {
+  this.setContainersGridColumnsAndRows();
+}
+  },
+
+  didUpdate: function() {
+if (this.get("viewType") === "grid" || this.graphDrawn) {
+  return;
+}
+
+this.initTooltip();
+
 var begin = 0;
 if (this.modelArr.length > 0) {
   begin = this.modelArr[0].get("startTs");
@@ -289,11 +308,7 @@ export default Ember.Component.extend({
   this.setSelected(this.modelArr[0]);
 }
 
-if (this.get('attemptModel')) {
-  this.setAttemptsGridColumnsAndRows();
-} else {
-  this.setContainersGridColumnsAndRows();
-}
+this.graphDrawn = true;
   },
 
   setAttemptsGridColumnsAndRows: function() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5387b3f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index 4c8b8a1..116920d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -19,8 +19,15 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["service"],
+  queryParams: ["service", "viewType"],
   service: undefined,
+  viewType: "graph",
+
+  actions: {
+changeViewType(param) {
+  this.set("viewType", param);
+}
+  },
 
   breadcrumbs: Ember.computed("model.attempt.appId", "model.attempt.id", 
function () {
 var appId = this.get("model.attempt.appId");


hadoop git commit: HADOOP-15374. Add links of the new features of 3.1.0 to the top page

2018-04-10 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5eb32396d -> 77cca3bef


HADOOP-15374. Add links of the new features of 3.1.0 to the top page

Signed-off-by: Akira Ajisaka 
(cherry picked from commit 7623cc5a982219fff2bdd9a84650f45106cbdf47)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77cca3be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77cca3be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77cca3be

Branch: refs/heads/branch-3.1
Commit: 77cca3befc9757f94a18f11b4f9b581812e243a1
Parents: 5eb3239
Author: Takanobu Asanuma 
Authored: Tue Apr 10 18:59:40 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 19:01:18 2018 +0900

--
 hadoop-project/src/site/site.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77cca3be/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index b5ecd73..fdf5583 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -106,6 +106,7 @@
   [context and one added <item> element elided: the XML markup was
    stripped by the mail archive]
@@ -147,6 +148,9 @@
   [context and three added <item> elements elided: the XML markup was
    stripped by the mail archive; the four insertions are the new menu
    links to the 3.1.0 feature docs]





hadoop git commit: HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html pages. Contributed by Akira Ajisaka.

2018-04-10 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7c1e77dda -> 6729047a8


HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html 
pages. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6729047a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6729047a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6729047a

Branch: refs/heads/trunk
Commit: 6729047a8ba273d27edcc6a1a9d397a096f44d84
Parents: 7c1e77d
Author: Weiwei Yang 
Authored: Tue Apr 10 22:10:44 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Apr 10 22:10:44 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6729047a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..ab7975a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -1,4 +1,4 @@

hadoop git commit: HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html pages. Contributed by Akira Ajisaka.

2018-04-10 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 d4e438ff6 -> 1a0ce9369


HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html 
pages. Contributed by Akira Ajisaka.

(cherry picked from commit 6729047a8ba273d27edcc6a1a9d397a096f44d84)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a0ce936
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a0ce936
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a0ce936

Branch: refs/heads/branch-3.0
Commit: 1a0ce936966cea6a2ca18055a77a691dd92d4a59
Parents: d4e438f
Author: Weiwei Yang 
Authored: Tue Apr 10 22:10:44 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Apr 10 22:16:25 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a0ce936/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..ab7975a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -1,4 +1,4 @@

hadoop git commit: HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

2018-04-10 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 b104de3f6 -> c9da1e97f


HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

Signed-off-by: Akira Ajisaka 
(cherry picked from commit cef8eb79810383f9970ed3713deecc18fbf0ffaa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9da1e97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9da1e97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9da1e97

Branch: refs/heads/branch-3.1
Commit: c9da1e97f8ecd71f4876eca0683377c03596b58f
Parents: b104de3
Author: Ewan Higgs 
Authored: Tue Apr 10 23:58:26 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 23:59:34 2018 +0900

--
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9da1e97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index 467e5bc..fca72d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -39,7 +39,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.io.FileUtils;;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;





hadoop git commit: HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

2018-04-10 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6729047a8 -> cef8eb798


HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef8eb79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef8eb79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef8eb79

Branch: refs/heads/trunk
Commit: cef8eb79810383f9970ed3713deecc18fbf0ffaa
Parents: 6729047
Author: Ewan Higgs 
Authored: Tue Apr 10 23:58:26 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 23:58:26 2018 +0900

--
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8eb79/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index 467e5bc..fca72d9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -39,7 +39,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.io.FileUtils;;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;





hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/trunk cef8eb798 -> f89594f0b


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89594f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89594f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89594f0

Branch: refs/heads/trunk
Commit: f89594f0b80e8efffdcb887daa4a18a2b0a228b3
Parents: cef8eb7
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:35:00 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89594f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverses directories recursively and processes files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batches, and can optionally start
+   * after a position. The iteration of the inode tree is done in a
+   * depth-first fashion, but instead of holding all {@link INodeDirectory}'s
+   * in memory, only the path components down to the current inode are held.
+   * This is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traversal should start after.
+   * @param traverseInfo
+   *  info which may 
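
The javadoc is cut off by the digest, but the traversal contract is clear from the signature and fields: iterate depth-first while keeping only the path to the current inode in memory, hand files to the subclass in fixed-size batches, and support resuming from a startAfter position (the read-lock threshold read in the constructor is used to decide when the walk has held the namesystem lock too long). A simplified, self-contained sketch of that shape over java.nio.file, with the batch hook standing in for the submit-and-checkpoint logic that ReencryptionHandler plugs in:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

/** Depth-first traversal that processes files in batches, holding only one
 *  iterator per ancestor directory in memory. */
abstract class TreeTraverser {
  private final int batchSize;

  TreeTraverser(int batchSize) { this.batchSize = batchSize; }

  /** Subclass hook, called once per full (or final partial) batch. In the
   *  HDFS version this is where the lock is yielded and work is submitted. */
  protected abstract void processBatch(List<Path> batch) throws IOException;

  void traverse(Path root) throws IOException {
    Deque<Iterator<Path>> stack = new ArrayDeque<>();  // one iterator per ancestor
    List<Path> batch = new ArrayList<>(batchSize);
    stack.push(children(root));
    while (!stack.isEmpty()) {
      Iterator<Path> it = stack.peek();
      if (!it.hasNext()) { stack.pop(); continue; }
      Path p = it.next();
      if (Files.isDirectory(p)) {
        stack.push(children(p));                       // descend
      } else {
        batch.add(p);
        if (batch.size() >= batchSize) {
          processBatch(batch);                         // natural checkpoint
          batch = new ArrayList<>(batchSize);
        }
      }
    }
    if (!batch.isEmpty()) { processBatch(batch); }
  }

  private static Iterator<Path> children(Path dir) throws IOException {
    List<Path> kids = new ArrayList<>();
    try (DirectoryStream<Path> ds = Files.newDirectoryStream(dir)) {
      ds.forEach(kids::add);
    }
    return kids.iterator();
  }
}

The committed class does the same dance over INodes instead of a real filesystem: it lists directories under the FSDirectory read lock, uses the threshold above to decide when to yield that lock between batches, and records the current path as startAfter so the walk can resume where it left off.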

hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-10 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1a0ce9369 -> fe7a70e58


Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit 877f963a059d21e942b0b5cbff5f60ce23c5b5fe.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe7a70e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe7a70e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe7a70e5

Branch: refs/heads/branch-3.0
Commit: fe7a70e5865f45b3f09fb08825f08783ee7690eb
Parents: 1a0ce93
Author: Inigo Goiri 
Authored: Tue Apr 10 10:08:34 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Apr 10 10:08:34 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7a70e5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 814a091..b94e94d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -76,9 +76,6 @@ public class RequestHedgingProxyProvider extends
 public Object
 invoke(Object proxy, final Method method, final Object[] args)
 throws Throwable {
-  if (currentUsedProxy != null) {
-return method.invoke(currentUsedProxy.proxy, args);
-  }
   Map proxyMap = new HashMap<>();
   int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7a70e5/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 3c46f52..04e77ad 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,13 +42,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -102,37 +99,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-final AtomicInteger count = new AtomicInteger(0);
-final ClientProtocol goodMock = mock(ClientProtocol.class);
-when(goodMock.getStats()).thenAnswer(new Answer() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-Thread.sleep(1000);
-return new long[]{1};
-  }
-});
-final ClientProtocol badMock = mock(ClientProtocol.class);
-when(badMock.getStats()).thenAnswer(new Answer() {
-  @Override
-  public long[] answer(InvocationOnMock invocation) throws Throwable {
-count.incrementAndGet();
-throw new IOException("Bad mock !!");
-  }
-});
-
-RequestHedgingProxyProvider provider =
-new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-createFactory(badMock, goodMock, goodMock, badMock));
-ClientProtocol proxy = provider.getProxy().proxy;
-proxy.getStats();
-assertEquals(2, count.get());
-proxy.getStats();
-assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
 

[35/50] [abbrv] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
index 1836919..02c0cc5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java
@@ -563,7 +563,7 @@ public class TestNodeLabelContainerAllocation {
   int numContainers) {
 CapacityScheduler cs = (CapacityScheduler) 
rm.getRMContext().getScheduler();
 SchedulerNode node = cs.getSchedulerNode(nodeId);
-Assert.assertEquals(numContainers, node.getNumContainers());
+Assert.assertEquals(numContainers, node.getNumGuaranteedContainers());
   }
 
   /**
@@ -1065,7 +1065,7 @@ public class TestNodeLabelContainerAllocation {
 for (int i = 0; i < 50; i++) {
   cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
   cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
-  if (schedulerNode1.getNumContainers() == 0) {
+  if (schedulerNode1.getNumGuaranteedContainers() == 0) {
 cycleWaited++;
   }
 }
@@ -1131,7 +1131,7 @@ public class TestNodeLabelContainerAllocation {
 CSAMContainerLaunchDiagnosticsConstants.LAST_NODE_PROCESSED_MSG
 + nodeIdStr + " ( Partition : [x]"));
 Assert.assertEquals(0, cs.getSchedulerNode(nm1.getNodeId())
-.getNumContainers());
+.getNumGuaranteedContainers());
 
 rm1.close();
   }
@@ -1215,7 +1215,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(10, schedulerNode1.getNumContainers());
+Assert.assertEquals(10, schedulerNode1.getNumGuaranteedContainers());
 
 // check non-exclusive containers of LeafQueue is correctly updated
 LeafQueue leafQueue = (LeafQueue) cs.getQueue("a");
@@ -1943,7 +1943,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x
-Assert.assertEquals(5, schedulerNode1.getNumContainers());
+Assert.assertEquals(5, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2043,7 +2043,7 @@ public class TestNodeLabelContainerAllocation {
 }
 
 // app1 gets all resource in partition=x (non-exclusive)
-Assert.assertEquals(3, schedulerNode1.getNumContainers());
+Assert.assertEquals(3, schedulerNode1.getNumGuaranteedContainers());
 
 SchedulerNodeReport reportNm1 = rm1.getResourceScheduler()
 .getNodeReport(nm1.getNodeId());
@@ -2074,7 +2074,7 @@ public class TestNodeLabelContainerAllocation {
 cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
 
 // app1 gets all resource in default partition
-Assert.assertEquals(2, schedulerNode2.getNumContainers());
+Assert.assertEquals(2, schedulerNode2.getNumGuaranteedContainers());
 
 // 3GB is used from label x quota. 2GB used from default label.
 // So total 2.5 GB is remaining.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
index 2512787..6390297 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java
+++ 

[09/50] [abbrv] hadoop git commit: YARN-6936. [Atsv2] Retrospect storing entities into sub application table from client perspective. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread haibochen
YARN-6936. [Atsv2] Retrospect storing entities into sub application table from 
client perspective. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b8bd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b8bd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b8bd53

Branch: refs/heads/YARN-1011
Commit: f8b8bd53c4797d406bea5b1b0cdb179e209169cc
Parents: d737bf99
Author: Haibo Chen 
Authored: Thu Apr 5 10:22:50 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 5 10:23:42 2018 -0700

--
 .../timelineservice/SubApplicationEntity.java   | 50 
 .../yarn/client/api/TimelineV2Client.java   | 47 +++---
 .../client/api/impl/TimelineV2ClientImpl.java   | 30 ++--
 ...stTimelineReaderWebServicesHBaseStorage.java |  7 +--
 .../TestHBaseTimelineStorageEntities.java   |  3 +-
 .../storage/HBaseTimelineWriterImpl.java|  3 +-
 .../collector/TimelineCollectorWebService.java  | 19 ++--
 7 files changed, 138 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
new file mode 100644
index 000..a83ef3d
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This entity represents a user-defined entity to be stored under the sub
+ * application table.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class SubApplicationEntity extends HierarchicalTimelineEntity {
+
+  public static final String YARN_APPLICATION_ID = "YARN_APPLICATION_ID";
+
+  public SubApplicationEntity(TimelineEntity entity) {
+super(entity);
+  }
+
+  /**
+   * Checks if the input TimelineEntity object is a SubApplicationEntity.
+   *
+   * @param te TimelineEntity object.
+   * @return true if the input is a SubApplicationEntity, false otherwise
+   */
+  public static boolean isSubApplicationEntity(TimelineEntity te) {
+return (te != null && te instanceof SubApplicationEntity);
+  }
+
+  public void setApplicationId(String appId) {
+addInfo(YARN_APPLICATION_ID, appId);
+  }
+}
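
Putting the new class to use is two steps: wrap a plain TimelineEntity and stamp it with the application id; the collector side (TimelineCollectorWebService, also touched by this patch per the diffstat) can then use isSubApplicationEntity() to route the entity to the sub-application table. A small usage sketch; the entity type and ids below are made up:

import org.apache.hadoop.yarn.api.records.timelineservice.SubApplicationEntity;
import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity;

public class SubAppEntityExample {
  public static void main(String[] args) {
    TimelineEntity raw = new TimelineEntity();
    raw.setType("MY_SUB_APP_METRIC");   // hypothetical user-defined type
    raw.setId("entity-001");

    SubApplicationEntity e = new SubApplicationEntity(raw);
    // stored in the entity's info map under YARN_APPLICATION_ID
    e.setApplicationId("application_1523345000000_0001");

    // the check the collector uses to pick the sub-application table
    System.out.println(SubApplicationEntity.isSubApplicationEntity(e));
  }
}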

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 423c059..e987b46 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -54,9 +54,10 @@ public abstract class TimelineV2Client extends 
CompositeService {
 
   /**
* 
-   * Send the information of a number of conceptual entities to the timeline
-   * service v.2 collector. It is a blocking API. The method will 

[13/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
new file mode 100644
index 000..ab7c120
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
@@ -0,0 +1,3034 @@
+  [3,034 lines of JDiff-generated XML for
+   Apache_Hadoop_YARN_Common_3.1.0.xml elided: the mail archive stripped
+   all XML markup from this hunk, leaving only empty diff lines, so none
+   of the content is recoverable here.]

[41/50] [abbrv] hadoop git commit: YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ef338d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 1227de2..ac925c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -55,14 +55,20 @@ import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.ExecutionType;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -72,6 +78,8 @@ import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
+import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo;
+import org.apache.hadoop.yarn.server.api.records.ResourceThresholds;
 import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -93,6 +101,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 
 
@@ -1056,15 +1065,15 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 assertEquals(
 YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
 scheduler.getQueueManager().getQueue("queue1").
-getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 
 NodeUpdateSchedulerEvent updateEvent2 = new 
NodeUpdateSchedulerEvent(node2);
 scheduler.handle(updateEvent2);
 
 assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
-  getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 assertEquals(2, scheduler.getQueueManager().getQueue("queue1").
-  getResourceUsage().getVirtualCores());
+getGuaranteedResourceUsage().getVirtualCores());
 
 // verify metrics
 QueueMetrics queue1Metrics = scheduler.getQueueManager().getQueue("queue1")
@@ -1099,7 +1108,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 // Make sure queue 1 is allocated app capacity
 assertEquals(1024, scheduler.getQueueManager().getQueue("queue1").
-getResourceUsage().getMemorySize());
+getGuaranteedResourceUsage().getMemorySize());
 
 // Now queue 2 requests likewise
 ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", 
"user1", 1);
@@ -1109,7 +1118,7 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 
 // Make sure queue 2 is waiting 

[37/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2018-04-10 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6278cc71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6278cc71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6278cc71

Branch: refs/heads/YARN-1011
Commit: 6278cc716cf0e63d1a86a74b3519d52e0924d7aa
Parents: e9b9f48
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:07:06 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  48 -
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   7 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 457 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6278cc71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2590b6f..2d69fa9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2042,7 +2042,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for application life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -2050,6 +2049,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6278cc71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81b6658..4a7548a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1710,6 +1710,27 @@
   
 
   
+  <property>
+    <description>The extent of over-allocation (container-allocation based on
+      current utilization instead of prior allocation) allowed on this node,
+      expressed as a float between 0 and 0.95. By default, over-allocation is
+      turned off (value = 0). When turned on, the node allows running
+      OPPORTUNISTIC containers when the aggregate utilization is under the
+      value specified here multiplied by the node's advertised capacity.
+    </description>
+    <name>yarn.nodemanager.overallocation.allocation-threshold</name>
+    <value>0f</value>
+  </property>
+
+  <property>
+    <description>When a node is over-allocated to improve utilization by
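The threshold semantics described above reduce to a small check. The sketch below is illustrative only: it relies on the YarnConfiguration constants introduced in this patch, but the OverAllocationCheck class and its method names are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

/** Hypothetical illustration of the over-allocation threshold. */
public class OverAllocationCheck {
  private final float allocationThreshold;

  public OverAllocationCheck(Configuration conf) {
    float configured = conf.getFloat(
        YarnConfiguration.NM_OVERALLOCATION_ALLOCATION_THRESHOLD,
        YarnConfiguration.DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD);
    // Values above the hard cap are clamped, mirroring the
    // MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD (0.95f) constant above.
    this.allocationThreshold = Math.min(configured,
        YarnConfiguration.MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD);
  }

  /** True if an OPPORTUNISTIC container may be started on this node. */
  public boolean mayOverAllocate(long utilizedMb, long capacityMb) {
    // 0 disables over-allocation entirely (the default).
    return allocationThreshold > 0f
        && utilizedMb < allocationThreshold * capacityMb;
  }
}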

[33/50] [abbrv] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-10 Thread haibochen
HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9b9f48d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9b9f48d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9b9f48d

Branch: refs/heads/YARN-1011
Commit: e9b9f48dad5ebb58ee529f918723089e8356c480
Parents: ac32b35
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:09:25 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9b9f48d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1159289..e6d2f5e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -900,7 +900,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+    getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -921,7 +922,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> srcLocations = getLocationsForPath(src, true);
+final List<RemoteLocation> srcLocations =
+    getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List<RemoteLocation> locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -998,7 +1000,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List<RemoteLocation> locations = getLocationsForPath(src, true);
+final List<RemoteLocation> locations =
+    getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2213,14 +2216,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List<RemoteLocation> getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List<RemoteLocation> getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2241,7 +2259,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
   .getQuotaUsage(path);
   if (quotaUsage != null) {
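The practical effect of passing needQuotaVerify = false on the rename and delete paths: once a directory exceeds its quota, the operations that would bring it back under quota no longer fail during the Router's path resolution. A hedged client-side sketch of the behaviour; the router URI and the paths are placeholders:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: /quota-dir is assumed to be over its quota.
public class QuotaCleanupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Router RPC endpoint; host and port are deployment-specific.
    FileSystem fs = FileSystem.get(URI.create("hdfs://router:8888"), conf);
    // Before HDFS-13380 both calls could abort with a quota-exceeded
    // error raised during path resolution, even though each of them
    // reduces the directory's usage.
    fs.rename(new Path("/quota-dir/big-file"), new Path("/archive/big-file"));
    fs.delete(new Path("/quota-dir/stale-data"), true);
  }
}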


[08/50] [abbrv] hadoop git commit: HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be considered as striped block. (Contributed by Lei (Eddy) Xu).

2018-04-10 Thread haibochen
HDFS-13350. Negative legacy block ID will confuse Erasure Coding to be 
considered as striped block. (Contributed by Lei (Eddy) Xu).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d737bf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d737bf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d737bf99

Branch: refs/heads/YARN-1011
Commit: d737bf99d44ce34cd01baad716d23df269267c95
Parents: e52539b
Author: Lei Xu 
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu 
Committed: Thu Apr 5 09:59:10 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block ID was randomly generated (legacy), so it is
+   * possible for a legacy block ID to be negative, which should not be
+   * considered as striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}, we should not use this function alone
+   * to determine whether a block is a striped block.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }
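Striped (erasure-coded) block IDs are drawn from the negative ID space, so the sign test alone cannot distinguish them from pre-HDFS-4645 randomly generated IDs, which may also be negative. The toy model below is not the patch code: the sign test mirrors BlockType.fromBlockId, the legacy test mirrors isLegacyBlock's generation-stamp comparison, and the Block is reduced to two longs.

final class StripedCheckSketch {
  static boolean isStripedBlockId(long blockId) {
    // Striped (EC) IDs are allocated from the negative range.
    return blockId < 0;
  }

  static boolean isStripedBlock(long blockId, long generationStamp,
      long legacyGenerationStampLimit) {
    // A pre-HDFS-4645 randomly generated ID may also be negative, so a
    // block whose generation stamp predates the legacy limit is excluded.
    boolean legacy = generationStamp < legacyGenerationStampLimit;
    return isStripedBlockId(blockId) && !legacy;
  }

  public static void main(String[] args) {
    // A negative ID with a modern generation stamp is striped...
    System.out.println(isStripedBlock(-100L, 2000L, 1000L)); // true
    // ...but the same ID with a pre-limit (legacy) stamp is not.
    System.out.println(isStripedBlock(-100L, 500L, 1000L));  // false
  }
}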

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 

[12/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
new file mode 100644
index 000..1e826f3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
@@ -0,0 +1,1331 @@
+[1,331 lines of generated JDiff XML elided: the element markup was stripped in archiving and no readable API content survives.]





[29/50] [abbrv] hadoop git commit: HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by Yiran Wu.

2018-04-10 Thread haibochen
HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by 
Yiran Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5700556c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5700556c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5700556c

Branch: refs/heads/YARN-1011
Commit: 5700556cd65a558f4393e05acb7ea8db3ccd2f36
Parents: 0b345b7
Author: Yiqun Lin 
Authored: Sun Apr 8 12:01:55 2018 +0800
Committer: Yiqun Lin 
Committed: Sun Apr 8 12:01:55 2018 +0800

--
 .../federation/store/driver/impl/StateStoreFileSystemImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5700556c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
index ad822fb..2e1ff8f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
@@ -35,13 +35,15 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * StateStoreDriver} implementation based on a filesystem. The most common uses
- * HDFS as a backend.
+ * {@link StateStoreDriver} implementation based on a filesystem. The common
+ * implementation uses HDFS as a backend. The path can be specified setting
+ * dfs.federation.router.driver.fs.path=hdfs://host:port/path/to/store.
  */
 public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
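A router opts into this driver by pointing the state-store driver class key (dfs.federation.router.store.driver.class, per RBFConfigKeys) at StateStoreFileSystemImpl and supplying the fs.path key shown in the javadoc above; the host, port and path are deployment-specific.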
 





hadoop git commit: HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html pages. Contributed by Akira Ajisaka.

2018-04-10 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 5387b3f06 -> b104de3f6


HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html 
pages. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b104de3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b104de3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b104de3f

Branch: refs/heads/branch-3.1
Commit: b104de3f64bbdb82a57a5749ad3bc116d93ea665
Parents: 5387b3f
Author: Weiwei Yang 
Authored: Tue Apr 10 22:10:44 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Apr 10 22:13:08 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b104de3f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..ab7975a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -1,4 +1,4 @@

hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 c9da1e97f -> 3414bf6db


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.

(cherry picked from commit f89594f0b80e8efffdcb887daa4a18a2b0a228b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3414bf6d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3414bf6d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3414bf6d

Branch: refs/heads/branch-3.1
Commit: 3414bf6dbbe31929121142e00a9468e2ce09d98d
Parents: c9da1e9
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:40:26 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3414bf6d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverse directory recursively and process files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batch, and can optionally start after
+   * a position. The iteration of the inode tree is done in a depth-first
+   * fashion. But instead of holding all {@link INodeDirectory}'s in memory
+   * on the fly, only the path components to the current inode are held. This
+   * is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse 
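The traversal contract above can be modelled in a few lines. This is a toy model, not the patch code: Node stands in for INode, submit() for ReencryptionHandler's batch processing, a childless directory counts as a file for simplicity, and locking, start-after resumption and re-checks are omitted.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

class Node {
  String name;
  List<Node> children = new ArrayList<>(); // empty for files
  boolean isFile() { return children.isEmpty(); }
}

class BatchTraverser {
  private final int batchSize;
  BatchTraverser(int batchSize) { this.batchSize = batchSize; }

  void traverse(Node root) {
    // Only the current ancestry is held, not the whole directory tree;
    // this models the memory bound described in the javadoc above.
    Deque<Node> path = new ArrayDeque<>();
    List<Node> batch = new ArrayList<>(batchSize);
    visit(root, path, batch);
    if (!batch.isEmpty()) {
      submit(batch);
    }
  }

  private void visit(Node n, Deque<Node> path, List<Node> batch) {
    if (n.isFile()) {
      batch.add(n);
      if (batch.size() >= batchSize) {
        // The real traverser also releases and re-acquires the namesystem
        // lock here before continuing.
        submit(new ArrayList<>(batch));
        batch.clear();
      }
      return;
    }
    path.push(n);               // depth-first descent
    for (Node child : n.children) {
      visit(child, path, batch);
    }
    path.pop();
  }

  private void submit(List<Node> files) {
    // Stand-in for the subclass's batch processing.
  }
}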

hadoop git commit: HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed by Gabor Bota.

2018-04-10 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk f89594f0b -> e76c2aeb2


HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e76c2aeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e76c2aeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e76c2aeb

Branch: refs/heads/trunk
Commit: e76c2aeb288710ebee39680528dec44e454bbe9e
Parents: f89594f
Author: Xiao Chen 
Authored: Tue Apr 10 11:19:23 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 11:19:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/protocol/AclException.java   | 10 ++
 .../apache/hadoop/hdfs/server/namenode/FSDirAclOp.java  | 12 
 2 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
index 1210999..9948b99 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
@@ -36,4 +36,14 @@ public class AclException extends IOException {
   public AclException(String message) {
 super(message);
   }
+
+  /**
+   * Creates a new AclException.
+   *
+   * @param message String message
+   * @param cause The cause of the exception
+   */
+  public AclException(String message, Throwable cause) {
+super(message, cause);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..8d77f89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -53,6 +53,8 @@ class FSDirAclOp {
   existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -77,6 +79,8 @@ class FSDirAclOp {
 existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -100,6 +104,8 @@ class FSDirAclOp {
 existingAcl);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -117,6 +123,8 @@ class FSDirAclOp {
   src = iip.getPath();
   fsd.checkOwner(pc, iip);
   unprotectedRemoveAcl(fsd, iip);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -136,6 +144,8 @@ class FSDirAclOp {
   fsd.checkOwner(pc, iip);
   List newAcl = unprotectedSetAcl(fsd, iip, aclSpec, false);
   fsd.getEditLog().logSetAcl(iip.getPath(), newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -162,6 +172,8 @@ class FSDirAclOp {
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.readUnlock();
 }
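The change is plain exception chaining: each operation re-throws with the path appended while keeping the original exception as the cause, so the stack trace survives. A self-contained illustration; the message text and path are made up:

import org.apache.hadoop.hdfs.protocol.AclException;

// Illustrative only: doSetAcl stands in for any FSDirAclOp operation.
public class AclPathExample {
  static void doSetAcl(String src) throws AclException {
    try {
      throw new AclException("Invalid ACL: only directories may have"
          + " a default ACL.");
    } catch (AclException e) {
      // The pattern from the patch: append the path, keep the cause.
      throw new AclException(e.getMessage() + " Path: " + src, e);
    }
  }

  public static void main(String[] args) {
    try {
      doSetAcl("/user/alice/data");
    } catch (AclException e) {
      // Prints: Invalid ACL: only directories may have a default ACL.
      //         Path: /user/alice/data
      System.err.println(e.getMessage());
    }
  }
}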





hadoop git commit: HDFS-13395. Ozone: Plugins support in HDSL Datanode Service. Contributed by Nanda Kumar.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 8475d6bb5 -> bb3c07fa3


HDFS-13395. Ozone: Plugins support in HDSL Datanode Service. Contributed by 
Nanda Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb3c07fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb3c07fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb3c07fa

Branch: refs/heads/HDFS-7240
Commit: bb3c07fa3e4f5b5c38c251e882a357eddab0957f
Parents: 8475d6b
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:28:52 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:28:52 2018 -0700

--
 .../src/main/compose/cblock/docker-config   |   3 +-
 .../src/main/compose/ozone/docker-config|   3 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   3 +
 .../common/src/main/resources/ozone-default.xml |   8 ++
 .../hadoop/ozone/HddsDatanodeService.java   | 118 ++-
 .../statemachine/DatanodeStateMachine.java  |  10 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   5 -
 .../server/datanode/DataNodeServicePlugin.java  |  48 
 .../src/test/compose/docker-config  |   3 +-
 .../hadoop/ozone/MiniOzoneClassicCluster.java   |   4 +-
 .../hadoop/ozone/MiniOzoneTestHelper.java   |   5 +
 .../hadoop/ozone/web/ObjectStoreRestPlugin.java | 108 -
 .../ozone/web/OzoneHddsDatanodeService.java |  84 +
 13 files changed, 208 insertions(+), 194 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-dist/src/main/compose/cblock/docker-config
--
diff --git a/hadoop-dist/src/main/compose/cblock/docker-config 
b/hadoop-dist/src/main/compose/cblock/docker-config
index 4690de0..f69bef0 100644
--- a/hadoop-dist/src/main/compose/cblock/docker-config
+++ b/hadoop-dist/src/main/compose/cblock/docker-config
@@ -27,7 +27,8 @@ OZONE-SITE.XML_ozone.scm.client.address=scm
 OZONE-SITE.XML_dfs.cblock.jscsi.cblock.server.address=cblock
 OZONE-SITE.XML_dfs.cblock.scm.ipaddress=scm
 OZONE-SITE.XML_dfs.cblock.service.leveldb.path=/tmp
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-dist/src/main/compose/ozone/docker-config
--
diff --git a/hadoop-dist/src/main/compose/ozone/docker-config 
b/hadoop-dist/src/main/compose/ozone/docker-config
index 8e5efa9..c693db0 100644
--- a/hadoop-dist/src/main/compose/ozone/docker-config
+++ b/hadoop-dist/src/main/compose/ozone/docker-config
@@ -23,11 +23,12 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm
 OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata
 OZONE-SITE.XML_ozone.handler.type=distributed
 OZONE-SITE.XML_ozone.scm.client.address=scm
+OZONE-SITE.XML_hdds.datanode.plugins=org.apache.hadoop.ozone.web.OzoneHddsDatanodeService
 HDFS-SITE.XML_dfs.namenode.rpc-address=namenode:9000
 HDFS-SITE.XML_dfs.namenode.name.dir=/data/namenode
 HDFS-SITE.XML_rpc.metrics.quantile.enable=true
 HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300
-HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.web.ObjectStoreRestPlugin,org.apache.hadoop.ozone.HddsDatanodeService
+HDFS-SITE.XML_dfs.datanode.plugins=org.apache.hadoop.ozone.HddsDatanodeService
 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout
 LOG4J.PROPERTIES_log4j.appender.stdout=org.apache.log4j.ConsoleAppender
 LOG4J.PROPERTIES_log4j.appender.stdout.layout=org.apache.log4j.PatternLayout

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb3c07fa/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index ef96f379..72531a2 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -230,6 +230,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_SCM_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL =
   "ozone.web.authentication.kerberos.principal";
 
+  public static final String 
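The wiring is now two-level: HDFS's dfs.datanode.plugins loads only HddsDatanodeService, which in turn reads the new hdds.datanode.plugins key to start Ozone-specific plugins such as OzoneHddsDatanodeService. A minimal sketch of the loading pattern, assuming the standard ServicePlugin mechanism used by the DataNode; this is simplified, not the literal patch code, and the lifecycle plumbing is elided.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ServicePlugin;

public class PluginLoadingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Level 1: the DataNode instantiates HddsDatanodeService.
    List<ServicePlugin> dnPlugins =
        conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    // Level 2: HddsDatanodeService instantiates the Ozone REST plugin.
    List<ServicePlugin> hddsPlugins =
        conf.getInstances("hdds.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : dnPlugins) {
      p.start(null); // a real caller passes the owning service
    }
    for (ServicePlugin p : hddsPlugins) {
      p.start(null);
    }
  }
}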

[01/50] [abbrv] hadoop git commit: HDFS-13364. RBF: Support NamenodeProtocol in the Router. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 bb3c07fa3 -> df3ff9042


HDFS-13364. RBF: Support NamenodeProtocol in the Router. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2be64eb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2be64eb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2be64eb2

Branch: refs/heads/HDFS-7240
Commit: 2be64eb201134502a92f7239bef8aa780771ca0b
Parents: 1077392
Author: Yiqun Lin 
Authored: Tue Apr 3 15:08:40 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 3 15:08:40 2018 +0800

--
 .../federation/router/ConnectionContext.java|  35 +++-
 .../federation/router/ConnectionManager.java|  10 +-
 .../federation/router/ConnectionPool.java   |  98 +-
 .../federation/router/ConnectionPoolId.java |  19 +-
 .../server/federation/router/RemoteMethod.java  |  68 ++-
 .../router/RouterNamenodeProtocol.java  | 187 +++
 .../federation/router/RouterRpcClient.java  |  56 --
 .../federation/router/RouterRpcServer.java  | 111 ++-
 .../server/federation/MiniRouterDFSCluster.java |   8 +
 .../router/TestConnectionManager.java   |  56 +-
 .../server/federation/router/TestRouterRpc.java | 115 ++--
 11 files changed, 698 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be64eb2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
index 1d27b51..7e779b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionContext.java
@@ -17,8 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import java.net.InetSocketAddress;
+
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.ipc.RPC;
 
 /**
@@ -26,18 +27,24 @@ import org.apache.hadoop.ipc.RPC;
  * a connection, it increments a counter to mark it as active. Once the client
  * is done with the connection, it decreases the counter. It also takes care of
  * closing the connection once is not active.
+ *
+ * The protocols currently used are:
+ * <ul>
+ * <li>{@link org.apache.hadoop.hdfs.protocol.ClientProtocol}</li>
+ * <li>{@link org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol}</li>
+ * </ul>
  */
 public class ConnectionContext {
 
   /** Client for the connection. */
-  private final ProxyAndInfo<ClientProtocol> client;
+  private final ProxyAndInfo<?> client;
   /** How many threads are using this connection. */
   private int numThreads = 0;
   /** If the connection is closed. */
   private boolean closed = false;
 
 
-  public ConnectionContext(ProxyAndInfo<ClientProtocol> connection) {
+  public ConnectionContext(ProxyAndInfo<?> connection) {
 this.client = connection;
   }
 
@@ -74,7 +81,7 @@ public class ConnectionContext {
*
* @return Connection client.
*/
-  public synchronized ProxyAndInfo<ClientProtocol> getClient() {
+  public synchronized ProxyAndInfo<?> getClient() {
 this.numThreads++;
 return this.client;
   }
@@ -96,9 +103,27 @@ public class ConnectionContext {
   public synchronized void close() {
 this.closed = true;
 if (this.numThreads == 0) {
-  ClientProtocol proxy = this.client.getProxy();
+  Object proxy = this.client.getProxy();
   // Nobody should be using this anymore so it should close right away
   RPC.stopProxy(proxy);
 }
   }
+
+  @Override
+  public String toString() {
+InetSocketAddress addr = this.client.getAddress();
+Object proxy = this.client.getProxy();
+Class<?> clazz = proxy.getClass();
+
+StringBuilder sb = new StringBuilder();
+sb.append(clazz.getSimpleName());
+sb.append("@");
+sb.append(addr);
+sb.append("x");
+sb.append(numThreads);
+if (closed) {
+  sb.append("[CLOSED]");
+}
+return sb.toString();
+  }
 }
\ No newline at end of file
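For an open connection serving three threads, the new toString yields something like ClientNamenodeProtocolTranslatorPB@nn1:8020x3 (simple proxy class name, address, thread count; the values here are illustrative), with [CLOSED] appended once the connection has been closed.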

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2be64eb2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java
--
diff --git 

[49/50] [abbrv] hadoop git commit: HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by Elek Marton.

2018-04-10 Thread xyao
HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by 
Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab776d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab776d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab776d6

Branch: refs/heads/HDFS-7240
Commit: 8ab776d61e569c12ec62024415ff68e5d3b10141
Parents: e76c2ae
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:42:54 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:42:54 2018 -0700

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java| 46 +---
 .../main/java/org/apache/hadoop/ipc/Server.java |  9 
 .../apache/hadoop/ipc/WritableRpcEngine.java|  2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 11 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 56 +++-
 6 files changed, 117 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..70fde60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -419,8 +419,9 @@ public class ProtobufRpcEngine implements RpcEngine {
 String portRangeConfig)
 throws IOException {
   super(bindAddress, port, null, numHandlers,
-  numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-  .getClass().getName()), secretManager, portRangeConfig);
+  numReaders, queueSizePerHandler, conf,
+  serverNameFromClass(protocolImpl.getClass()), secretManager,
+  portRangeConfig);
   this.verbose = verbose;  
   registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
   protocolImpl);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 8f8eda6..9cfadc7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -35,6 +35,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.net.SocketFactory;
 
@@ -808,13 +810,45 @@ public class RPC {
   
   /** An RPC Server. */
   public abstract static class Server extends org.apache.hadoop.ipc.Server {
-   boolean verbose;
-   static String classNameBase(String className) {
-  String[] names = className.split("\\.", -1);
-  if (names == null || names.length == 0) {
-return className;
+
+boolean verbose;
+
+private static final Pattern COMPLEX_SERVER_NAME_PATTERN =
+Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");
+
+/**
+ * Get a meaningful and short name for a server based on a java class.
+ *
+ * The rules are defined to support the current naming schema of the
+ * generated protobuf classes where the final class is usually an anonymous
+ * inner class of an inner class.
+ *
+ * 1. For simple classes it returns with the simple name of the classes
+ * (with the name without package name)
+ *
+ * 2. For inner classes, this is the simple name of the inner class.
+ *
+ * 3.  If it is an Object created from a class factory
+ *   E.g., org.apache.hadoop.ipc.TestRPC$TestClass$2
+ * this method returns parent class TestClass.
+ *
+ * 4. If it is an anonymous class E.g., 'org.apache.hadoop.ipc.TestRPC$10'
+ * serverNameFromClass returns parent class TestRPC.
+ *
+ *
+ */
+static String serverNameFromClass(Class<?> clazz) {
+  String name = clazz.getName();
+  String[] names = clazz.getName().split("\\.", -1);
+  if (names != null && names.length > 0) {
+name = names[names.length - 1];
+  }
+  Matcher matcher = 
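Applied to the javadoc's own examples, the pattern behaves as follows; nameOf is a condensed re-implementation of serverNameFromClass for demonstration, not the patch code:

public class ServerNameExamples {
  public static void main(String[] args) {
    // rule 1: a simple class maps to its simple name
    System.out.println(nameOf("org.apache.hadoop.ipc.TestRPC"));             // TestRPC
    // rule 2: an inner class maps to the inner class's simple name
    System.out.println(nameOf("org.apache.hadoop.ipc.TestRPC$TestClass"));   // TestClass
    // rule 3: a factory-created class keeps the parent inner class name
    System.out.println(nameOf("org.apache.hadoop.ipc.TestRPC$TestClass$2")); // TestClass
    // rule 4: an anonymous class falls back to the enclosing class name
    System.out.println(nameOf("org.apache.hadoop.ipc.TestRPC$10"));          // TestRPC
  }

  static String nameOf(String className) {
    String[] names = className.split("\\.", -1);
    String name = names[names.length - 1];
    java.util.regex.Matcher m = java.util.regex.Pattern
        .compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?")
        .matcher(name);
    // Like the patch, fall back to the raw name when nothing matches.
    return m.matches() ? m.group(1) : name;
  }
}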

[03/50] [abbrv] hadoop git commit: HADOOP-14758. S3GuardTool.prune to handle UnsupportedOperationException. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HADOOP-14758. S3GuardTool.prune to handle UnsupportedOperationException.
Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a174f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a174f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a174f8a

Branch: refs/heads/HDFS-7240
Commit: 5a174f8ac6e5f170b427b30bf72ef33f90c20d91
Parents: 93d47a0
Author: Steve Loughran 
Authored: Tue Apr 3 16:31:34 2018 +0100
Committer: Steve Loughran 
Committed: Tue Apr 3 16:31:34 2018 +0100

--
 .../java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a174f8a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index e764021..a9147ff 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -966,7 +966,11 @@ public abstract class S3GuardTool extends Configured 
implements Tool {
   long now = System.currentTimeMillis();
   long divide = now - delta;
 
-  getStore().prune(divide);
+  try {
+getStore().prune(divide);
+  } catch (UnsupportedOperationException e){
+errorln("Prune operation not supported in metadata store.");
+  }
 
   out.flush();
   return SUCCESS;
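Operationally this turns a stack trace into a one-line message: running, say, hadoop s3guard prune -days 7 s3a://example-bucket/ against a metadata store that does not implement prune now prints "Prune operation not supported in metadata store." and the command still completes cleanly (the bucket name here is a placeholder).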





hadoop git commit: HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by Elek Marton.

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk e76c2aeb2 -> 8ab776d61


HADOOP-15340. Provide meaningful RPC server name for RpcMetrics. Contributed by 
Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ab776d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ab776d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ab776d6

Branch: refs/heads/trunk
Commit: 8ab776d61e569c12ec62024415ff68e5d3b10141
Parents: e76c2ae
Author: Xiaoyu Yao 
Authored: Tue Apr 10 11:42:54 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 11:42:54 2018 -0700

--
 .../apache/hadoop/ipc/ProtobufRpcEngine.java|  5 +-
 .../main/java/org/apache/hadoop/ipc/RPC.java| 46 +---
 .../main/java/org/apache/hadoop/ipc/Server.java |  9 
 .../apache/hadoop/ipc/WritableRpcEngine.java|  2 +-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   | 11 +++-
 .../java/org/apache/hadoop/ipc/TestRPC.java | 56 +++-
 6 files changed, 117 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..70fde60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -419,8 +419,9 @@ public class ProtobufRpcEngine implements RpcEngine {
 String portRangeConfig)
 throws IOException {
   super(bindAddress, port, null, numHandlers,
-  numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-  .getClass().getName()), secretManager, portRangeConfig);
+  numReaders, queueSizePerHandler, conf,
+  serverNameFromClass(protocolImpl.getClass()), secretManager,
+  portRangeConfig);
   this.verbose = verbose;  
   registerProtocolAndImpl(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
   protocolImpl);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ab776d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 8f8eda6..9cfadc7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -35,6 +35,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import javax.net.SocketFactory;
 
@@ -808,13 +810,45 @@ public class RPC {
   
   /** An RPC Server. */
   public abstract static class Server extends org.apache.hadoop.ipc.Server {
-   boolean verbose;
-   static String classNameBase(String className) {
-  String[] names = className.split("\\.", -1);
-  if (names == null || names.length == 0) {
-return className;
+
+boolean verbose;
+
+private static final Pattern COMPLEX_SERVER_NAME_PATTERN =
+Pattern.compile("(?:[^\\$]*\\$)*([A-Za-z][^\\$]+)(?:\\$\\d+)?");
+
+/**
+ * Get a meaningful and short name for a server based on a java class.
+ *
+ * The rules are defined to support the current naming schema of the
+ * generated protobuf classes where the final class is usually an anonymous
+ * inner class of an inner class.
+ *
+ * 1. For simple classes it returns with the simple name of the classes
+ * (with the name without package name)
+ *
+ * 2. For inner classes, this is the simple name of the inner class.
+ *
+ * 3.  If it is an Object created from a class factory
+ *   E.g., org.apache.hadoop.ipc.TestRPC$TestClass$2
+ * this method returns parent class TestClass.
+ *
+ * 4. If it is an anonymous class E.g., 'org.apache.hadoop.ipc.TestRPC$10'
+ * serverNameFromClass returns parent class TestRPC.
+ *
+ *
+ */
+static String serverNameFromClass(Class<?> clazz) {
+  String name = clazz.getName();
+  String[] names = clazz.getName().split("\\.", -1);
+  if (names != null && names.length > 0) {
+name = 

[23/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
Added CHANGES/RELEASES/Jdiff for 3.1.0 release

Change-Id: Ied5067a996151c04d15cad46c46ac98b60c37b39
(cherry picked from commit 2d96570452a72569befdf9cfe9b90c9fa2e0e261)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6cf023f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6cf023f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6cf023f9

Branch: refs/heads/HDFS-7240
Commit: 6cf023f9b76c0ae6ad2f80ffb0a9f77888c553e9
Parents: 3121e8c
Author: Wangda Tan 
Authored: Thu Apr 5 15:50:55 2018 -0700
Committer: Wangda Tan 
Committed: Thu Apr 5 15:52:39 2018 -0700

--
 .../markdown/release/3.1.0/CHANGES.3.1.0.md |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md |   199 +
 .../jdiff/Apache_Hadoop_HDFS_3.1.0.xml  |   676 +
 .../Apache_Hadoop_MapReduce_Common_3.1.0.xml|   113 +
 .../Apache_Hadoop_MapReduce_Core_3.1.0.xml  | 28075 +
 .../Apache_Hadoop_MapReduce_JobClient_3.1.0.xml |16 +
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml   |  3146 ++
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml   |  3034 ++
 .../Apache_Hadoop_YARN_Server_Common_3.1.0.xml  |  1331 +
 9 files changed, 37612 insertions(+)
--






[15/50] [abbrv] hadoop git commit: HDFS-13353. RBF: TestRouterWebHDFSContractCreate failed. Contributed by Takanobu Asanuma.

2018-04-10 Thread xyao
HDFS-13353. RBF: TestRouterWebHDFSContractCreate failed. Contributed by 
Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3121e8c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3121e8c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3121e8c2

Branch: refs/heads/HDFS-7240
Commit: 3121e8c29361cb560df29188e1cd1061a5fc34c4
Parents: f32d627
Author: Wei Yan 
Authored: Thu Apr 5 12:00:52 2018 -0700
Committer: Wei Yan 
Committed: Thu Apr 5 12:00:52 2018 -0700

--
 .../hadoop/fs/contract/AbstractContractCreateTest.java  | 12 ++--
 .../src/test/resources/contract/webhdfs.xml |  5 +
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3121e8c2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
index 2053f50..07c99e0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
@@ -244,12 +244,12 @@ public abstract class AbstractContractCreateTest extends
   out.write('a');
   out.flush();
   if (!fs.exists(path)) {
-
-if (isSupported(IS_BLOBSTORE)) {
-  // object store: downgrade to a skip so that the failure is visible
-  // in test results
-  skip("Filesystem is an object store and newly created files are not "
-  + "immediately visible");
+if (isSupported(IS_BLOBSTORE) ||
+isSupported(CREATE_VISIBILITY_DELAYED)) {
+  // object store or some file systems: downgrade to a skip so that the
+  // failure is visible in test results
+  skip("For object store or some file systems, newly created files are"
+  + " not immediately visible");
 }
 assertPathExists("expected path to be visible before file closed",
 path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3121e8c2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
index f9b7d94..0cb6dd8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/resources/contract/webhdfs.xml
@@ -23,4 +23,9 @@
 false
   
 
+  <property>
+    <name>fs.contract.create-visibility-delayed</name>
+    <value>true</value>
+  </property>
+
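Any file system whose newly created files only become visible on close can opt in the same way: declaring fs.contract.create-visibility-delayed=true in its contract definition downgrades the visibility assertion in testCreatedFileIsImmediatelyVisible to a skip rather than a failure.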
 





[34/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 6c6ac20..addec66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.security.TestGroupsCaching;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,8 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 .SimpleGroupsMapping;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Assert;
@@ -89,6 +92,8 @@ import static 
org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.DOT;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.capacity.CapacitySchedulerConfiguration.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -99,7 +104,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   private static final Log LOG = LogFactory.getLog(
   TestCapacitySchedulerAutoCreatedQueueBase.class);
   public static final int GB = 1024;
-  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
+  public static final ContainerUpdates NULL_UPDATE_REQUESTS =
   new ContainerUpdates();
 
   public static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
@@ -112,9 +117,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public static final String B3 = B + ".b3";
-  public static final String C1 = C + ".c1";
-  public static final String C2 = C + ".c2";
-  public static final String C3 = C + ".c3";
   public static final float A_CAPACITY = 20f;
   public static final float B_CAPACITY = 40f;
   public static final float C_CAPACITY = 20f;
@@ -124,8 +126,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final float B1_CAPACITY = 60f;
   public static final float B2_CAPACITY = 20f;
   public static final float B3_CAPACITY = 20f;
-  public static final float C1_CAPACITY = 20f;
-  public static final float C2_CAPACITY = 20f;
 
   public static final int NODE_MEMORY = 16;
 
@@ -147,12 +147,14 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String NODEL_LABEL_GPU = "GPU";
   public static final String NODEL_LABEL_SSD = "SSD";
 
+  public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
+  public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
+
   protected MockRM mockRM = null;
   protected MockNM nm1 = null;
   protected MockNM nm2 = null;
   protected MockNM nm3 = null;
   protected CapacityScheduler cs;
-  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
   protected SpyDispatcher dispatcher;
   private static EventHandler rmAppEventEventHandler;
 
@@ -215,15 +217,29 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   }
 
   protected void setupNodes(MockRM newMockRM) throws Exception {
+NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
+ssdLabel.setName(NODEL_LABEL_SSD);
+ssdLabel.setExclusivity(true);
+
 nm1 

[12/50] [abbrv] hadoop git commit: HDFS-13350. A negative legacy block ID can confuse Erasure Coding into treating the block as striped. (Contributed by Lei (Eddy) Xu).

2018-04-10 Thread xyao
HDFS-13350. A negative legacy block ID can confuse Erasure Coding into
treating the block as striped. (Contributed by Lei (Eddy) Xu).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d737bf99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d737bf99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d737bf99

Branch: refs/heads/HDFS-7240
Commit: d737bf99d44ce34cd01baad716d23df269267c95
Parents: e52539b
Author: Lei Xu 
Authored: Wed Apr 4 15:56:17 2018 -0700
Committer: Lei Xu 
Committed: Thu Apr 5 09:59:10 2018 -0700

--
 .../server/blockmanagement/BlockIdManager.java  | 17 ++
 .../server/blockmanagement/BlockManager.java|  5 +-
 .../blockmanagement/BlockManagerSafeMode.java   |  2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  | 12 ++--
 .../blockmanagement/CorruptReplicasMap.java | 35 +--
 .../blockmanagement/InvalidateBlocks.java   | 13 +++--
 .../blockmanagement/TestBlockManager.java   | 61 
 .../blockmanagement/TestCorruptReplicaInfo.java | 48 ++-
 8 files changed, 136 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 321155b..5eebe8e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -239,6 +239,23 @@ public class BlockIdManager {
 legacyGenerationStampLimit = HdfsConstants.GRANDFATHER_GENERATION_STAMP;
   }
 
+  /**
+   * Return true if the block is a striped block.
+   *
+   * Before HDFS-4645, block IDs were randomly generated (legacy), so a
+   * legacy block ID can be negative; such an ID must not be taken for a
+   * striped block ID.
+   *
+   * @see #isLegacyBlock(Block) detecting legacy block IDs.
+   */
+  public boolean isStripedBlock(Block block) {
+return isStripedBlockID(block.getBlockId()) && !isLegacyBlock(block);
+  }
+
+  /**
+   * See {@link #isStripedBlock(Block)}; this function should not be used on
+   * its own to determine whether a block is striped.
+   */
   public static boolean isStripedBlockID(long id) {
 return BlockType.fromBlockId(id) == STRIPED;
   }
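
The combined check is easy to get wrong in isolation, so here is a minimal,
self-contained sketch of the logic this hunk introduces. The negative-ID test
and the generation-stamp threshold are simplified stand-ins for BlockType and
BlockIdManager state; the constant below is an assumed value, not Hadoop's.

final class StripedCheckSketch {
  static final long LEGACY_GENERATION_STAMP_LIMIT = 1_000_000L; // assumed

  static boolean isStripedBlockID(long id) {
    return id < 0; // striped block IDs live in the negative range
  }

  static boolean isLegacyBlock(long generationStamp) {
    // Blocks written before the new generation-stamp scheme are "legacy".
    return generationStamp < LEGACY_GENERATION_STAMP_LIMIT;
  }

  static boolean isStripedBlock(long id, long generationStamp) {
    // A negative ID alone is not enough: a randomly generated legacy ID
    // can also be negative, so both conditions must hold.
    return isStripedBlockID(id) && !isLegacyBlock(generationStamp);
  }

  public static void main(String[] args) {
    System.out.println(isStripedBlock(-42L, 2_000_000L)); // true: striped
    System.out.println(isStripedBlock(-42L, 500L));       // false: legacy
  }
}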

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f49e1d8..76a7781 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -448,7 +448,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 
1000L;
 invalidateBlocks = new InvalidateBlocks(
 datanodeManager.getBlockInvalidateLimit(),
-startupDelayBlockDeletionInMs);
+startupDelayBlockDeletionInMs,
+blockIdManager);
 
 // Compute the map capacity by allocating 2% of total memory
 blocksMap = new BlocksMap(
@@ -1677,7 +1678,7 @@ public class BlockManager implements BlockStatsMXBean {
   corrupted.setBlockId(b.getStored().getBlockId());
 }
 corruptReplicas.addToCorruptReplicasMap(corrupted, node, b.getReason(),
-b.getReasonCode());
+b.getReasonCode(), b.getStored().isStriped());
 
 NumberReplicas numberOfReplicas = countNodes(b.getStored());
 boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >=

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d737bf99/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java
 

[29/50] [abbrv] hadoop git commit: YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. (Xuan Gong via wangda)

2018-04-10 Thread xyao
YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. 
(Xuan Gong via wangda)

Change-Id: Ied37ff11e507fc86847753ba79486652c8fadfe9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00ebec89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00ebec89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00ebec89

Branch: refs/heads/HDFS-7240
Commit: 00ebec89f101347a5da44657e388b30c57ed9deb
Parents: d4e63cc
Author: Wangda Tan 
Authored: Fri Apr 6 21:25:57 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 6 21:25:57 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../containermanager/AuxServices.java   | 160 +-
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../containermanager/TestAuxServices.java   | 167 +--
 4 files changed, 313 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7a2a3ce..2590b6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2106,6 +2106,9 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_AUX_SERVICES_CLASSPATH =
   NM_AUX_SERVICES + ".%s.classpath";
 
+  public static final String NM_AUX_SERVICE_REMOTE_CLASSPATH =
+  NM_AUX_SERVICES + ".%s.remote-classpath";
+
   public static final String NM_AUX_SERVICES_SYSTEM_CLASSES =
   NM_AUX_SERVICES + ".%s.system-classes";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 57cca50..c8b7a76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
+import java.io.IOException;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
@@ -29,45 +31,70 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler;
 import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
 

[22/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
new file mode 100644
index 000..3ccbae4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
@@ -0,0 +1,1022 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.1.0 - 2018-03-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | Metrics 
sinks may emit too frequently if multiple sink periods are configured |  Minor 
| metrics | Erik Krogen | Erik Krogen |
+| [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | Fsck report 
shows config key name for min replication issues |  Minor | hdfs | Harshakiran 
Reddy | Gabor Bota |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: 
Document Router and State Store metrics |  Major | documentation | Yiqun Lin | 
Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add 
ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only 
NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  
Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun 
Saxena |
+| [HADOOP-13282](https://issues.apache.org/jira/browse/HADOOP-13282) | S3 blob 
etags to be made visible in S3A status/getFileChecksum() calls |  Minor | fs/s3 
| Steve Loughran | Steve Loughran |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use 
the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | 
Yiqun Lin |
+| [YARN-7677](https://issues.apache.org/jira/browse/YARN-7677) | Docker image 
cannot set HADOOP\_CONF\_DIR |  Major | . | Eric Badger | Jim Brennan |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix 
doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-15005](https://issues.apache.org/jira/browse/HADOOP-15005) | Support 
meta tag element in Hadoop XML configurations |  Major | . | Ajay Kumar | Ajay 
Kumar |
+| [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) | [Umbrella] 
Extend the YARN resource model for easier resource-type management and profiles 
|  Major | nodemanager, resourcemanager | Varun Vasudev | Varun Vasudev |
+| [HDFS-7877](https://issues.apache.org/jira/browse/HDFS-7877) | [Umbrella] 
Support maintenance state for datanodes |  Major | datanode, namenode | Ming Ma 
| Ming Ma |
+| [HADOOP-13055](https://issues.apache.org/jira/browse/HADOOP-13055) | 
Implement linkMergeSlash and linkFallback for ViewFileSystem |  Major | fs, 
viewfs | Zhe Zhang | Manoj Govindassamy |
+| [YARN-6871](https://issues.apache.org/jira/browse/YARN-6871) | Add 
additional deSelects params in RMWebServices#getAppReport |  Major | 
resourcemanager, router | Giovanni Matteo Fumarola | Tanuj Nayak |
+| [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | Tool to 
estimate resource requirements of an application pipeline based on prior 
executions |  Major | tools | Subru Krishnan | Rui Li |
+| [HDFS-206](https://issues.apache.org/jira/browse/HDFS-206) | Support for 
head in FSShell |  Minor | . | Olga Natkovich | Gabor Bota |
+| [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | [Umbrella] 
Native YARN framework layer for services and beyond |  Major | . | Vinod Kumar 
Vavilapalli |  |
+| [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | [Umbrella] 
Simplified discovery of services via DNS mechanisms |  Major | . | Vinod Kumar 
Vavilapalli |  |
+| [HADOOP-13786](https://issues.apache.org/jira/browse/HADOOP-13786) | Add S3A 
committer for zero-rename commits to S3 endpoints |  Major | fs/s3 | Steve 
Loughran | Steve Loughran |
+| [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | Allow HDFS 
block replicas to be provided by an external storage system |  Major | . | 
Chris Douglas |  |
+| [YARN-6592](https://issues.apache.org/jira/browse/YARN-6592) | [Umbrella] 
Rich placement constraints in YARN |  Major | . | Konstantinos Karanasos |  |
+| [HDFS-12998](https://issues.apache.org/jira/browse/HDFS-12998) | 
SnapshotDiff - Provide an iterator-based listing API for calculating 

[32/50] [abbrv] hadoop git commit: HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean shutdown. Contributed by Shashikant Banerjee.

2018-04-10 Thread xyao
HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean 
shutdown. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b345b76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b345b76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b345b76

Branch: refs/heads/HDFS-7240
Commit: 0b345b765370515d7222154ad5cae9b86f137a76
Parents: eb47c3d
Author: Mukul Kumar Singh 
Authored: Sat Apr 7 16:29:01 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Sat Apr 7 16:29:01 2018 +0530

--
 .../hadoop/util/concurrent/HadoopExecutors.java | 34 +++-
 1 file changed, 33 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b345b76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 1bc6976..7a04c30 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -27,7 +27,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
-
+import org.slf4j.Logger;
 
 /** Factory methods for ExecutorService, ScheduledExecutorService instances.
  * These executor service instances provide additional functionality (e.g
@@ -91,6 +91,38 @@ public final class HadoopExecutors {
 return Executors.newSingleThreadScheduledExecutor(threadFactory);
   }
 
+  /**
+   * Helper routine to shut down an ExecutorService.
+   *
+   * @param executorService - the ExecutorService to shut down
+   * @param logger  - Logger used to report shutdown problems
+   * @param timeout - how long to wait for termination
+   * @param unit    - TimeUnit for the timeout, generally seconds
+   */
+  public static void shutdown(ExecutorService executorService, Logger logger,
+      long timeout, TimeUnit unit) {
+    try {
+      if (executorService != null) {
+        executorService.shutdown();
+        try {
+          if (!executorService.awaitTermination(timeout, unit)) {
+            executorService.shutdownNow();
+          }
+
+          if (!executorService.awaitTermination(timeout, unit)) {
+            logger.error("Unable to shutdown properly.");
+          }
+        } catch (InterruptedException e) {
+          logger.error("Error attempting to shutdown.", e);
+          executorService.shutdownNow();
+        }
+      }
+    } catch (Exception e) {
+      logger.error("Error during shutdown: ", e);
+      throw e;
+    }
+  }
+
   //disable instantiation
   private HadoopExecutors() { }
 }
\ No newline at end of file
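
For reference, a hypothetical caller of the new helper (the pool, task, and
logger names are illustrative): one call replaces the usual
shutdown/awaitTermination/shutdownNow dance.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShutdownSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ShutdownSketch.class);

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    pool.submit(() -> LOG.info("work"));
    // Gives running tasks up to ten seconds before forcing termination.
    HadoopExecutors.shutdown(pool, LOG, 10, TimeUnit.SECONDS);
  }
}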





[06/50] [abbrv] hadoop git commit: YARN-8106. Update LogAggregationIndexedFileController to use readFully instead of read to avoid IOException while loading log meta. (Prabhu Joseph via wangda)

2018-04-10 Thread xyao
YARN-8106. Update LogAggregationIndexedFileController to use readFully instead
of read to avoid IOException while loading log meta. (Prabhu Joseph via wangda)

Change-Id: I63a65f73f8d1636e2c99ed9c8c2bbd05efcff80f


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b779f4f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b779f4f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b779f4f0

Branch: refs/heads/HDFS-7240
Commit: b779f4f0f614fe47e05bc2be5494cf3cbcf6f63c
Parents: f7a17b0
Author: Wangda Tan 
Authored: Tue Apr 3 21:06:24 2018 -0700
Committer: Wangda Tan 
Committed: Tue Apr 3 21:06:24 2018 -0700

--
 .../filecontroller/ifile/LogAggregationIndexedFileController.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b779f4f0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 5bba2e0..a8ae06f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -865,7 +865,8 @@ public class LogAggregationIndexedFileController
   byte[] array = new byte[offset];
   fsDataIStream.seek(
   fileLength - offset - Integer.SIZE/ Byte.SIZE - UUID_LENGTH);
-  int actual = fsDataIStream.read(array);
+  fsDataIStream.readFully(array);
+  int actual = array.length;
   if (actual != offset) {
 throw new IOException("Error on loading log meta from "
 + remoteLogPath);
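
The contract difference driving the fix, shown on a stand-in stream: a plain
read(byte[]) may legally return fewer bytes than requested even on a healthy
stream, while readFully(byte[]) either fills the buffer or throws
EOFException. This demo is a sketch, not the controller's actual I/O path.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class ReadFullyDemo {
  public static void main(String[] args) throws IOException {
    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(new byte[1024]));
    byte[] buf = new byte[512];
    int n = in.read(buf);  // may return < 512 on other stream types
    in.readFully(buf);     // guaranteed: buf is full, or EOFException
    System.out.println("single read returned " + n + " bytes");
  }
}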





[31/50] [abbrv] hadoop git commit: YARN-7905. Parent directory permission incorrect during public localization. Contributed by Bilwa S T.

2018-04-10 Thread xyao
YARN-7905. Parent directory permission incorrect during public localization. 
Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb47c3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb47c3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb47c3de

Branch: refs/heads/HDFS-7240
Commit: eb47c3de74ba4b8b3ef47eaf3a44e5562fd22fc9
Parents: 70590cd
Author: bibinchundatt 
Authored: Sat Apr 7 12:13:00 2018 +0530
Committer: bibinchundatt 
Committed: Sat Apr 7 12:26:29 2018 +0530

--
 .../localizer/ResourceLocalizationService.java  |  20 +++
 .../TestResourceLocalizationService.java| 125 +++
 2 files changed, 145 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
old mode 100644
new mode 100755
index 29fc747..ddae2ae
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -152,6 +152,8 @@ public class ResourceLocalizationService extends 
CompositeService
LoggerFactory.getLogger(ResourceLocalizationService.class);
   public static final String NM_PRIVATE_DIR = "nmPrivate";
   public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 
0700);
+  private static final FsPermission PUBLIC_FILECACHE_FOLDER_PERMS =
+  new FsPermission((short) 0755);
 
   private Server server;
   private InetSocketAddress localizationServerAddress;
@@ -881,6 +883,7 @@ public class ResourceLocalizationService extends 
CompositeService
 publicRsrc.getPathForLocalization(key, publicRootPath,
 delService);
 if (!publicDirDestPath.getParent().equals(publicRootPath)) {
+  createParentDirs(publicDirDestPath, publicRootPath);
   if (diskValidator != null) {
 diskValidator.checkStatus(
 new File(publicDirDestPath.toUri().getPath()));
@@ -932,6 +935,23 @@ public class ResourceLocalizationService extends 
CompositeService
   }
 }
 
+    private void createParentDirs(Path destDirPath, Path destDirRoot)
+        throws IOException {
+      if (destDirPath == null || destDirPath.equals(destDirRoot)) {
+        return;
+      }
+      createParentDirs(destDirPath.getParent(), destDirRoot);
+      createDir(destDirPath, PUBLIC_FILECACHE_FOLDER_PERMS);
+    }
+
+    private void createDir(Path dirPath, FsPermission perms)
+        throws IOException {
+      lfs.mkdir(dirPath, perms, false);
+      if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
+        lfs.setPermission(dirPath, perms);
+      }
+    }
+
 @Override
 public void run() {
   try {
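
The reason createDir() re-checks permissions after mkdir, in a runnable
sketch: mkdir honors the process umask, so a requested 0755 can silently come
out narrower, and only an explicit setPermission() restores it. The umask
value below is an assumption for illustration.

import org.apache.hadoop.fs.permission.FsPermission;

public class UmaskSketch {
  public static void main(String[] args) {
    FsPermission requested = new FsPermission((short) 0755);
    FsPermission umask = new FsPermission((short) 0027); // assumed umask
    FsPermission effective = requested.applyUMask(umask);
    System.out.println("requested=" + requested + " effective=" + effective);
    // When bits were stripped, the service calls setPermission() explicitly.
    System.out.println("needs setPermission: "
        + !requested.equals(effective));
  }
}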

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
old mode 100644
new mode 100755
index d863c6a..4d03f15
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 

[27/50] [abbrv] hadoop git commit: YARN-8110. AMRMProxy recovery should catch all throwables to avoid premature exit. (Botong Huang via Subru).

2018-04-10 Thread xyao
YARN-8110. AMRMProxy recovery should catch all throwables to avoid premature
exit. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00905efa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00905efa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00905efa

Branch: refs/heads/HDFS-7240
Commit: 00905efab22edd9857e0a3828c201bf70f03cb96
Parents: 024d7c0
Author: Subru Krishnan 
Authored: Fri Apr 6 16:31:16 2018 -0700
Committer: Subru Krishnan 
Committed: Fri Apr 6 16:31:16 2018 -0700

--
 .../nodemanager/amrmproxy/AMRMProxyService.java |  2 +-
 .../amrmproxy/BaseAMRMProxyTest.java|  5 +++
 .../amrmproxy/TestAMRMProxyService.java | 42 
 3 files changed, 48 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
index 815e39b..86fbb72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java
@@ -261,7 +261,7 @@ public class AMRMProxyService extends CompositeService 
implements
 // Create the intercepter pipeline for the AM
 initializePipeline(attemptId, user, amrmToken, localToken,
 entry.getValue(), true, amCred);
-  } catch (IOException e) {
+  } catch (Throwable e) {
 LOG.error("Exception when recovering " + attemptId
 + ", removing it from NMStateStore and move on", e);
 this.nmContext.getNMStateStore().removeAMRMProxyAppContext(attemptId);
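
The widened catch implements a common recovery-loop pattern: one corrupt
entry, even one failing with an Error or a RuntimeException, is logged and
dropped instead of aborting recovery of everything else. A self-contained
sketch of that shape, with simplified stand-ins for the state store:

import java.util.Iterator;
import java.util.Map;

public class RecoverAllSketch {
  static void recover(Map<String, byte[]> store) {
    Iterator<Map.Entry<String, byte[]>> it = store.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, byte[]> entry = it.next();
      try {
        initializePipeline(entry.getKey(), entry.getValue());
      } catch (Throwable t) { // not just IOException: survive any failure
        System.err.println("Skipping " + entry.getKey() + ": " + t);
        it.remove(); // drop the broken entry and move on
      }
    }
  }

  static void initializePipeline(String id, byte[] ctx) { /* omitted */ }
}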

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
index 4b1a887..677732d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/BaseAMRMProxyTest.java
@@ -112,6 +112,11 @@ public abstract class BaseAMRMProxyTest {
 return this.amrmProxyService;
   }
 
+  protected Context getNMContext() {
+Assert.assertNotNull(this.nmContext);
+return this.nmContext;
+  }
+
   @Before
   public void setUp() throws IOException {
 this.conf = createConfiguration();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00905efa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
index b955311..1eefbd5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/TestAMRMProxyService.java
+++ 

[33/50] [abbrv] hadoop git commit: HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by Yiran Wu.

2018-04-10 Thread xyao
HDFS-13402. RBF: Fix java doc for StateStoreFileSystemImpl. Contributed by 
Yiran Wu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5700556c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5700556c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5700556c

Branch: refs/heads/HDFS-7240
Commit: 5700556cd65a558f4393e05acb7ea8db3ccd2f36
Parents: 0b345b7
Author: Yiqun Lin 
Authored: Sun Apr 8 12:01:55 2018 +0800
Committer: Yiqun Lin 
Committed: Sun Apr 8 12:01:55 2018 +0800

--
 .../federation/store/driver/impl/StateStoreFileSystemImpl.java | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5700556c/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
index ad822fb..2e1ff8f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java
@@ -35,13 +35,15 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
 import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * StateStoreDriver} implementation based on a filesystem. The most common uses
- * HDFS as a backend.
+ * {@link StateStoreDriver} implementation based on a filesystem. The common
+ * implementation uses HDFS as a backend. The path can be specified setting
+ * dfs.federation.router.driver.fs.path=hdfs://host:port/path/to/store.
  */
 public class StateStoreFileSystemImpl extends StateStoreFileBaseImpl {
 





[42/50] [abbrv] hadoop git commit: HDFS-13384. RBF: Improve timeout RPC call mechanism. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
HDFS-13384. RBF: Improve timeout RPC call mechanism. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e87be8a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e87be8a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e87be8a2

Branch: refs/heads/HDFS-7240
Commit: e87be8a2a49573897e40bfdf43541e3635e35c98
Parents: a92200f
Author: Yiqun Lin 
Authored: Tue Apr 10 15:34:42 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 10 15:34:42 2018 +0800

--
 .../federation/metrics/NamenodeBeanMetrics.java |   3 +
 .../federation/router/RouterRpcClient.java  |   2 +-
 .../router/SubClusterTimeoutException.java  |  33 +
 .../server/federation/MiniRouterDFSCluster.java |  31 -
 .../router/TestRouterRPCClientRetries.java  | 126 ++-
 5 files changed, 192 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index e8c6c82..4d22ae7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
+import 
org.apache.hadoop.hdfs.server.federation.router.SubClusterTimeoutException;
 import org.apache.hadoop.hdfs.server.federation.store.MembershipStore;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
 import 
org.apache.hadoop.hdfs.server.federation.store.protocol.GetNamespaceInfoRequest;
@@ -396,6 +397,8 @@ public class NamenodeBeanMetrics
   }
 } catch (StandbyException e) {
   LOG.error("Cannot get {} nodes, Router in safe mode", type);
+} catch (SubClusterTimeoutException e) {
+  LOG.error("Cannot get {} nodes, subclusters timed out responding", type);
 } catch (IOException e) {
   LOG.error("Cannot get " + type + " nodes", e);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index 4723b4c..e2c9cb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -1007,7 +1007,7 @@ public class RouterRpcClient {
   String msg =
   "Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
   LOG.error(msg);
-  IOException ioe = new IOException(msg);
+  IOException ioe = new SubClusterTimeoutException(msg);
   exceptions.put(location, ioe);
 } catch (ExecutionException ex) {
   Throwable cause = ex.getCause();
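
Because SubClusterTimeoutException extends IOException, callers such as
NamenodeBeanMetrics above can now distinguish a timeout from a generic I/O
failure by ordering their catch clauses. A condensed, self-contained sketch
with a stand-in exception:

import java.io.IOException;

public class TimeoutHandlingSketch {
  static class SubClusterTimeoutException extends IOException {
    SubClusterTimeoutException(String msg) { super(msg); }
  }

  static void invoke() throws IOException {
    throw new SubClusterTimeoutException("Invocation timed out");
  }

  public static void main(String[] args) {
    try {
      invoke();
    } catch (SubClusterTimeoutException e) { // specific catch must come first
      System.err.println("subclusters timed out responding: "
          + e.getMessage());
    } catch (IOException e) {
      System.err.println("request failed: " + e);
    }
  }
}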

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e87be8a2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
new file mode 100644
index 000..dac5bd6
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/SubClusterTimeoutException.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more 

[14/50] [abbrv] hadoop git commit: YARN-8119. [UI2] Timeline Server address' url scheme should be removed while accessing via KNOX. Contributed by Sunil G.

2018-04-10 Thread xyao
YARN-8119. [UI2] Timeline Server address' url scheme should be removed while 
accessing via KNOX. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f32d6275
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f32d6275
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f32d6275

Branch: refs/heads/HDFS-7240
Commit: f32d6275ba9e377fb722e2440986033d7ce8b602
Parents: f8b8bd5
Author: Rohith Sharma K S 
Authored: Thu Apr 5 23:32:35 2018 +0530
Committer: Rohith Sharma K S 
Committed: Thu Apr 5 23:32:35 2018 +0530

--
 .../hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f32d6275/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 1f9c7c1..83df971 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -61,6 +61,7 @@ function updateConfigs(application) {
   url: getTimeLineURL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
+timelinehost = timelinehost.replace(/(^\w+:|^)\/\//, '');
 ENV.hosts.timelineWebAddress = timelinehost;
 
 var address = timelinehost.split(":")[0];
@@ -94,6 +95,7 @@ function updateConfigs(application) {
   url: getTimeLineV1URL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
+timelinehost = timelinehost.replace(/(^\w+:|^)\/\//, '');
 ENV.hosts.timelineV1WebAddress = timelinehost;
 
 var address = timelinehost.split(":")[0];
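
The regex added in both handlers strips a leading scheme ("http://",
"https://") or a bare "//" so only host:port remains for KNOX. The same
transformation expressed in Java, purely for illustration:

public class SchemeStripDemo {
  public static void main(String[] args) {
    String[] inputs = {
        "http://timeline.example.com:8188",
        "https://knox.example.com:8443",
        "//timeline.example.com:8188",
        "timeline.example.com:8188"
    };
    for (String s : inputs) {
      // ^(\w+:)?// is equivalent to the JavaScript /(^\w+:|^)\/\//
      System.out.println(s.replaceFirst("^(\\w+:)?//", ""));
    }
  }
}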





[16/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
new file mode 100644
index 000..1e826f3
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Server_Common_3.1.0.xml
@@ -0,0 +1,1331 @@
+  [1,331 lines of generated JDiff XML for Apache_Hadoop_YARN_Server_Common_3.1.0.xml; the element markup was stripped in the archive rendering and is not recoverable here]





[08/50] [abbrv] hadoop git commit: YARN-8013. Support application tags when defining application namespaces for placement constraints. Contributed by Weiwei Yang.

2018-04-10 Thread xyao
YARN-8013. Support application tags when defining application namespaces for 
placement constraints. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7853ec8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7853ec8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7853ec8d

Branch: refs/heads/HDFS-7240
Commit: 7853ec8d2fb8731b7f7c28fd87491a0a2d47967e
Parents: 42cd367
Author: Konstantinos Karanasos 
Authored: Wed Apr 4 10:51:58 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Wed Apr 4 10:51:58 2018 -0700

--
 .../api/records/AllocationTagNamespaceType.java |   2 +-
 .../constraint/AllocationTagNamespace.java  | 312 --
 .../scheduler/constraint/AllocationTags.java|  44 ++-
 .../constraint/AllocationTagsManager.java   |  47 ++-
 .../constraint/PlacementConstraintsUtil.java|  41 +--
 .../constraint/TargetApplications.java  |  53 ++-
 .../constraint/TargetApplicationsNamespace.java | 326 +++
 .../SingleConstraintAppPlacementAllocator.java  |  21 --
 .../server/resourcemanager/rmapp/MockRMApp.java |   9 +-
 ...estSchedulingRequestContainerAllocation.java |   5 +-
 .../constraint/TestAllocationTagsManager.java   |  22 +-
 .../constraint/TestAllocationTagsNamespace.java |  89 -
 .../TestPlacementConstraintsUtil.java   | 125 ++-
 13 files changed, 654 insertions(+), 442 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
index de5492e..f304600 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
@@ -26,7 +26,7 @@ public enum AllocationTagNamespaceType {
   SELF("self"),
   NOT_SELF("not-self"),
   APP_ID("app-id"),
-  APP_LABEL("app-label"),
+  APP_TAG("app-tag"),
   ALL("all");
 
   private String typeKeyword;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
deleted file mode 100644
index 7b9f3be..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-import 

[10/50] [abbrv] hadoop git commit: YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions configuration. Contributed by Rohith Sharma K S

2018-04-10 Thread xyao
YARN-8073 TimelineClientImpl doesn't honor yarn.timeline-service.versions 
configuration. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/345e7624
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/345e7624
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/345e7624

Branch: refs/heads/HDFS-7240
Commit: 345e7624d58a058a1bad666bd1e5ce4b346a9056
Parents: 3087e89
Author: Vrushali C 
Authored: Wed Apr 4 15:08:03 2018 -0700
Committer: Vrushali C 
Committed: Wed Apr 4 15:08:03 2018 -0700

--
 .../jobhistory/JobHistoryEventHandler.java  |  2 +-
 .../hadoop/yarn/conf/YarnConfiguration.java | 21 
 .../client/api/impl/TimelineClientImpl.java | 23 +
 .../yarn/util/timeline/TimelineUtils.java   |  3 +--
 .../TestCombinedSystemMetricsPublisher.java | 26 
 .../reader/TimelineReaderServer.java|  2 +-
 6 files changed, 61 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index fd93d07..52c13f1 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -269,7 +269,7 @@ public class JobHistoryEventHandler extends AbstractService
   LOG.info("Emitting job history data to the timeline service is enabled");
   if (YarnConfiguration.timelineServiceEnabled(conf)) {
 boolean timelineServiceV2Enabled =
-((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
+YarnConfiguration.timelineServiceV2Enabled(conf);
 if(timelineServiceV2Enabled) {
   timelineV2Client =
   ((MRAppMaster.RunningAppContext)context).getTimelineV2Client();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 42f2cae..41755e2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3797,6 +3797,27 @@ public class YarnConfiguration extends Configuration {
   }
 
   /**
+   * Returns whether the timeline service v.1.5 is enabled via configuration.
+   *
+   * @param conf the configuration
+   * @return whether the timeline service v.1.5 is enabled. V.1.5 refers to a
+   * version equal to 1.5.
+   */
+  public static boolean timelineServiceV15Enabled(Configuration conf) {
+    boolean enabled = false;
+    if (timelineServiceEnabled(conf)) {
+      Collection<Float> versions = getTimelineServiceVersions(conf);
+      for (Float version : versions) {
+        if (Float.compare(version, 1.5f) == 0) {
+          enabled = true;
+          break;
+        }
+      }
+    }
+    return enabled;
+  }
+
+  /**
* Returns all the active timeline service versions. It does not check
* whether the timeline service itself is enabled.
*
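
The new check in action, as a sketch: with multiple versions listed, both the
v1.5 and v2 probes report enabled. The property names
yarn.timeline-service.enabled and yarn.timeline-service.versions are the
well-known keys this code path reads; the exact value syntax below is an
assumption.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TimelineVersionsSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setBoolean("yarn.timeline-service.enabled", true);
    conf.set("yarn.timeline-service.versions", "1.5f,2.0f");
    System.out.println("v1.5 enabled: "
        + YarnConfiguration.timelineServiceV15Enabled(conf));
    System.out.println("v2 enabled: "
        + YarnConfiguration.timelineServiceV2Enabled(conf));
  }
}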

http://git-wip-us.apache.org/repos/asf/hadoop/blob/345e7624/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index 44d6d48..88fccd9 100644
--- 

[38/50] [abbrv] hadoop git commit: HDFS-13376. Specify minimum GCC version to avoid TLS support error in Build of hadoop-hdfs-native-client. Contributed by LiXin Ge.

2018-04-10 Thread xyao
HDFS-13376. Specify minimum GCC version to avoid a TLS support error in the
build of hadoop-hdfs-native-client. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90593767
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90593767
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90593767

Branch: refs/heads/HDFS-7240
Commit: 905937678577fc0deb57489590863464562088ad
Parents: e9b9f48
Author: James Clampffer 
Authored: Mon Apr 9 13:48:42 2018 -0400
Committer: James Clampffer 
Committed: Mon Apr 9 13:48:42 2018 -0400

--
 BUILDING.txt | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90593767/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 3b9a2ef..9727004 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -10,6 +10,8 @@ Requirements:
 * CMake 3.1 or newer (if compiling native code)
 * Zlib devel (if compiling native code)
 * Cyrus SASL devel (if compiling native code)
+* One of the compilers that support thread_local storage: GCC 4.8.1 or later, 
Visual Studio,
+  Clang (community version), Clang (version for iOS 9 and later) (if compiling 
native code)
 * openssl devel (if compiling native hadoop-pipes and to get the best HDFS 
encryption performance)
 * Linux FUSE (Filesystem in Userspace) version 2.6 or above (if compiling 
fuse_dfs)
 * Jansson C XML parsing library ( if compiling libwebhdfs )





[39/50] [abbrv] hadoop git commit: YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric Badger

2018-04-10 Thread xyao
YARN-7667. Docker Stop grace period should be configurable. Contributed by Eric 
Badger


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/907919d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/907919d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/907919d2

Branch: refs/heads/HDFS-7240
Commit: 907919d28c1b7e4496d189b46ecbb86a10d41339
Parents: 9059376
Author: Jason Lowe 
Authored: Mon Apr 9 17:19:21 2018 -0500
Committer: Jason Lowe 
Committed: Mon Apr 9 17:19:21 2018 -0500

--
 .../apache/hadoop/yarn/conf/YarnConfiguration.java| 14 ++
 .../src/main/resources/yarn-default.xml   |  8 
 .../linux/runtime/DockerLinuxContainerRuntime.java|  8 +++-
 .../linux/runtime/TestDockerContainerRuntime.java | 14 +++---
 4 files changed, 40 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2590b6f..d2a71bc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1951,6 +1951,20 @@ public class YarnConfiguration extends Configuration {
*/
   public static final boolean DEFAULT_NM_DOCKER_ALLOW_DELAYED_REMOVAL = false;
 
+  /**
+   * A configurable value to pass to the Docker Stop command. This value
+   * defines the number of seconds between the docker stop command sending
+   * a SIGTERM and a SIGKILL.
+   */
+  public static final String NM_DOCKER_STOP_GRACE_PERIOD =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "stop.grace-period";
+
+  /**
+   * The default value for the grace period between the SIGTERM and the
+   * SIGKILL in the Docker Stop command.
+   */
+  public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10;
+
   /** The mode in which the Java Container Sandbox should run detailed by
*  the JavaSandboxLinuxContainerRuntime. */
   public static final String YARN_CONTAINER_SANDBOX =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 81b6658..def0816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1787,6 +1787,14 @@
   
 
   
+  <property>
+    <description>A configurable value to pass to the Docker Stop command. This value
+      defines the number of seconds between the docker stop command sending
+      a SIGTERM and a SIGKILL.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.stop.grace-period</name>
+    <value>10</value>
+  </property>
+
   <property>
     <description>The mode in which the Java Container Sandbox should run detailed by
       the JavaSandboxLinuxContainerRuntime.</description>
     <name>yarn.nodemanager.runtime.linux.sandbox-mode</name>
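
Reading the new knob the way the container runtime presumably does, falling
back to the 10-second default when yarn-site.xml leaves it unset (a sketch
using the two constants introduced above):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class DockerStopGraceSketch {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    int gracePeriod = conf.getInt(
        YarnConfiguration.NM_DOCKER_STOP_GRACE_PERIOD,
        YarnConfiguration.DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD);
    // docker stop waits this many seconds after SIGTERM before SIGKILL
    System.out.println("docker stop grace period: " + gracePeriod + "s");
  }
}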

http://git-wip-us.apache.org/repos/asf/hadoop/blob/907919d2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 0290493..132ae38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

[41/50] [abbrv] hadoop git commit: HDFS-13410. RBF: Support federation with no subclusters. Contributed by Inigo Goiri.

2018-04-10 Thread xyao
HDFS-13410. RBF: Support federation with no subclusters. Contributed by Inigo 
Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a92200f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a92200f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a92200f4

Branch: refs/heads/HDFS-7240
Commit: a92200f4a6cec57b7080d1cd6e2a20d79d772dd6
Parents: 0006346
Author: Yiqun Lin 
Authored: Tue Apr 10 14:29:28 2018 +0800
Committer: Yiqun Lin 
Committed: Tue Apr 10 14:29:28 2018 +0800

--
 .../federation/router/RouterRpcClient.java  |  4 +-
 .../server/federation/router/TestRouter.java| 70 +++-
 .../server/federation/router/TestRouterRpc.java | 21 +-
 3 files changed, 77 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92200f4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
index ecb9f50..4723b4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -932,7 +932,9 @@ public class RouterRpcClient {
 final UserGroupInformation ugi = RouterRpcServer.getRemoteUser();
 final Method m = method.getMethod();
 
-if (locations.size() == 1) {
+if (locations.isEmpty()) {
+  throw new IOException("No remote locations available");
+} else if (locations.size() == 1) {
   // Shortcut, just one call
   T location = locations.iterator().next();
   String ns = location.getNameserviceId();
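
As a hedged illustration of the new guard (rpcClient, method, and the invokeSequential name are stand-ins sketched from the surrounding class, not shown in this hunk): an empty location list now fails fast with a clear message.

    import java.io.IOException;
    import java.util.Collections;
    import java.util.List;

    // rpcClient and method stand in for a configured RouterRpcClient
    // and RemoteMethod.
    List<RemoteLocation> locations = Collections.emptyList();
    try {
      rpcClient.invokeSequential(locations, method);
    } catch (IOException e) {
      // With this patch: "No remote locations available"
      System.out.println(e.getMessage());
    }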

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a92200f4/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
index 39398f7..f8cf009 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
@@ -17,23 +17,25 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.net.URISyntaxException;
+import java.net.InetSocketAddress;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.federation.MockResolver;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.service.Service.STATE;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -77,27 +79,31 @@ public class TestRouter {
 "0.0.0.0");
   }
 
-  @AfterClass
-  public static void destroy() {
-  }
-
-  @Before
-  public void setup() throws IOException, URISyntaxException {
-  }
-
-  @After
-  public void cleanup() {
-  }
-
   private static void testRouterStartup(Configuration routerConfig)
   throws InterruptedException, IOException {
 Router router = new Router();
 assertEquals(STATE.NOTINITED, router.getServiceState());
+assertEquals(RouterServiceState.UNINITIALIZED, router.getRouterState());
 router.init(routerConfig);
+if (routerConfig.getBoolean(
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE,
+RBFConfigKeys.DFS_ROUTER_SAFEMODE_ENABLE_DEFAULT)) {
+  assertEquals(RouterServiceState.SAFEMODE, 

[24/50] [abbrv] hadoop git commit: HADOOP-14759 S3GuardTool prune to prune specific bucket entries. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HADOOP-14759 S3GuardTool prune to prune specific bucket entries. Contributed by 
Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea3849f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea3849f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea3849f0

Branch: refs/heads/HDFS-7240
Commit: ea3849f0ccd32b2f8acbc6107de3b9e91803ed4a
Parents: 6cf023f
Author: Aaron Fabbri 
Authored: Thu Apr 5 20:23:17 2018 -0700
Committer: Aaron Fabbri 
Committed: Thu Apr 5 20:23:17 2018 -0700

--
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   | 18 +
 .../fs/s3a/s3guard/LocalMetadataStore.java  | 17 +++-
 .../hadoop/fs/s3a/s3guard/MetadataStore.java| 12 +++
 .../fs/s3a/s3guard/NullMetadataStore.java   |  4 
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 10 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   | 11 --
 .../s3guard/AbstractS3GuardToolTestBase.java| 21 
 7 files changed, 73 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea3849f0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 4c4043e..c579b3c 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -812,23 +812,33 @@ public class DynamoDBMetadataStore implements MetadataStore {
   }
 
   @Retries.OnceRaw
-  private ItemCollection expiredFiles(long modTime) {
-String filterExpression = "mod_time < :mod_time";
+  private ItemCollection expiredFiles(long modTime,
+  String keyPrefix) {
+String filterExpression =
+"mod_time < :mod_time and begins_with(parent, :parent)";
 String projectionExpression = "parent,child";
-ValueMap map = new ValueMap().withLong(":mod_time", modTime);
+ValueMap map = new ValueMap()
+.withLong(":mod_time", modTime)
+.withString(":parent", keyPrefix);
 return table.scan(filterExpression, projectionExpression, null, map);
   }
 
   @Override
   @Retries.OnceRaw("once(batchWrite)")
   public void prune(long modTime) throws IOException {
+prune(modTime, "/");
+  }
+
+  @Override
+  @Retries.OnceRaw("once(batchWrite)")
+  public void prune(long modTime, String keyPrefix) throws IOException {
 int itemCount = 0;
 try {
   Collection deletionBatch =
   new ArrayList<>(S3GUARD_DDB_BATCH_WRITE_REQUEST_LIMIT);
   int delay = conf.getInt(S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_KEY,
   S3GUARD_DDB_BACKGROUND_SLEEP_MSEC_DEFAULT);
-  for (Item item : expiredFiles(modTime)) {
+  for (Item item : expiredFiles(modTime, keyPrefix)) {
 PathMetadata md = PathMetadataDynamoDBTranslation
 .itemToPathMetadata(item, username);
 Path path = md.getFileStatus().getPath();
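
A hedged usage sketch of the two-argument prune (ms stands in for an initialized MetadataStore; the prefix format mirrors the parent paths matched by begins_with above):

    import java.util.concurrent.TimeUnit;

    // Prune entries older than a week, but only under one bucket's subtree.
    long cutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
    ms.prune(cutoff);                          // old behavior: whole table
    ms.prune(cutoff, "/example-bucket/logs");  // new behavior: one prefix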

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea3849f0/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
--
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
index 9267ab4..86059c8 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/LocalMetadataStore.java
@@ -303,12 +303,18 @@ public class LocalMetadataStore implements MetadataStore {
   }
 
   @Override
-  public synchronized void prune(long modTime) throws IOException {
+  public void prune(long modTime) throws IOException{
+prune(modTime, "");
+  }
+
+  @Override
+  public synchronized void prune(long modTime, String keyPrefix)
+  throws IOException {
 Iterator> files =
 fileHash.entrySet().iterator();
 while (files.hasNext()) {
   Map.Entry entry = files.next();
-  if (expired(entry.getValue().getFileStatus(), modTime)) {
+  if (expired(entry.getValue().getFileStatus(), modTime, keyPrefix)) {
 files.remove();
   }
 }
@@ -323,7 +329,7 @@ public class LocalMetadataStore implements MetadataStore {
 
   for (PathMetadata 

[21/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
new file mode 100644
index 000..9e3c65d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
@@ -0,0 +1,199 @@
+
+
+# Apache Hadoop  3.1.0 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+---
+
+* [HDFS-11799](https://issues.apache.org/jira/browse/HDFS-11799) | *Major* | 
**Introduce a config to allow setting up write pipeline with fewer nodes than 
replication factor**
+
+Added new configuration 
"dfs.client.block.write.replace-datanode-on-failure.min-replication".
+ 
+The minimum number of replicas needed to avoid failing the write pipeline
+  when new datanodes cannot be found to replace failed datanodes (for
+  example, due to a network failure) in the write pipeline. If the number
+  of remaining datanodes in the write pipeline is greater than or equal to
+  this property value, writing continues on the remaining nodes; otherwise
+  an exception is thrown.
+
+  If this is set to 0, an exception is thrown when a replacement
+  cannot be found.
+
+
+---
+
+* [HDFS-12486](https://issues.apache.org/jira/browse/HDFS-12486) | *Major* | 
**GetConf to get journalnodeslist**
+
+Adds a getconf command option to list the journal nodes.
+Usage: hdfs getconf -journalnodes
+
+
+---
+
+* [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | *Major* 
| **Tool to estimate resource requirements of an application pipeline based on 
prior executions**
+
+The first version of Resource Estimator service, a tool that captures the 
historical resource usage of an app and predicts its future resource 
requirement.
+
+
+---
+
+* [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | *Major* | 
**[Umbrella] Native YARN framework layer for services and beyond**
+
+A framework is implemented to orchestrate containers on YARN.
+
+
+---
+
+* [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | *Major* | 
**[Umbrella] Simplified discovery of services via DNS mechanisms**
+
+A DNS server backed by yarn service registry is implemented to enable service 
discovery on YARN using standard DNS lookup.
+
+
+---
+
+* [YARN-4793](https://issues.apache.org/jira/browse/YARN-4793) | *Major* | 
**[Umbrella] Simplified API layer for services and beyond**
+
+A REST API service is implemented to enable users to launch and manage
+container-based services on YARN.
+
+
+---
+
+* [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | *Minor* 
| **Metrics sinks may emit too frequently if multiple sink periods are 
configured**
+
+Previously if multiple metrics sinks were configured with different periods, 
they may emit more frequently than configured, at a period as low as the GCD of 
the configured periods. This change makes all metrics sinks emit at their 
configured period.
+
+
+---
+
+* [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | *Minor* | 
**Fsck report shows config key name for min replication issues**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | 
**RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change:
+Change Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | 
**RBF: Add ACL support for mount table**
+
+Mount table entries now support ACLs. Users cannot modify entries they do not
+own: pre-existing entries (which previously had no permissions) are treated as
+owner:superuser, group:supergroup, permission:755 by default, so modifying
+them requires logging in as the superuser.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | 
**Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user 
classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user 
classpath.
+
+
+---
+
+* [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | *Major* | 
**Allow HDFS block replicas to be provided by an external storage system**
+
+Provided storage allows data stored outside HDFS to be mapped to and addressed 
from HDFS. It builds on heterogeneous storage by introducing a new storage 
type, PROVIDED, to the set of media in a datanode. Clients accessing data in 
PROVIDED storages can cache replicas in local media, enforce HDFS invariants 
(e.g., 
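
Several notes above name concrete configuration keys; as one hedged example, the HDFS-11799 client setting could be applied like this (key string copied from the note, value illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    Configuration conf = new HdfsConfiguration();
    // Keep writing while at least 2 datanodes remain in the pipeline,
    // even if failed nodes cannot be replaced.
    conf.setInt(
        "dfs.client.block.write.replace-datanode-on-failure.min-replication",
        2);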

[19/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
--
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
new file mode 100644
index 000..ef04652
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_JobClient_3.1.0.xml
@@ -0,0 +1,16 @@
+[generated JDiff XML elided: the archive stripped all markup from this 16-line file]





[47/50] [abbrv] hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread xyao
HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89594f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89594f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89594f0

Branch: refs/heads/HDFS-7240
Commit: f89594f0b80e8efffdcb887daa4a18a2b0a228b3
Parents: cef8eb7
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:35:00 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89594f0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 000..ff77029
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverse directory recursively and process files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batch, and can optionally start after
+   * a position. The iteration of the inode tree is done in a depth-first
+   * fashion. But instead of holding all {@link INodeDirectory}'s in memory
+   * on the fly, only the path components to the current inode are held, to
+   * reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse should start after.
+   * @param traverseInfo
+   *  info which may be required for processing the children.
+   * @throws IOException
+   * @throws 
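
The constructor above wires in the namenode read-lock reporting threshold; a hedged sketch of tuning it (key from the DFSConfigKeys import at the top of the new file, value illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    Configuration conf = new Configuration();
    // Warn when a traversal batch holds the namesystem read lock > 10s.
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
        10000L);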

[26/50] [abbrv] hadoop git commit: YARN-8107. Give an informative message when incorrect format is used in ATSv2 filter attributes. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread xyao
YARN-8107. Give an informative message when incorrect format is used in ATSv2 
filter attributes. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/024d7c08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/024d7c08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/024d7c08

Branch: refs/heads/HDFS-7240
Commit: 024d7c08704e6a5fcc1f53a8f56a44c84c8d5fa0
Parents: b17dc9f
Author: Haibo Chen 
Authored: Fri Apr 6 09:37:21 2018 -0700
Committer: Haibo Chen 
Committed: Fri Apr 6 09:39:01 2018 -0700

--
 .../reader/TimelineParserForCompareExpr.java|  7 +-
 .../reader/TimelineParserForEqualityExpr.java   |  7 +-
 .../TestTimelineReaderWebServicesUtils.java | 25 
 3 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
index 1b020d9..a582956 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
@@ -282,7 +282,12 @@ abstract class TimelineParserForCompareExpr implements TimelineParser {
   parseValue(expr.substring(kvStartOffset, offset)));
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
index 7451713..2bdce38 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
@@ -325,7 +325,12 @@ abstract class TimelineParserForEqualityExpr implements TimelineParser {
   }
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }
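
A hedged sketch of the failure this guards against (the parse entry point and the malformed expression are assumptions based on the test class touched below): an expression that yields no filters now raises a descriptive exception instead of wrapping a null filter.

    try {
      // Hypothetical malformed filter value from a REST query.
      TimelineReaderWebServicesUtils.parseMetricFilters("(abc");
    } catch (TimelineParseException e) {
      // With this patch: "Invalid expression provided for <exprName>"
      System.out.println(e.getMessage());
    }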

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
--
diff --git 

[44/50] [abbrv] hadoop git commit: YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to graph view. Contributed by Gergely Novák.

2018-04-10 Thread xyao
YARN-7804. [UI2] Refresh action on Grid view page should not be redirected to 
graph view. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c1e77dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c1e77dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c1e77dd

Branch: refs/heads/HDFS-7240
Commit: 7c1e77dda4cb3ba8952328d142aafcf0366b5903
Parents: 7623cc5
Author: Sunil G 
Authored: Tue Apr 10 16:09:09 2018 +0530
Committer: Sunil G 
Committed: Tue Apr 10 16:09:09 2018 +0530

--
 .../main/webapp/app/components/timeline-view.js | 35 ++--
 .../webapp/app/controllers/yarn-app-attempt.js  |  9 -
 .../webapp/app/controllers/yarn-app/attempts.js | 11 --
 .../app/templates/components/timeline-view.hbs  | 12 +++
 .../webapp/app/templates/yarn-app-attempt.hbs   |  2 ++
 .../webapp/app/templates/yarn-app/attempts.hbs  |  2 ++
 6 files changed, 52 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
index 65a8cb1..3588009 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/timeline-view.js
@@ -25,6 +25,13 @@ export default Ember.Component.extend({
   tableDefinition: TableDefinition.create({
 searchType: 'manual',
   }),
+  graphDrawn: false,
+
+  actions: {
+changeViewType(param) {
+  this.sendAction("changeViewType", param);
+}
+  },
 
   canvas: {
 svg: undefined,
@@ -235,12 +242,10 @@ export default Ember.Component.extend({
   },
 
   didInsertElement: function() {
-// init tooltip
-this.initTooltip();
+// init model
 this.modelArr = [];
 this.containerIdArr = [];
 
-// init model
 if (this.get("rmModel")) {
   this.get("rmModel").forEach(function(o) {
 if(!this.modelArr.contains(o)) {
@@ -258,16 +263,30 @@ export default Ember.Component.extend({
   }.bind(this));
 }
 
-if(this.modelArr.length === 0) {
+if (this.modelArr.length === 0) {
   return;
 }
 
 this.modelArr.sort(function(a, b) {
   var tsA = a.get("startTs");
   var tsB = b.get("startTs");
-
   return tsA - tsB;
 });
+
+if (this.get('attemptModel')) {
+  this.setAttemptsGridColumnsAndRows();
+} else {
+  this.setContainersGridColumnsAndRows();
+}
+  },
+
+  didUpdate: function() {
+if (this.get("viewType") === "grid" || this.graphDrawn) {
+  return;
+}
+
+this.initTooltip();
+
 var begin = 0;
 if (this.modelArr.length > 0) {
   begin = this.modelArr[0].get("startTs");
@@ -289,11 +308,7 @@ export default Ember.Component.extend({
   this.setSelected(this.modelArr[0]);
 }
 
-if (this.get('attemptModel')) {
-  this.setAttemptsGridColumnsAndRows();
-} else {
-  this.setContainersGridColumnsAndRows();
-}
+this.graphDrawn = true;
   },
 
   setAttemptsGridColumnsAndRows: function() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index 4c8b8a1..116920d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -19,8 +19,15 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["service"],
+  queryParams: ["service", "viewType"],
   service: undefined,
+  viewType: "graph",
+
+  actions: {
+changeViewType(param) {
+  this.set("viewType", param);
+}
+  },
 
  breadcrumbs: Ember.computed("model.attempt.appId", "model.attempt.id", function () {
 var appId = this.get("model.attempt.appId");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c1e77dd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/attempts.js

[35/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread xyao
YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. 
Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821b0de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821b0de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821b0de4

Branch: refs/heads/HDFS-7240
Commit: 821b0de4c59156d4a65112de03ba3e7e1c88e309
Parents: 5700556
Author: Sunil G 
Authored: Mon Apr 9 21:17:22 2018 +0530
Committer: Sunil G 
Committed: Mon Apr 9 21:17:22 2018 +0530

--
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  47 ++
 .../resourcemanager/scheduler/Allocation.java   |  12 +
 .../scheduler/SchedulerUtils.java   |  33 +-
 .../capacity/AutoCreatedLeafQueue.java  |   3 +-
 .../AutoCreatedQueueManagementPolicy.java   |  12 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../CapacitySchedulerConfiguration.java |  28 +
 .../scheduler/capacity/LeafQueue.java   |  11 +
 .../scheduler/capacity/ManagedParentQueue.java  |   5 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 573 +++
 .../placement/PendingAskUpdateResult.java   |   8 +
 .../yarn/server/resourcemanager/MockNM.java |  15 +
 .../server/resourcemanager/TestAppManager.java  |  20 +-
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 241 +---
 .../TestCapacitySchedulerAutoQueueCreation.java | 233 +---
 .../TestQueueManagementDynamicEditPolicy.java   |  30 +-
 17 files changed, 834 insertions(+), 444 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 33451295..ab6bbcf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -236,13 +236,14 @@ public class RMServerUtils {
*/
   public static void normalizeAndValidateRequests(List ask,
   Resource maximumResource, String queueName, YarnScheduler scheduler,
-  RMContext rmContext)
-  throws InvalidResourceRequestException {
+  RMContext rmContext) throws InvalidResourceRequestException {
 // Get queue from scheduler
 QueueInfo queueInfo = null;
 try {
   queueInfo = scheduler.getQueueInfo(queueName, false, false);
 } catch (IOException e) {
+  //Queue may not exist since it could be auto-created in case of
+  // dynamic queues
 }
 
 for (ResourceRequest resReq : ask) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index c23b135..1b1e2c4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 

[13/50] [abbrv] hadoop git commit: YARN-6936. [Atsv2] Retrospect storing entities into sub application table from client perspective. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread xyao
YARN-6936. [Atsv2] Retrospect storing entities into sub application table from 
client perspective. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8b8bd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8b8bd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8b8bd53

Branch: refs/heads/HDFS-7240
Commit: f8b8bd53c4797d406bea5b1b0cdb179e209169cc
Parents: d737bf99
Author: Haibo Chen 
Authored: Thu Apr 5 10:22:50 2018 -0700
Committer: Haibo Chen 
Committed: Thu Apr 5 10:23:42 2018 -0700

--
 .../timelineservice/SubApplicationEntity.java   | 50 
 .../yarn/client/api/TimelineV2Client.java   | 47 +++---
 .../client/api/impl/TimelineV2ClientImpl.java   | 30 ++--
 ...stTimelineReaderWebServicesHBaseStorage.java |  7 +--
 .../TestHBaseTimelineStorageEntities.java   |  3 +-
 .../storage/HBaseTimelineWriterImpl.java|  3 +-
 .../collector/TimelineCollectorWebService.java  | 19 ++--
 7 files changed, 138 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
new file mode 100644
index 000..a83ef3d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/SubApplicationEntity.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.timelineservice;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This entity represents user-defined entities to be stored under the sub
+ * application table.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class SubApplicationEntity extends HierarchicalTimelineEntity {
+
+  public static final String YARN_APPLICATION_ID = "YARN_APPLICATION_ID";
+
+  public SubApplicationEntity(TimelineEntity entity) {
+super(entity);
+  }
+
+  /**
+   * Checks if the input TimelineEntity object is a SubApplicationEntity.
+   *
+   * @param te TimelineEntity object.
+   * @return true if input is a SubApplicationEntity, false otherwise
+   */
+  public static boolean isSubApplicationEntity(TimelineEntity te) {
+return (te != null && te instanceof SubApplicationEntity);
+  }
+
+  public void setApplicationId(String appId) {
+addInfo(YARN_APPLICATION_ID, appId);
+  }
+}
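
A hedged usage sketch of the new class (type and ids are illustrative):

    TimelineEntity entity = new TimelineEntity();
    entity.setType("MY_SUBAPP_ENTITY");   // illustrative entity type
    entity.setId("entity_001");
    SubApplicationEntity subApp = new SubApplicationEntity(entity);
    subApp.setApplicationId("application_1523300000000_0001");
    assert SubApplicationEntity.isSubApplicationEntity(subApp);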

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8b8bd53/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
index 423c059..e987b46 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/TimelineV2Client.java
@@ -54,9 +54,10 @@ public abstract class TimelineV2Client extends CompositeService {
 
   /**
* 
-   * Send the information of a number of conceptual entities to the timeline
-   * service v.2 collector. It is a blocking API. The method will 

[37/50] [abbrv] hadoop git commit: HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. Contributed by Yiqun Lin.

2018-04-10 Thread xyao
HDFS-13380. RBF: mv/rm fail after the directory exceeded the quota limit. 
Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e9b9f48d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e9b9f48d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e9b9f48d

Branch: refs/heads/HDFS-7240
Commit: e9b9f48dad5ebb58ee529f918723089e8356c480
Parents: ac32b35
Author: Inigo Goiri 
Authored: Mon Apr 9 10:09:25 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 10:09:25 2018 -0700

--
 .../federation/router/RouterRpcServer.java  | 30 
 .../federation/router/TestRouterQuota.java  |  4 +++
 2 files changed, 28 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e9b9f48d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1159289..e6d2f5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -900,7 +900,8 @@ public class RouterRpcServer extends AbstractService
   throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List srcLocations = getLocationsForPath(src, true);
+final List srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -921,7 +922,8 @@ public class RouterRpcServer extends AbstractService
   final Options.Rename... options) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List srcLocations = getLocationsForPath(src, true);
+final List srcLocations =
+getLocationsForPath(src, true, false);
 // srcLocations may be trimmed by getRenameDestinations()
 final List locs = new LinkedList<>(srcLocations);
 RemoteParam dstParam = getRenameDestinations(locs, dst);
@@ -998,7 +1000,8 @@ public class RouterRpcServer extends AbstractService
   public boolean delete(String src, boolean recursive) throws IOException {
 checkOperation(OperationCategory.WRITE);
 
-final List locations = getLocationsForPath(src, true);
+final List locations =
+getLocationsForPath(src, true, false);
 RemoteMethod method = new RemoteMethod("delete",
 new Class[] {String.class, boolean.class}, new RemoteParam(),
 recursive);
@@ -2213,14 +2216,29 @@ public class RouterRpcServer extends AbstractService
 
   /**
* Get the possible locations of a path in the federated cluster.
+   * During the get operation, it will do the quota verification.
+   *
+   * @param path Path to check.
+   * @param failIfLocked Fail the request if locked (top mount point).
+   * @return Prioritized list of locations in the federated cluster.
+   * @throws IOException If the location for this path cannot be determined.
+   */
+  protected List getLocationsForPath(String path,
+  boolean failIfLocked) throws IOException {
+return getLocationsForPath(path, failIfLocked, true);
+  }
+
+  /**
+   * Get the possible locations of a path in the federated cluster.
*
* @param path Path to check.
* @param failIfLocked Fail the request if locked (top mount point).
+   * @param needQuotaVerify If need to do the quota verification.
* @return Prioritized list of locations in the federated cluster.
* @throws IOException If the location for this path cannot be determined.
*/
-  protected List getLocationsForPath(
-  String path, boolean failIfLocked) throws IOException {
+  protected List getLocationsForPath(String path,
+  boolean failIfLocked, boolean needQuotaVerify) throws IOException {
 try {
   // Check the location for this path
   final PathLocation location =
@@ -2241,7 +2259,7 @@ public class RouterRpcServer extends AbstractService
 }
 
 // Check quota
-if (this.router.isQuotaEnabled()) {
+if (this.router.isQuotaEnabled() && needQuotaVerify) {
   RouterQuotaUsage quotaUsage = this.router.getQuotaManager()
   .getQuotaUsage(path);
   if (quotaUsage != null) {
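
Read together with the rename/delete hunks above, the effect is (a reading of the patch, not new API):

    // mv/rm resolve their source locations with needQuotaVerify = false,
    // so a directory that already exceeds its quota can still be renamed
    // or removed:
    final List<RemoteLocation> srcLocations =
        getLocationsForPath(src, true, false);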


[45/50] [abbrv] hadoop git commit: HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html pages. Contributed by Akira Ajisaka.

2018-04-10 Thread xyao
HDFS-13420. License header is displayed in ArchivalStorage/MemoryStorage html 
pages. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6729047a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6729047a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6729047a

Branch: refs/heads/HDFS-7240
Commit: 6729047a8ba273d27edcc6a1a9d397a096f44d84
Parents: 7c1e77d
Author: Weiwei Yang 
Authored: Tue Apr 10 22:10:44 2018 +0800
Committer: Weiwei Yang 
Committed: Tue Apr 10 22:10:44 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/MemoryStorage.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6729047a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 91ad107..ab7975a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -1,4 +1,4 @@

[18/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
new file mode 100644
index 000..163eb3c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml
@@ -0,0 +1,3146 @@
+[generated JDiff XML elided: the archive stripped all markup from this 3,146-line file]

[48/50] [abbrv] hadoop git commit: HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed by Gabor Bota.

2018-04-10 Thread xyao
HDFS-13363. Record file path when FSDirAclOp throws AclException. Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e76c2aeb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e76c2aeb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e76c2aeb

Branch: refs/heads/HDFS-7240
Commit: e76c2aeb288710ebee39680528dec44e454bbe9e
Parents: f89594f
Author: Xiao Chen 
Authored: Tue Apr 10 11:19:23 2018 -0700
Committer: Xiao Chen 
Committed: Tue Apr 10 11:19:48 2018 -0700

--
 .../org/apache/hadoop/hdfs/protocol/AclException.java   | 10 ++
 .../apache/hadoop/hdfs/server/namenode/FSDirAclOp.java  | 12 
 2 files changed, 22 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
index 1210999..9948b99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AclException.java
@@ -36,4 +36,14 @@ public class AclException extends IOException {
   public AclException(String message) {
 super(message);
   }
+
+  /**
+   * Creates a new AclException.
+   *
+   * @param message String message
+   * @param cause The cause of the exception
+   */
+  public AclException(String message, Throwable cause) {
+super(message, cause);
+  }
 }
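
A hedged sketch of what a client now observes (path, variables, and logging are illustrative; the wrap-and-rethrow sites are in the FSDirAclOp hunks below):

    try {
      fs.removeAcl(new Path("/data/reports"));
    } catch (AclException e) {
      // The message now ends with " Path: /data/reports", and the original
      // AclException is preserved as the cause.
      LOG.error("ACL update failed: {}", e.getMessage(), e);
    }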

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e76c2aeb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index 7b3471d..8d77f89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -53,6 +53,8 @@ class FSDirAclOp {
   existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -77,6 +79,8 @@ class FSDirAclOp {
 existingAcl, aclSpec);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -100,6 +104,8 @@ class FSDirAclOp {
 existingAcl);
   AclStorage.updateINodeAcl(inode, newAcl, snapshotId);
   fsd.getEditLog().logSetAcl(src, newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -117,6 +123,8 @@ class FSDirAclOp {
   src = iip.getPath();
   fsd.checkOwner(pc, iip);
   unprotectedRemoveAcl(fsd, iip);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -136,6 +144,8 @@ class FSDirAclOp {
   fsd.checkOwner(pc, iip);
   List newAcl = unprotectedSetAcl(fsd, iip, aclSpec, false);
   fsd.getEditLog().logSetAcl(iip.getPath(), newAcl);
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.writeUnlock();
 }
@@ -162,6 +172,8 @@ class FSDirAclOp {
   .stickyBit(fsPermission.getStickyBit())
   .setPermission(fsPermission)
   .addEntries(acl).build();
+} catch (AclException e){
+  throw new AclException(e.getMessage() + " Path: " + src, e);
 } finally {
   fsd.readUnlock();
 }





[25/50] [abbrv] hadoop git commit: YARN-8083. [UI2] All YARN related configurations are paged together in conf page. Contributed by Gergely Novák.

2018-04-10 Thread xyao
YARN-8083. [UI2] All YARN related configurations are paged together in conf 
page. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17dc9f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17dc9f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17dc9f5

Branch: refs/heads/HDFS-7240
Commit: b17dc9f5f54fd91defc1d8646f8229da5fe7ccbb
Parents: ea3849f
Author: Sunil G 
Authored: Fri Apr 6 21:53:14 2018 +0530
Committer: Sunil G 
Committed: Fri Apr 6 21:53:14 2018 +0530

--
 .../main/webapp/app/controllers/yarn-tools/yarn-conf.js   | 10 +-
 .../main/webapp/app/templates/yarn-tools/yarn-conf.hbs|  6 +++---
 2 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
index 2984346..cc3be2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
@@ -24,7 +24,15 @@ import ColumnDef from 'em-table/utils/column-definition';
 import YarnConf from '../../models/yarn-conf';
 
 export default Ember.Controller.extend({
-  tableDefinition: TableDef.create({
+  coreTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  mapredTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  yarnTableDefinition: TableDef.create({
 searchType: 'manual',
   }),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
index 09a1410..c2108a2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
@@ -17,12 +17,12 @@
 }}
 
 Core Configuration
-{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel 
rowCount=10 definition=coreTableDefinition}}
 
 YARN Configuration
-{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel 
rowCount=10 definition=yarnTableDefinition}}
 
 MapReduce Configuration
-{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel 
rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel 
rowCount=10 definition=mapredTableDefinition}}
 
 {{outlet}}





[36/50] [abbrv] hadoop git commit: HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun.

2018-04-10 Thread xyao
HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the 
time. Contributed by Jinglun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac32b357
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac32b357
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac32b357

Branch: refs/heads/HDFS-7240
Commit: ac32b3576da4cc463dff85118163ccfff02215fc
Parents: 821b0de
Author: Inigo Goiri 
Authored: Mon Apr 9 09:16:48 2018 -0700
Committer: Inigo Goiri 
Committed: Mon Apr 9 09:16:48 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 ++
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index 7b9cd64..1c38791 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -79,6 +79,9 @@ public class RequestHedgingProxyProvider<T> extends
     public Object
     invoke(Object proxy, final Method method, final Object[] args)
         throws Throwable {
+      if (currentUsedProxy != null) {
+        return method.invoke(currentUsedProxy.proxy, args);
+      }
       Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
       int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac32b357/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 8d6b02d..4b3fdf9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -43,10 +43,13 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -100,6 +103,37 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
+  public void testRequestNNAfterOneSuccess() throws Exception {
+    final AtomicInteger count = new AtomicInteger(0);
+    final ClientProtocol goodMock = mock(ClientProtocol.class);
+    when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        count.incrementAndGet();
+        Thread.sleep(1000);
+        return new long[]{1};
+      }
+    });
+    final ClientProtocol badMock = mock(ClientProtocol.class);
+    when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
+      @Override
+      public long[] answer(InvocationOnMock invocation) throws Throwable {
+        count.incrementAndGet();
+        throw new IOException("Bad mock !!");
+      }
+    });
+
+    RequestHedgingProxyProvider<ClientProtocol> provider =
+        new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
+            createFactory(badMock, goodMock, goodMock, badMock));
+    ClientProtocol proxy = provider.getProxy().proxy;
+    proxy.getStats();
+    assertEquals(2, count.get());
+    proxy.getStats();
+    assertEquals(3, count.get());
+  }
+
+  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
     final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
     Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {


-
To unsubscribe, 
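
The three added lines in RequestHedgingProxyProvider#invoke are the whole fix: remember the first proxy that answers and route every later call straight to it, instead of hedging across all configured NameNodes on every request. A self-contained Java sketch of the same pattern with plain java.lang.reflect proxies follows; the class and interface names are illustrative, not the Hadoop implementation, and the error unwrapping is simplified.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class HedgingDemo {

  interface Stats {
    long[] getStats() throws Exception;
  }

  static <T> T hedge(Class<T> iface, List<T> targets) {
    InvocationHandler handler = new InvocationHandler() {
      private volatile Object winner; // plays the role of currentUsedProxy

      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        if (winner != null) {
          // Short-circuit: once one target has succeeded, call only that one.
          return method.invoke(winner, args);
        }
        ExecutorService pool = Executors.newFixedThreadPool(targets.size());
        try {
          CompletionService<Object[]> cs = new ExecutorCompletionService<>(pool);
          for (T t : targets) {
            cs.submit(() -> new Object[] {t, method.invoke(t, args)});
          }
          Throwable last = null;
          for (int i = 0; i < targets.size(); i++) {
            try {
              Object[] r = cs.take().get(); // first successful target wins
              winner = r[0];
              return r[1];
            } catch (ExecutionException e) {
              last = e.getCause() instanceof InvocationTargetException
                  ? e.getCause().getCause() : e.getCause();
            }
          }
          throw last; // every target failed
        } finally {
          pool.shutdownNow(); // cancel the losers
        }
      }
    };
    return iface.cast(Proxy.newProxyInstance(
        iface.getClassLoader(), new Class<?>[] {iface}, handler));
  }

  public static void main(String[] args) throws Exception {
    Stats slow = () -> { Thread.sleep(500); return new long[] {1}; };
    Stats bad = () -> { throw new Exception("Bad mock !!"); };
    Stats hedged = hedge(Stats.class, List.of(bad, slow));
    hedged.getStats(); // fans out to both targets, remembers the one that succeeded
    hedged.getStats(); // goes straight to the remembered winner
    System.out.println("done");
  }
}

The volatile winner field is only ever set after a successful return, which is exactly the invariant the test above asserts with its call counter (2 invocations on the first call, then 1 per call afterwards).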

[28/50] [abbrv] hadoop git commit: YARN-8048. Support auto-spawning of admin configured services during bootstrap of RM (Rohith Sharma K S via wangda)

2018-04-10 Thread xyao
YARN-8048. Support auto-spawning of admin configured services during bootstrap 
of RM (Rohith Sharma K S via wangda)

Change-Id: I2d8d61ccad55e1118009294d7e17822df3cd0fd5


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4e63ccc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4e63ccc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4e63ccc

Branch: refs/heads/HDFS-7240
Commit: d4e63ccca0763b452e4a0169dd932b3f32066281
Parents: 00905ef
Author: Wangda Tan 
Authored: Fri Apr 6 21:24:58 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 6 21:24:58 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../hadoop-yarn-services-api/pom.xml|   5 +
 .../client/SystemServiceManagerImpl.java| 381 +++
 .../service/client/TestSystemServiceImpl.java   | 180 +
 .../users/sync/user1/example-app1.yarnfile  |  16 +
 .../users/sync/user1/example-app2.yarnfile  |  16 +
 .../users/sync/user1/example-app3.json  |  16 +
 .../users/sync/user2/example-app1.yarnfile  |  16 +
 .../users/sync/user2/example-app2.yarnfile  |  16 +
 .../yarn/service/conf/YarnServiceConf.java  |   2 +
 .../yarn/service/TestSystemServiceManager.java  | 156 
 .../server/service/SystemServiceManager.java|  25 ++
 .../yarn/server/service/package-info.java   |  27 ++
 .../server/resourcemanager/ResourceManager.java |  30 +-
 14 files changed, 889 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 41755e2..7a2a3ce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -343,6 +343,10 @@ public class YarnConfiguration extends Configuration {
   public static final String YARN_API_SERVICES_ENABLE = "yarn."
   + "webapp.api-service.enable";
 
+  @Private
+  public static final String DEFAULT_YARN_API_SYSTEM_SERVICES_CLASS =
+  "org.apache.hadoop.yarn.service.client.SystemServiceManagerImpl";
+
   public static final String RM_RESOURCE_TRACKER_ADDRESS =
 RM_PREFIX + "resource-tracker.address";
   public static final int DEFAULT_RM_RESOURCE_TRACKER_PORT = 8031;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
index 7fe2ef6..354c9b5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/pom.xml
@@ -71,6 +71,7 @@
       <configuration>
         <excludes>
           <exclude>**/*.json</exclude>
+          <exclude>**/*.yarnfile</exclude>
         </excludes>
       </configuration>
     </plugin>
@@ -96,6 +97,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4e63ccc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
new file mode 100644
index 0000000..225f8bd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information 
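
Judging from the test resources in the file listing above (users/sync/<user>/example-app*.yarnfile), the manager scans a per-user spec directory at RM bootstrap and launches each *.yarnfile it finds. A rough, hypothetical sketch of that scanning step with java.nio.file — not the actual SystemServiceManagerImpl, which reads specs through the Hadoop FileSystem API and submits them via the service client:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public final class YarnfileScanner {

  /** Maps each user directory under root/sync to its *.yarnfile service specs. */
  static Map<String, List<Path>> scan(Path root) throws IOException {
    Map<String, List<Path>> specsByUser = new TreeMap<>();
    Path sync = root.resolve("sync");
    if (!Files.isDirectory(sync)) {
      return specsByUser; // nothing configured for auto-spawning
    }
    try (DirectoryStream<Path> users = Files.newDirectoryStream(sync)) {
      for (Path userDir : users) {
        if (!Files.isDirectory(userDir)) {
          continue;
        }
        List<Path> specs = new ArrayList<>();
        // The glob skips non-spec files such as example-app3.json above.
        try (DirectoryStream<Path> files =
                 Files.newDirectoryStream(userDir, "*.yarnfile")) {
          for (Path spec : files) {
            specs.add(spec);
          }
        }
        specsByUser.put(userDir.getFileName().toString(), specs);
      }
    }
    return specsByUser;
  }

  public static void main(String[] args) throws IOException {
    Path root = Paths.get(args.length > 0 ? args[0] : "users");
    scan(root).forEach((user, specs) ->
        System.out.println(user + " -> " + specs));
  }
}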

[17/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
new file mode 100644
index 0000000..ab7c120
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml
@@ -0,0 +1,3034 @@
+[3,034 lines of new jdiff XML for Apache Hadoop YARN Common 3.1.0 omitted: the list archive stripped all XML markup from this hunk, leaving only blank "+" lines.]

[20/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
--
diff --git a/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
new file mode 100644
index 0000000..f4762d9
--- /dev/null
+++ b/hadoop-mapreduce-project/dev-support/jdiff/Apache_Hadoop_MapReduce_Core_3.1.0.xml
@@ -0,0 +1,28075 @@
+[28,075 lines of new jdiff XML for Apache Hadoop MapReduce Core 3.1.0 omitted: the list archive stripped all XML markup from this hunk, leaving only blank "+" lines.]
[43/50] [abbrv] hadoop git commit: HADOOP-15374. Add links of the new features of 3.1.0 to the top page

2018-04-10 Thread xyao
HADOOP-15374. Add links of the new features of 3.1.0 to the top page

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7623cc5a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7623cc5a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7623cc5a

Branch: refs/heads/HDFS-7240
Commit: 7623cc5a982219fff2bdd9a84650f45106cbdf47
Parents: e87be8a
Author: Takanobu Asanuma 
Authored: Tue Apr 10 18:59:40 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 18:59:40 2018 +0900

--
 hadoop-project/src/site/site.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7623cc5a/hadoop-project/src/site/site.xml
--
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index b5ecd73..fdf5583 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -106,6 +106,7 @@
 [one <item> element added here; the list archive stripped the XML markup from this hunk]
@@ -147,6 +148,9 @@
 [three <item> elements added here; the list archive stripped the XML markup from this hunk]


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/50] [abbrv] hadoop git commit: HADOOP-14855. Hadoop scripts may errantly believe a daemon is still running, preventing it from starting. Contributed by Robert Kanter.

2018-04-10 Thread xyao
HADOOP-14855. Hadoop scripts may errantly believe a daemon is still running, 
preventing it from starting. Contributed by Robert Kanter.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e52539b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e52539b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e52539b4

Branch: refs/heads/HDFS-7240
Commit: e52539b46fb13db423490fe02d46e9fae72d72fe
Parents: 345e762
Author: Miklos Szegedi 
Authored: Wed Apr 4 15:35:58 2018 -0700
Committer: Miklos Szegedi 
Committed: Wed Apr 4 15:35:58 2018 -0700

--
 .../hadoop-common/src/main/bin/hadoop-functions.sh  | 9 +++--
 1 file changed, 7 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e52539b4/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 9ea4587..9ef48b6 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1725,11 +1725,16 @@ function hadoop_status_daemon
   shift
 
   local pid
+  local pspid
 
   if [[ -f "${pidfile}" ]]; then
 pid=$(cat "${pidfile}")
-if ps -p "${pid}" > /dev/null 2>&1; then
-  return 0
+if pspid=$(ps -o args= -p"${pid}" 2>/dev/null); then
+  # this is to check that the running process we found is actually the same
+  # daemon that we're interested in
+  if [[ ${pspid} =~ -Dproc_${daemonname} ]]; then
+return 0
+  fi
 fi
 return 1
   fi


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
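
The shell fix above stops trusting a bare ps -p: a pid from a stale pidfile can be recycled by an unrelated process, so the daemon's -Dproc_<name> marker must also appear in that process's command line. A hypothetical Java analogue of the same check using ProcessHandle (Java 9+), illustrative only:

import java.util.Optional;

public final class DaemonStatus {

  static boolean isDaemonRunning(long pid, String daemonName) {
    Optional<ProcessHandle> ph = ProcessHandle.of(pid);
    if (!ph.isPresent() || !ph.get().isAlive()) {
      return false; // no such process
    }
    // commandLine() may be empty when the OS denies access; treat that as
    // "not confirmed", mirroring the conservative shell behavior.
    Optional<String> cmd = ph.get().info().commandLine();
    return cmd.map(c -> c.contains("-Dproc_" + daemonName)).orElse(false);
  }

  public static void main(String[] args) {
    long pid = Long.parseLong(args[0]);
    String daemon = args.length > 1 ? args[1] : "namenode";
    System.out.println(isDaemonRunning(pid, daemon));
  }
}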



[04/50] [abbrv] hadoop git commit: YARN-8035. Uncaught exception in ContainersMonitorImpl during relaunch due to the process ID changing. Contributed by Shane Kumpf.

2018-04-10 Thread xyao
YARN-8035. Uncaught exception in ContainersMonitorImpl during relaunch due to 
the process ID changing. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d06d885
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d06d885
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d06d885

Branch: refs/heads/HDFS-7240
Commit: 2d06d885c84b2e4a3acb6d3e0c50d4870e37ca82
Parents: 5a174f8
Author: Miklos Szegedi 
Authored: Tue Apr 3 10:01:00 2018 -0700
Committer: Miklos Szegedi 
Committed: Tue Apr 3 10:01:00 2018 -0700

--
 .../containermanager/monitor/ContainerMetrics.java |  2 +-
 .../monitor/TestContainerMetrics.java  | 17 +
 2 files changed, 18 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d06d885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
index a6aa337..2a95849 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainerMetrics.java
@@ -274,7 +274,7 @@ public class ContainerMetrics implements MetricsSource {
   }
 
   public void recordProcessId(String processId) {
-    registry.tag(PROCESSID_INFO, processId);
+    registry.tag(PROCESSID_INFO, processId, true);
   }
 
   public void recordResourceLimit(int vmemLimit, int pmemLimit, int cpuVcores) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d06d885/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
index 1840d62..8b2bff1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainerMetrics.java
@@ -206,4 +206,21 @@ public class TestContainerMetrics {
 }
 Assert.assertEquals(expectedValues.keySet(), testResults);
   }
+
+  @Test
+  public void testContainerMetricsUpdateContainerPid() {
+ContainerId containerId = mock(ContainerId.class);
+ContainerMetrics metrics = ContainerMetrics.forContainer(containerId,
+100, 1);
+
+String origPid = "1234";
+metrics.recordProcessId(origPid);
+assertEquals(origPid, metrics.registry.getTag(
+ContainerMetrics.PROCESSID_INFO.name()).value());
+
+String newPid = "4321";
+metrics.recordProcessId(newPid);
+assertEquals(newPid, metrics.registry.getTag(
+ContainerMetrics.PROCESSID_INFO.name()).value());
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
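
The one-argument change works because MetricsRegistry#tag with override=true replaces an existing tag, while the shorter form refuses duplicates — which is what surfaced as the uncaught exception when a relaunched container recorded its new pid. A minimal sketch, assuming hadoop-common is on the classpath; the class name and tag values here are made up:

import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.lib.Interns;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;

public final class TagOverrideDemo {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("container");
    MetricsInfo pidInfo = Interns.info("ProcessId", "Process id of the container");

    registry.tag(pidInfo, "1234", true);  // first launch
    // Re-tagging the same MetricsInfo without override would throw here,
    // which is exactly the relaunch scenario the patch fixes.
    registry.tag(pidInfo, "4321", true);  // relaunch: overwrite, don't throw

    System.out.println(registry.getTag(pidInfo.name()).value());  // 4321
  }
}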



[46/50] [abbrv] hadoop git commit: HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

2018-04-10 Thread xyao
HADOOP-15376. Remove double semi colons on imports that make Clover fall over.

Signed-off-by: Akira Ajisaka 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef8eb79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef8eb79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef8eb79

Branch: refs/heads/HDFS-7240
Commit: cef8eb79810383f9970ed3713deecc18fbf0ffaa
Parents: 6729047
Author: Ewan Higgs 
Authored: Tue Apr 10 23:58:26 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Apr 10 23:58:26 2018 +0900

--
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8eb79/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index 467e5bc..fca72d9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -39,7 +39,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.io.FileUtils;;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[40/50] [abbrv] hadoop git commit: HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi

2018-04-10 Thread xyao
HADOOP-15328. Fix the typo in HttpAuthentication.md. Contributed by fang zhenyi


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0006346a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0006346a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0006346a

Branch: refs/heads/HDFS-7240
Commit: 0006346abe209a07d149fe5fd5a25cda0af26e07
Parents: 907919d
Author: Bharat 
Authored: Mon Apr 9 16:37:49 2018 -0700
Committer: Bharat 
Committed: Mon Apr 9 16:37:49 2018 -0700

--
 .../hadoop-common/src/site/markdown/HttpAuthentication.md  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0006346a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
index 44d814c..721abea 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/HttpAuthentication.md
@@ -28,7 +28,7 @@ Hadoop HTTP web-consoles can be configured to require Kerberos authentication us
 
 In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's Pseudo/Simple authentication. If this option is enabled, the user name must be specified in the first browser interaction using the user.name query string parameter. e.g. `http://localhost:8088/cluster?user.name=babu`.
 
-If a custom authentication mechanism is required for the HTTP web-consoles, it is possible to implement a plugin to support the alternate authentication mechanism (refer to Hadoop hadoop-auth for details on writing an `AuthenticatorHandler`).
+If a custom authentication mechanism is required for the HTTP web-consoles, it is possible to implement a plugin to support the alternate authentication mechanism (refer to Hadoop hadoop-auth for details on writing an `AuthenticationHandler`).
 
 The next section describes how to configure Hadoop HTTP web-consoles to require user authentication.
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: YARN-7764. Findbugs warning: Resource#getResources may expose internal representation. Contributed by Weiwei Yang.

2018-04-10 Thread xyao
YARN-7764. Findbugs warning: Resource#getResources may expose internal 
representation. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7a17b02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7a17b02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7a17b02

Branch: refs/heads/HDFS-7240
Commit: f7a17b029ddd61ca73c2c2c88f5451dbf05fc501
Parents: 2d06d88
Author: Sunil G 
Authored: Wed Apr 4 09:22:35 2018 +0530
Committer: Sunil G 
Committed: Wed Apr 4 09:22:35 2018 +0530

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml  | 7 +++
 1 file changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7a17b02/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 81b8825..5841361 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -658,4 +658,11 @@
 [seven lines added here; the list archive stripped the XML markup from this hunk — a new <Match> entry suppressing the Resource#getResources "may expose internal representation" warning]
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before using to access NM. Contributed by Sreenath Somarajapuram.

2018-04-10 Thread xyao
YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before 
using to access NM. Contributed by Sreenath Somarajapuram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42cd367c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42cd367c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42cd367c

Branch: refs/heads/HDFS-7240
Commit: 42cd367c9308b944bc71de6c07b6c3f028a0d874
Parents: b779f4f
Author: Sunil G 
Authored: Wed Apr 4 22:13:14 2018 +0530
Committer: Sunil G 
Committed: Wed Apr 4 22:13:14 2018 +0530

--
 .../webapp/app/components/node-menu-panel.js| 25 
 .../webapp/app/controllers/yarn-node-app.js |  3 ++-
 .../webapp/app/controllers/yarn-node-apps.js|  3 ++-
 .../app/controllers/yarn-node-container.js  |  3 ++-
 .../app/controllers/yarn-node-containers.js |  3 ++-
 .../main/webapp/app/controllers/yarn-node.js|  3 ++-
 .../webapp/app/controllers/yarn-nodes/table.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/initializers/loader.js  |  1 +
 .../main/webapp/app/routes/yarn-node-apps.js|  8 ---
 .../webapp/app/routes/yarn-node-containers.js   |  8 ---
 .../src/main/webapp/app/routes/yarn-node.js |  8 ---
 .../templates/components/node-menu-panel.hbs|  8 +++
 13 files changed, 57 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
new file mode 100644
index 0000000..31457be
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  encodedAddr : Ember.computed("nodeAddr", function(){
+return encodeURIComponent(this.get('nodeAddr'));
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
index 3dc09fc..e0d58ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
@@ -22,6 +22,7 @@ export default Ember.Controller.extend({
 
   breadcrumbs: Ember.computed('model.nodeInfo', function () {
 var nodeInfo = this.get('model.nodeInfo');
+var addr = encodeURIComponent(nodeInfo.addr);
 return [{
   text: "Home",
   routeName: 'application'
@@ -30,7 +31,7 @@ export default Ember.Controller.extend({
   routeName: 'yarn-nodes.table'
 }, {
   text: `Node [ ${nodeInfo.id} ]`,
-  href: `#/yarn-node/${nodeInfo.id}/${nodeInfo.addr}`,
+  href: `#/yarn-node/${nodeInfo.id}/${addr}/info`,
 }, {
   text: `Application [ ${nodeInfo.appId} ]`,
 }];
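
The recurring change in these controllers is percent-encoding nodeHTTPAddress ("host:port") before it is embedded in a route, since the ':' would otherwise be misparsed as a path separator. A hypothetical Java-side equivalent of the same escaping (URLEncoder's form encoding is close enough to encodeURIComponent for host:port values; the Charset overload needs Java 10+):

import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public final class NodeLinkDemo {

  static String nodeRoute(String nodeId, String nodeHttpAddress) {
    // ':' (and any '/') in the address must not survive into the route path.
    String encoded = URLEncoder.encode(nodeHttpAddress, StandardCharsets.UTF_8);
    return "#/yarn-node/" + nodeId + "/" + encoded + "/info";
  }

  public static void main(String[] args) {
    System.out.println(nodeRoute("node-1", "nm-host.example.com:8042"));
    // -> #/yarn-node/node-1/nm-host.example.com%3A8042/info
  }
}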

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2018-04-10 Thread xyao
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df3ff904
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df3ff904
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df3ff904

Branch: refs/heads/HDFS-7240
Commit: df3ff9042a6327b784ecf90ea8be8f0fe567859e
Parents: bb3c07f 8ab776d
Author: Xiaoyu Yao 
Authored: Tue Apr 10 12:22:50 2018 -0700
Committer: Xiaoyu Yao 
Committed: Tue Apr 10 12:22:50 2018 -0700

--
 BUILDING.txt|14 +
 .../src/main/bin/hadoop-functions.sh| 9 +-
 .../apache/hadoop/crypto/key/KeyProvider.java   |11 +-
 .../fs/CommonConfigurationKeysPublic.java   |21 +
 .../main/java/org/apache/hadoop/ipc/RPC.java|35 +-
 .../org/apache/hadoop/net/NetworkTopology.java  |   106 +-
 .../hadoop/util/concurrent/HadoopExecutors.java | 9 +-
 .../src/site/markdown/HttpAuthentication.md | 2 +-
 .../markdown/release/3.1.0/CHANGES.3.1.0.md |  1022 +
 .../release/3.1.0/RELEASENOTES.3.1.0.md |   199 +
 .../fs/contract/AbstractContractCreateTest.java |12 +-
 .../java/org/apache/hadoop/io/TestIOUtils.java  | 2 +-
 .../hadoop/hdfs/protocol/AclException.java  |10 +
 .../ha/RequestHedgingProxyProvider.java | 3 +
 .../ha/TestRequestHedgingProxyProvider.java |34 +
 .../federation/metrics/NamenodeBeanMetrics.java | 3 +
 .../federation/router/ConnectionContext.java|35 +-
 .../federation/router/ConnectionManager.java|10 +-
 .../federation/router/ConnectionPool.java   |98 +-
 .../federation/router/ConnectionPoolId.java |19 +-
 .../server/federation/router/RemoteMethod.java  |68 +-
 .../router/RouterNamenodeProtocol.java  |   187 +
 .../federation/router/RouterRpcClient.java  |62 +-
 .../federation/router/RouterRpcServer.java  |   141 +-
 .../router/SubClusterTimeoutException.java  |33 +
 .../driver/impl/StateStoreFileSystemImpl.java   | 6 +-
 .../server/federation/MiniRouterDFSCluster.java |39 +-
 .../router/TestConnectionManager.java   |56 +-
 .../server/federation/router/TestRouter.java|70 +-
 .../federation/router/TestRouterQuota.java  | 4 +
 .../router/TestRouterRPCClientRetries.java  |   126 +-
 .../server/federation/router/TestRouterRpc.java |   136 +-
 .../src/test/resources/contract/webhdfs.xml | 5 +
 .../jdiff/Apache_Hadoop_HDFS_3.1.0.xml  |   676 +
 .../server/blockmanagement/BlockIdManager.java  |17 +
 .../server/blockmanagement/BlockManager.java| 5 +-
 .../blockmanagement/BlockManagerSafeMode.java   | 2 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |12 +-
 .../blockmanagement/CorruptReplicasMap.java |35 +-
 .../blockmanagement/InvalidateBlocks.java   |13 +-
 .../server/namenode/EncryptionZoneManager.java  | 8 +-
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |12 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   339 +
 .../server/namenode/ReencryptionHandler.java|   615 +-
 .../server/namenode/ReencryptionUpdater.java| 2 +-
 .../src/site/markdown/ArchivalStorage.md| 2 +-
 .../src/site/markdown/MemoryStorage.md  | 2 +-
 .../blockmanagement/TestBlockManager.java   |61 +-
 .../blockmanagement/TestCorruptReplicaInfo.java |48 +-
 .../hdfs/server/namenode/TestReencryption.java  | 3 -
 .../namenode/TestReencryptionHandler.java   |10 +-
 .../apache/hadoop/net/TestNetworkTopology.java  |75 +-
 .../src/test/resources/testCryptoConf.xml   |19 +
 .../Apache_Hadoop_MapReduce_Common_3.1.0.xml|   113 +
 .../Apache_Hadoop_MapReduce_Core_3.1.0.xml  | 28075 +
 .../Apache_Hadoop_MapReduce_JobClient_3.1.0.xml |16 +
 .../jobhistory/JobHistoryEventHandler.java  | 2 +-
 hadoop-project/src/site/site.xml| 4 +
 .../fs/s3a/s3guard/DynamoDBMetadataStore.java   |18 +-
 .../fs/s3a/s3guard/LocalMetadataStore.java  |17 +-
 .../hadoop/fs/s3a/s3guard/MetadataStore.java|12 +
 .../fs/s3a/s3guard/NullMetadataStore.java   | 4 +
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  |14 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |11 +-
 .../s3guard/AbstractS3GuardToolTestBase.java|21 +-
 .../dev-support/findbugs-exclude.xml| 7 +
 .../jdiff/Apache_Hadoop_YARN_Client_3.1.0.xml   |  3146 ++
 .../jdiff/Apache_Hadoop_YARN_Common_3.1.0.xml   |  3034 ++
 .../Apache_Hadoop_YARN_Server_Common_3.1.0.xml  |  1331 +
 .../api/records/AllocationTagNamespaceType.java | 2 +-
 .../timelineservice/SubApplicationEntity.java   |50 +
 .../hadoop/yarn/conf/YarnConfiguration.java |42 +
 .../hadoop-yarn-services-api/pom.xml| 

[30/50] [abbrv] hadoop git commit: HDFS-13292. Crypto command should give proper exception when trying to set key on existing EZ directory. Contributed by Ranith Sardar.

2018-04-10 Thread xyao
HDFS-13292. Crypto command should give proper exception when trying to set key 
on existing EZ directory. Contributed by Ranith Sardar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/70590cd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/70590cd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/70590cd8

Branch: refs/heads/HDFS-7240
Commit: 70590cd8d948de581e2ae1184afb08574c67bbbe
Parents: 00ebec8
Author: Surendra Singh Lilhore 
Authored: Sat Apr 7 11:23:49 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Sat Apr 7 11:23:49 2018 +0530

--
 .../server/namenode/EncryptionZoneManager.java   |  8 
 .../src/test/resources/testCryptoConf.xml| 19 +++
 2 files changed, 23 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/70590cd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index b1bca98..d06cd1c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -541,10 +541,6 @@ public class EncryptionZoneManager {
 if (srcIIP.getLastINode() == null) {
   throw new FileNotFoundException("cannot find " + srcIIP.getPath());
 }
-if (dir.isNonEmptyDirectory(srcIIP)) {
-  throw new IOException(
-  "Attempt to create an encryption zone for a non-empty directory.");
-}
 
 INode srcINode = srcIIP.getLastINode();
 if (!srcINode.isDirectory()) {
@@ -557,6 +553,10 @@ public class EncryptionZoneManager {
   "Directory " + srcIIP.getPath() + " is already an encryption zone.");
 }
 
+if (dir.isNonEmptyDirectory(srcIIP)) {
+  throw new IOException(
+  "Attempt to create an encryption zone for a non-empty directory.");
+}
 final HdfsProtos.ZoneEncryptionInfoProto proto =
 PBHelperClient.convert(suite, version, keyName);
 final XAttr ezXAttr = XAttrHelper

http://git-wip-us.apache.org/repos/asf/hadoop/blob/70590cd8/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
index c109442..f603cc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCryptoConf.xml
@@ -114,6 +114,25 @@
 
 
 
+  Test failure of creating EZ on an existing EZ
+  
+    -fs NAMENODE -mkdir /foo
+    -fs NAMENODE -ls /
+    -createZone -path /foo -keyName myKey
+    -createZone -path /foo -keyName myKey
+  
+  
+    -fs NAMENODE -rmdir /foo
+  
+  
+    
+      SubstringComparator
+      Directory /foo is already an encryption zone
+    
+  
+
+
+
   Test success of creating an EZ as a subdir of an existing EZ.
   
     -fs NAMENODE -mkdir /foo

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
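
The patch body only moves the isNonEmptyDirectory check below the already-an-EZ check, so that re-running createZone on an existing zone that already holds files reports the real cause instead of the generic "non-empty directory" error. A boiled-down, hypothetical ordering of the same precondition checks (flag parameters stand in for the namenode's actual inode lookups):

import java.io.FileNotFoundException;
import java.io.IOException;

public final class CreateZoneChecks {

  static void checkCanCreateZone(boolean exists, boolean isDirectory,
      boolean alreadyZone, boolean nonEmpty, String path) throws IOException {
    if (!exists) {
      throw new FileNotFoundException("cannot find " + path);
    }
    if (!isDirectory) {
      throw new IOException("Attempt to create an encryption zone for a file.");
    }
    // Most specific condition first: if the directory is already a zone,
    // say so, even when it also happens to be non-empty.
    if (alreadyZone) {
      throw new IOException("Directory " + path + " is already an encryption zone.");
    }
    if (nonEmpty) {
      throw new IOException(
          "Attempt to create an encryption zone for a non-empty directory.");
    }
  }

  public static void main(String[] args) {
    try {
      // Re-running createZone on a populated EZ: both conditions hold.
      checkCanCreateZone(true, true, true, true, "/foo");
    } catch (IOException e) {
      System.out.println(e.getMessage()); // "... is already an encryption zone."
    }
  }
}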



[02/50] [abbrv] hadoop git commit: YARN-8051. TestRMEmbeddedElector#testCallbackSynchronization is flaky. (Robert Kanter via Haibo Chen)

2018-04-10 Thread xyao
YARN-8051. TestRMEmbeddedElector#testCallbackSynchronization is flaky. (Robert 
Kanter via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93d47a0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93d47a0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93d47a0e

Branch: refs/heads/HDFS-7240
Commit: 93d47a0ed504ee81d4b74d340c1815bdbb3c9b14
Parents: 2be64eb
Author: Haibo Chen 
Authored: Tue Apr 3 07:58:21 2018 -0700
Committer: Haibo Chen 
Committed: Tue Apr 3 07:59:20 2018 -0700

--
 .../resourcemanager/TestRMEmbeddedElector.java  | 72 +---
 1 file changed, 49 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93d47a0e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
index 140483a..9d38149 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMEmbeddedElector.java
@@ -22,18 +22,22 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.ClientBaseWithFixes;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.atLeast;
-import static org.mockito.Mockito.atMost;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -48,6 +52,8 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
 
   private Configuration conf;
   private AtomicBoolean callbackCalled;
+  private AtomicInteger transitionToActiveCounter;
+  private AtomicInteger transitionToStandbyCounter;
 
   private enum SyncTestType {
 ACTIVE,
@@ -75,6 +81,8 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
 conf.setLong(YarnConfiguration.CLIENT_FAILOVER_SLEEPTIME_BASE_MS, 100L);
 
 callbackCalled = new AtomicBoolean(false);
+transitionToActiveCounter = new AtomicInteger(0);
+transitionToStandbyCounter = new AtomicInteger(0);
   }
 
   /**
@@ -103,7 +111,7 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
*/
   @Test
   public void testCallbackSynchronization()
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 testCallbackSynchronization(SyncTestType.ACTIVE);
 testCallbackSynchronization(SyncTestType.STANDBY);
 testCallbackSynchronization(SyncTestType.NEUTRAL);
@@ -117,9 +125,10 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
* @param type the type of test to run
* @throws IOException if there's an issue transitioning
* @throws InterruptedException if interrupted
+   * @throws TimeoutException if waitFor timeout reached
*/
   private void testCallbackSynchronization(SyncTestType type)
-  throws IOException, InterruptedException {
+  throws IOException, InterruptedException, TimeoutException {
 AdminService as = mock(AdminService.class);
 RMContext rc = mock(RMContext.class);
 ResourceManager rm = mock(ResourceManager.class);
@@ -129,6 +138,17 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
 when(rm.getRMContext()).thenReturn(rc);
 when(rc.getRMAdminService()).thenReturn(as);
 
+doAnswer(invocation -> {
+  transitionToActiveCounter.incrementAndGet();
+  return null;
+}).when(as).transitionToActive(any());
+

[09/50] [abbrv] hadoop git commit: YARN-7946. Update TimelineServerV2 doc as per YARN-7919. (Haibo Chen)

2018-04-10 Thread xyao
YARN-7946. Update TimelineServerV2 doc as per YARN-7919. (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3087e891
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3087e891
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3087e891

Branch: refs/heads/HDFS-7240
Commit: 3087e89135365cad7f28f1bf8c9a1c483e245988
Parents: 7853ec8
Author: Haibo Chen 
Authored: Wed Apr 4 11:59:31 2018 -0700
Committer: Haibo Chen 
Committed: Wed Apr 4 11:59:31 2018 -0700

--
 BUILDING.txt| 12 
 .../src/site/markdown/TimelineServiceV2.md  |  8 
 2 files changed, 16 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3087e891/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index dbf2cb8..3b9a2ef 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -138,6 +138,18 @@ Maven build goals:
   * Use -DskipShade to disable client jar shading to speed up build times (in
     development environments only, not to build release artifacts)
 
+ YARN Application Timeline Service V2 build options:
+
+   YARN Timeline Service v.2 chooses Apache HBase as the primary backing storage. The supported
+   versions of Apache HBase are 1.2.6 (default) and 2.0.0-beta1.
+
+  * HBase 1.2.6 is used by default to build Hadoop. The official releases are ready to use if you
+    plan on running Timeline Service v2 with HBase 1.2.6.
+
+  * Use -Dhbase.profile=2.0 to build Hadoop with HBase 2.0.0-beta1. Provide this option if you plan
+    on running Timeline Service v2 with HBase 2.0.
+
+
  Snappy build options:
 
    Snappy is a compression library that can be utilized by the native code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3087e891/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index f097b60..312c10b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -190,9 +190,9 @@ Each step is explained in more detail below.
 
 #  Step 1) Set up the HBase cluster
 The first part is to set up or pick an Apache HBase cluster to use as the 
storage cluster. The
-version of Apache HBase that is supported with Timeline Service v.2 is 1.2.6. 
The 1.0.x versions
-do not work with Timeline Service v.2. Later versions of HBase have not been 
tested with
-Timeline Service.
+supported versions of Apache HBase are 1.2.6 (default) and 2.0.0-beta1.
+The 1.0.x versions do not work with Timeline Service v.2. By default, Hadoop 
releases are built
+with HBase 1.2.6. To use HBase 2.0.0-beta1, build from source with option 
-Dhbase.profile=2.0
 
 HBase has different deployment modes. Refer to the HBase book for 
understanding them and pick a
 mode that is suitable for your setup.
@@ -236,7 +236,7 @@ is needed for the `flowrun` table creation in the schema 
creator. The default HD
 For example,
 
 hadoop fs -mkdir /hbase/coprocessor
-hadoop fs -put 
hadoop-yarn-server-timelineservice-hbase-3.0.0-alpha1-SNAPSHOT.jar
+hadoop fs -put 
hadoop-yarn-server-timelineservice-hbase-coprocessor-3.2.0-SNAPSHOT.jar
/hbase/coprocessor/hadoop-yarn-server-timelineservice.jar
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. Contributed by Surendra Singh Lilhore.

2018-04-10 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fe7a70e58 -> afbdd8fdc


HDFS-13328. Abstract ReencryptionHandler recursive logic in separate class. 
Contributed by Surendra Singh Lilhore.

(cherry picked from commit f89594f0b80e8efffdcb887daa4a18a2b0a228b3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afbdd8fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afbdd8fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afbdd8fd

Branch: refs/heads/branch-3.0
Commit: afbdd8fdcfda16ab0b3b317d04fb296418a23290
Parents: fe7a70e
Author: Rakesh Radhakrishnan 
Authored: Tue Apr 10 23:35:00 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Apr 10 23:43:28 2018 +0530

--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 339 ++
 .../server/namenode/ReencryptionHandler.java| 615 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 5 files changed, 595 insertions(+), 374 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/afbdd8fd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
new file mode 100644
index 0000000..ff77029
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -0,0 +1,339 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * FSTreeTraverser traverse directory recursively and process files
+ * in batches.
+ */
+@InterfaceAudience.Private
+public abstract class FSTreeTraverser {
+
+
+  public static final Logger LOG = LoggerFactory
+  .getLogger(FSTreeTraverser.class);
+
+  private final FSDirectory dir;
+
+  private long readLockReportingThresholdMs;
+
+  private Timer timer;
+
+  public FSTreeTraverser(FSDirectory dir, Configuration conf) {
+this.dir = dir;
+this.readLockReportingThresholdMs = conf.getLong(
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
+DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT);
+timer = new Timer();
+  }
+
+  public FSDirectory getFSDirectory() {
+return dir;
+  }
+
+  /**
+   * Iterate through all files directly inside parent, and recurse down
+   * directories. The listing is done in batch, and can optionally start after
+   * a position. The iteration of the inode tree is done in a depth-first
+   * fashion. But instead of holding all {@link INodeDirectory}'s in memory
+   * on the fly, only the path components to the current inode is held. This
+   * is to reduce memory consumption.
+   *
+   * @param parent
+   *  The inode id of parent directory
+   * @param startId
+   *  Id of the start inode.
+   * @param startAfter
+   *  Full path of a file the traverse 

[hadoop] Git Push Summary

2018-04-10 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-8 [created] df3ff9042

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs all the time. Contributed by Jinglun."

2018-04-10 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 6f4c26e9b -> 33cf224dc


Revert "HDFS-13388. RequestHedgingProxyProvider calls multiple configured NNs 
all the time. Contributed by Jinglun."

This reverts commit 3d2e327e2f99bac161fa8f00e93b5b6edece2a65.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33cf224d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33cf224d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33cf224d

Branch: refs/heads/branch-2.9
Commit: 33cf224dcbbd2549a86bde3946fc36c2941d9905
Parents: 6f4c26e
Author: Inigo Goiri 
Authored: Tue Apr 10 08:50:44 2018 -0700
Committer: Inigo Goiri 
Committed: Tue Apr 10 08:50:44 2018 -0700

--
 .../ha/RequestHedgingProxyProvider.java |  3 --
 .../ha/TestRequestHedgingProxyProvider.java | 34 
 2 files changed, 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33cf224d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
index b9f213e..49fe4be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RequestHedgingProxyProvider.java
@@ -76,9 +76,6 @@ public class RequestHedgingProxyProvider<T> extends
     public Object
     invoke(Object proxy, final Method method, final Object[] args)
         throws Throwable {
-      if (currentUsedProxy != null) {
-        return method.invoke(currentUsedProxy.proxy, args);
-      }
       Map<Future<Object>, ProxyInfo<T>> proxyMap = new HashMap<>();
       int numAttempts = 0;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33cf224d/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
index 3c46f52..04e77ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
@@ -42,13 +42,10 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import static org.mockito.Mockito.when;
-import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
 
@@ -102,37 +99,6 @@ public class TestRequestHedgingProxyProvider {
   }
 
   @Test
-  public void testRequestNNAfterOneSuccess() throws Exception {
-    final AtomicInteger count = new AtomicInteger(0);
-    final ClientProtocol goodMock = mock(ClientProtocol.class);
-    when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
-      @Override
-      public long[] answer(InvocationOnMock invocation) throws Throwable {
-        count.incrementAndGet();
-        Thread.sleep(1000);
-        return new long[]{1};
-      }
-    });
-    final ClientProtocol badMock = mock(ClientProtocol.class);
-    when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
-      @Override
-      public long[] answer(InvocationOnMock invocation) throws Throwable {
-        count.incrementAndGet();
-        throw new IOException("Bad mock !!");
-      }
-    });
-
-    RequestHedgingProxyProvider<ClientProtocol> provider =
-        new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class,
-            createFactory(badMock, goodMock, goodMock, badMock));
-    ClientProtocol proxy = provider.getProxy().proxy;
-    proxy.getStats();
-    assertEquals(2, count.get());
-    proxy.getStats();
-    assertEquals(3, count.get());
-  }
-
-  @Test
   public void testHedgingWhenOneIsSlow() throws Exception {
 final ClientProtocol goodMock = Mockito.mock(ClientProtocol.class);
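For context: the lines removed above delete the short-circuit that made the provider keep using the first proxy that answered, so after this revert every invocation again races all configured proxies. A minimal editorial sketch of that hedging idea, using only java.util.concurrent and invented names (not the actual Hadoop implementation):

import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

public class HedgingSketch {
  // Fire the same request at every proxy and return the first successful
  // answer, tolerating individual proxy failures along the way.
  static <T> T hedge(List<Supplier<T>> proxies) throws Exception {
    if (proxies.isEmpty()) {
      throw new IllegalArgumentException("no proxies configured");
    }
    ExecutorService pool = Executors.newFixedThreadPool(proxies.size());
    try {
      CompletionService<T> cs = new ExecutorCompletionService<>(pool);
      for (Supplier<T> proxy : proxies) {
        cs.submit(proxy::get);
      }
      ExecutionException last = null;
      for (int i = 0; i < proxies.size(); i++) {
        try {
          return cs.take().get();   // first completed call; may have failed
        } catch (ExecutionException e) {
          last = e;                 // that proxy failed, wait for another
        }
      }
      throw last;                   // every proxy failed
    } finally {
      pool.shutdownNow();           // cancel slower outstanding calls
    }
  }
}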
 

[18/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
new file mode 100644
index 000..3ccbae4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/CHANGES.3.1.0.md
@@ -0,0 +1,1022 @@
+
+
+# Apache Hadoop Changelog
+
+## Release 3.1.0 - 2018-03-30
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | Metrics sinks may emit too frequently if multiple sink periods are configured |  Minor | metrics | Erik Krogen | Erik Krogen |
+| [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | Fsck report shows config key name for min replication issues |  Minor | hdfs | Harshakiran Reddy | Gabor Bota |
+| [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | RBF: Document Router and State Store metrics |  Major | documentation | Yiqun Lin | Yiqun Lin |
+| [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | RBF: Add ACL support for mount table |  Major | . | Yiqun Lin | Yiqun Lin |
+| [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath |  Major | timelineclient, timelinereader, timelineserver | Vrushali C | Varun Saxena |
+| [HADOOP-13282](https://issues.apache.org/jira/browse/HADOOP-13282) | S3 blob etags to be made visible in S3A status/getFileChecksum() calls |  Minor | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-13099](https://issues.apache.org/jira/browse/HDFS-13099) | RBF: Use the ZooKeeper as the default State Store |  Minor | documentation | Yiqun Lin | Yiqun Lin |
+| [YARN-7677](https://issues.apache.org/jira/browse/YARN-7677) | Docker image cannot set HADOOP\_CONF\_DIR |  Major | . | Eric Badger | Jim Brennan |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HDFS-13083](https://issues.apache.org/jira/browse/HDFS-13083) | RBF: Fix doc error setting up client |  Major | federation | tartarus | tartarus |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [HADOOP-15005](https://issues.apache.org/jira/browse/HADOOP-15005) | Support meta tag element in Hadoop XML configurations |  Major | . | Ajay Kumar | Ajay Kumar |
+| [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) | [Umbrella] Extend the YARN resource model for easier resource-type management and profiles |  Major | nodemanager, resourcemanager | Varun Vasudev | Varun Vasudev |
+| [HDFS-7877](https://issues.apache.org/jira/browse/HDFS-7877) | [Umbrella] Support maintenance state for datanodes |  Major | datanode, namenode | Ming Ma | Ming Ma |
+| [HADOOP-13055](https://issues.apache.org/jira/browse/HADOOP-13055) | Implement linkMergeSlash and linkFallback for ViewFileSystem |  Major | fs, viewfs | Zhe Zhang | Manoj Govindassamy |
+| [YARN-6871](https://issues.apache.org/jira/browse/YARN-6871) | Add additional deSelects params in RMWebServices#getAppReport |  Major | resourcemanager, router | Giovanni Matteo Fumarola | Tanuj Nayak |
+| [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | Tool to estimate resource requirements of an application pipeline based on prior executions |  Major | tools | Subru Krishnan | Rui Li |
+| [HDFS-206](https://issues.apache.org/jira/browse/HDFS-206) | Support for head in FSShell |  Minor | . | Olga Natkovich | Gabor Bota |
+| [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | [Umbrella] Native YARN framework layer for services and beyond |  Major | . | Vinod Kumar Vavilapalli |  |
+| [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | [Umbrella] Simplified discovery of services via DNS mechanisms |  Major | . | Vinod Kumar Vavilapalli |  |
+| [HADOOP-13786](https://issues.apache.org/jira/browse/HADOOP-13786) | Add S3A committer for zero-rename commits to S3 endpoints |  Major | fs/s3 | Steve Loughran | Steve Loughran |
+| [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | Allow HDFS block replicas to be provided by an external storage system |  Major | . | Chris Douglas |  |
+| [YARN-6592](https://issues.apache.org/jira/browse/YARN-6592) | [Umbrella] Rich placement constraints in YARN |  Major | . | Konstantinos Karanasos |  |
+| [HDFS-12998](https://issues.apache.org/jira/browse/HDFS-12998) | SnapshotDiff - Provide an iterator-based listing API for calculating

[42/50] [abbrv] hadoop git commit: YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate.

2018-04-10 Thread haibochen
YARN-1015. FS should watch node resource utilization and allocate opportunistic 
containers if appropriate.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82ef338d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82ef338d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82ef338d

Branch: refs/heads/YARN-1011
Commit: 82ef338d65f957ff58ca725c4ea52f504167f29b
Parents: b237095
Author: Haibo Chen 
Authored: Fri Nov 17 07:47:32 2017 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:13:04 2018 -0700

--
 .../sls/scheduler/FairSchedulerMetrics.java |   4 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +
 .../src/main/resources/yarn-default.xml |  13 +
 .../scheduler/SchedulerNode.java|  48 ++
 .../scheduler/fair/FSAppAttempt.java| 166 ---
 .../scheduler/fair/FSLeafQueue.java |  51 +-
 .../scheduler/fair/FSParentQueue.java   |  36 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |  39 +-
 .../scheduler/fair/FairScheduler.java   |  97 ++--
 .../fair/FairSchedulerConfiguration.java|   5 +
 .../scheduler/fair/Schedulable.java |  17 +-
 .../DominantResourceFairnessPolicy.java |   8 +-
 .../fair/policies/FairSharePolicy.java  |   4 +-
 .../webapp/dao/FairSchedulerQueueInfo.java  |   2 +-
 .../yarn/server/resourcemanager/MockNodes.java  |  60 ++-
 .../TestWorkPreservingRMRestart.java|   2 +-
 .../scheduler/fair/FakeSchedulable.java |   9 +-
 .../scheduler/fair/TestAppRunnability.java  |   9 +-
 .../scheduler/fair/TestFSAppAttempt.java|   4 +-
 .../scheduler/fair/TestFSLeafQueue.java |   4 +-
 .../scheduler/fair/TestFSSchedulerNode.java |   4 +-
 .../scheduler/fair/TestFairScheduler.java   | 468 +--
 .../scheduler/fair/TestSchedulingPolicy.java|  10 +-
 23 files changed, 861 insertions(+), 204 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ef338d/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
index a5aee74..1f4e7c7 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
@@ -75,7 +75,7 @@ public class FairSchedulerMetrics extends SchedulerMetrics {
   case DEMAND:
 return schedulable.getDemand().getMemorySize();
   case USAGE:
-return schedulable.getResourceUsage().getMemorySize();
+return schedulable.getGuaranteedResourceUsage().getMemorySize();
   case MINSHARE:
 return schedulable.getMinShare().getMemorySize();
   case MAXSHARE:
@@ -96,7 +96,7 @@ public class FairSchedulerMetrics extends SchedulerMetrics {
   case DEMAND:
 return schedulable.getDemand().getVirtualCores();
   case USAGE:
-return schedulable.getResourceUsage().getVirtualCores();
+return schedulable.getGuaranteedResourceUsage().getVirtualCores();
   case MINSHARE:
 return schedulable.getMinShare().getVirtualCores();
   case MAXSHARE:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82ef338d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index fdbbdbf..4c1c6d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -270,6 +270,11 @@ public class YarnConfiguration extends Configuration {
 
   public static final String APP_NAME_PLACEMENT_RULE = "app-name";
 
+  public static final String RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED =
+  RM_PREFIX + "scheduler.oversubscription.enabled";
+  public static final boolean DEFAULT_RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED
+  = false;
+
   /** Enable Resource Manager webapp ui actions */
   public static final String RM_WEBAPP_UI_ACTIONS_ENABLED =
 RM_PREFIX + 
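As an editorial illustration only: the new flag can be flipped through the standard Configuration API. The key string below assumes RM_PREFIX resolves to "yarn.resourcemanager."; per the diff above, the flag defaults to false.

import org.apache.hadoop.conf.Configuration;

public class OversubscriptionFlagSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Equivalent to setting RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED.
    conf.setBoolean(
        "yarn.resourcemanager.scheduler.oversubscription.enabled", true);
    System.out.println(conf.getBoolean(
        "yarn.resourcemanager.scheduler.oversubscription.enabled", false));
  }
}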

[04/50] [abbrv] hadoop git commit: YARN-8013. Support application tags when defining application namespaces for placement constraints. Contributed by Weiwei Yang.

2018-04-10 Thread haibochen
YARN-8013. Support application tags when defining application namespaces for 
placement constraints. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7853ec8d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7853ec8d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7853ec8d

Branch: refs/heads/YARN-1011
Commit: 7853ec8d2fb8731b7f7c28fd87491a0a2d47967e
Parents: 42cd367
Author: Konstantinos Karanasos 
Authored: Wed Apr 4 10:51:58 2018 -0700
Committer: Konstantinos Karanasos 
Committed: Wed Apr 4 10:51:58 2018 -0700

--
 .../api/records/AllocationTagNamespaceType.java |   2 +-
 .../constraint/AllocationTagNamespace.java  | 312 --
 .../scheduler/constraint/AllocationTags.java|  44 ++-
 .../constraint/AllocationTagsManager.java   |  47 ++-
 .../constraint/PlacementConstraintsUtil.java|  41 +--
 .../constraint/TargetApplications.java  |  53 ++-
 .../constraint/TargetApplicationsNamespace.java | 326 +++
 .../SingleConstraintAppPlacementAllocator.java  |  21 --
 .../server/resourcemanager/rmapp/MockRMApp.java |   9 +-
 ...estSchedulingRequestContainerAllocation.java |   5 +-
 .../constraint/TestAllocationTagsManager.java   |  22 +-
 .../constraint/TestAllocationTagsNamespace.java |  89 -
 .../TestPlacementConstraintsUtil.java   | 125 ++-
 13 files changed, 654 insertions(+), 442 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
index de5492e..f304600 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AllocationTagNamespaceType.java
@@ -26,7 +26,7 @@ public enum AllocationTagNamespaceType {
   SELF("self"),
   NOT_SELF("not-self"),
   APP_ID("app-id"),
-  APP_LABEL("app-label"),
+  APP_TAG("app-tag"),
   ALL("all");
 
   private String typeKeyword;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7853ec8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
deleted file mode 100644
index 7b9f3be..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagNamespace.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint;
-
-import com.google.common.base.Strings;
-import com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.yarn.api.records.AllocationTagNamespaceType;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-import 

[39/50] [abbrv] hadoop git commit: YARN-7460. Exclude findbugs warnings on SchedulerNode.numGuaranteedContainers and numOpportunisticContainers. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
YARN-7460. Exclude findbugs warnings on SchedulerNode.numGuaranteedContainers 
and numOpportunisticContainers. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ac1486d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ac1486d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ac1486d

Branch: refs/heads/YARN-1011
Commit: 1ac1486df043cbe4bd7cbd59718a833a74732649
Parents: 82ef338
Author: Miklos Szegedi 
Authored: Mon Nov 20 13:27:32 2017 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:13:04 2018 -0700

--
 .../hadoop-yarn/dev-support/findbugs-exclude.xml  | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ac1486d/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml 
b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 5841361..1c07b53 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -76,6 +76,16 @@
 
   
   
+
+
+
+  
+  
+
+
+
+  
+  
 
 
   





[49/50] [abbrv] hadoop git commit: YARN-5473. Expose per-application over-allocation info in the Resource Manager. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
YARN-5473. Expose per-application over-allocation info in the Resource Manager. 
Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eca4df88
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eca4df88
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eca4df88

Branch: refs/heads/YARN-1011
Commit: eca4df88923ec02e190fb58965ea4439c97e3e34
Parents: 8fd21fc
Author: Miklos Szegedi 
Authored: Tue Jan 23 22:34:49 2018 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:48:29 2018 -0700

--
 .../apache/hadoop/mapreduce/TypeConverter.java  |   4 +-
 .../hadoop/mapreduce/TestTypeConverter.java |   4 +-
 .../hadoop/mapred/TestResourceMgrDelegate.java  |   2 +-
 .../records/ApplicationResourceUsageReport.java | 158 +++---
 .../src/main/proto/yarn_protos.proto|   2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  11 +-
 .../apache/hadoop/yarn/client/cli/TopCLI.java   |  18 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  23 ++-
 .../ApplicationResourceUsageReportPBImpl.java   | 205 +++
 .../hadoop/yarn/util/resource/Resources.java|  20 ++
 ...pplicationHistoryManagerOnTimelineStore.java |  83 +---
 .../TestApplicationHistoryClientService.java|   8 +-
 ...pplicationHistoryManagerOnTimelineStore.java |  18 +-
 .../metrics/ApplicationMetricsConstants.java|  10 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  15 +-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  39 +++-
 .../server/resourcemanager/RMAppManager.java|  18 +-
 .../server/resourcemanager/RMServerUtils.java   |   3 +-
 .../metrics/TimelineServiceV1Publisher.java |  14 +-
 .../metrics/TimelineServiceV2Publisher.java |  14 +-
 .../resourcemanager/recovery/RMStateStore.java  |   5 +-
 .../records/ApplicationAttemptStateData.java| 144 +++--
 .../pb/ApplicationAttemptStateDataPBImpl.java   | 110 --
 .../server/resourcemanager/rmapp/RMAppImpl.java |  38 ++--
 .../resourcemanager/rmapp/RMAppMetrics.java |  38 ++--
 .../attempt/AggregateAppResourceUsage.java  |  51 +++--
 .../rmapp/attempt/RMAppAttemptImpl.java |  21 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java  |  47 +++--
 .../ContainerResourceUsageReport.java   |  46 +
 .../rmcontainer/RMContainer.java|   4 +-
 .../rmcontainer/RMContainerImpl.java| 117 ---
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  63 +++---
 .../scheduler/YarnScheduler.java|   2 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +-
 .../scheduler/fair/FSAppAttempt.java|   9 +-
 .../webapp/FairSchedulerAppsBlock.java  |   8 +
 .../resourcemanager/webapp/RMAppBlock.java  |   9 +-
 .../resourcemanager/webapp/RMAppsBlock.java |  10 +
 .../resourcemanager/webapp/dao/AppInfo.java |  78 +--
 .../yarn_server_resourcemanager_recovery.proto  |   1 +
 .../server/resourcemanager/TestAppManager.java  |  39 ++--
 .../resourcemanager/TestApplicationACLs.java|   4 +-
 .../resourcemanager/TestClientRMService.java|  45 ++--
 .../TestContainerResourceUsage.java | 184 ++---
 .../applicationsmanager/MockAsm.java|   4 +-
 .../TestCombinedSystemMetricsPublisher.java |   2 +-
 .../metrics/TestSystemMetricsPublisher.java |  23 ++-
 .../TestSystemMetricsPublisherForV2.java|  12 +-
 .../recovery/RMStateStoreTestBase.java  |  12 +-
 .../recovery/TestZKRMStateStore.java|  40 ++--
 .../rmapp/TestRMAppTransitions.java |   6 +-
 .../attempt/TestRMAppAttemptTransitions.java|  32 +--
 .../capacity/TestCapacityScheduler.java |   4 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   |  20 +-
 .../resourcemanager/webapp/TestAppPage.java |   2 +-
 .../resourcemanager/webapp/TestRMWebApp.java|   3 +-
 .../webapp/TestRMWebAppFairScheduler.java   |   2 +-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../router/webapp/RouterWebServiceUtil.java |  14 +-
 .../router/webapp/TestRouterWebServiceUtil.java |  14 +-
 61 files changed, 1427 insertions(+), 511 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca4df88/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
 

[34/50] [abbrv] hadoop git commit: YARN-6670. Add separate NM overallocation thresholds for cpu and memory (Haibo Chen)

2018-04-10 Thread haibochen
YARN-6670. Add separate NM overallocation thresholds for cpu and memory (Haibo
Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4fb0e82a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4fb0e82a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4fb0e82a

Branch: refs/heads/YARN-1011
Commit: 4fb0e82a516ae02a206a1a717ef3e5b98203ae1e
Parents: 6278cc7
Author: Haibo Chen 
Authored: Mon Jul 10 09:55:42 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 9 17:07:06 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 36 +--
 .../src/main/resources/yarn-default.xml | 42 ++--
 .../server/api/records/ResourceThresholds.java  | 11 +++-
 .../monitor/ContainersMonitorImpl.java  | 67 +++-
 4 files changed, 124 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fb0e82a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2d69fa9..45a2fb2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2049,17 +2049,39 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
-  /** Overallocation (= allocation based on utilization) configs. */
-  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
-  NM_PREFIX + "overallocation.allocation-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
-  = 0f;
+  /**
+   * General overallocation threshold if no resource-type-specific
+   * threshold is provided.
+   */
+  public static final String NM_OVERALLOCATION_GENERAL_THRESHOLD =
+  NM_PREFIX + "overallocation.general-utilization-threshold";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_GENERAL_THRESHOLD = -1.0f;
+  /**
+   * The maximum value of utilization threshold for all resource types
+   * up to which the scheduler allocates OPPORTUNISTIC containers.
+   */
   @Private
-  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final float MAX_NM_OVERALLOCATION_THRESHOLD = 0.95f;
+
+  /**
+   * NM CPU utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_CPU_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.cpu-utilization-threshold";
+
+  /**
+   * NM memory utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.memory-utilization-threshold";
+
   public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
   NM_PREFIX + "overallocation.preemption-threshold";
   public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0f;
+  = 0.96f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4fb0e82a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4a7548a..5238f60 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1711,14 +1711,44 @@
 
   
 The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node that
+  applies to all resource types (expressed as a float between 0 and 0.95).
+  By default, over-allocation is turned off (value = -1). When turned on,
+  the node allows running OPPORTUNISTIC containers when the aggregate
+  utilization for each resource type is under the value specified here
+ 
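To make the thresholds above concrete, a hypothetical setup via the Configuration API; the 0.7 and 0.8 values are illustrative only, not recommendations. Key strings follow the NM_PREFIX + "overallocation.*" constants in the diff, assuming the standard "yarn.nodemanager." prefix.

import org.apache.hadoop.conf.Configuration;

public class OverAllocationThresholdsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow OPPORTUNISTIC containers while CPU utilization is under 70%
    // and memory utilization is under 80% of the node's capacity.
    conf.setFloat(
        "yarn.nodemanager.overallocation.cpu-utilization-threshold", 0.7f);
    conf.setFloat(
        "yarn.nodemanager.overallocation.memory-utilization-threshold", 0.8f);
    // Preempt OPPORTUNISTIC containers once utilization crosses 96%,
    // matching the new default introduced in the diff above.
    conf.setFloat(
        "yarn.nodemanager.overallocation.preemption-threshold", 0.96f);
  }
}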

[17/50] [abbrv] hadoop git commit: Added CHANGES/RELEASES/Jdiff for 3.1.0 release

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6cf023f9/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
new file mode 100644
index 000..9e3c65d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.1.0/RELEASENOTES.3.1.0.md
@@ -0,0 +1,199 @@
+
+
+# Apache Hadoop 3.1.0 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+
+
+---
+
+* [HDFS-11799](https://issues.apache.org/jira/browse/HDFS-11799) | *Major* | **Introduce a config to allow setting up write pipeline with fewer nodes than replication factor**
+
+Added new configuration "dfs.client.block.write.replace-datanode-on-failure.min-replication".
+
+The minimum number of replications that are needed not to fail
+  the write pipeline if new datanodes cannot be found to replace
+  failed datanodes (could be due to network failure) in the write pipeline.
+  If the number of the remaining datanodes in the write pipeline is greater
+  than or equal to this property value, continue writing to the remaining nodes.
+  Otherwise, throw an exception.
+
+  If this is set to 0, an exception will be thrown when a replacement
+  cannot be found.
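A hypothetical client-side example of setting this key; the value 2 is arbitrary, chosen only to illustrate the semantics described above.

import org.apache.hadoop.conf.Configuration;

public class MinReplicationSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Keep writing as long as at least 2 datanodes remain in the pipeline;
    // below that, fail the write instead of continuing degraded.
    conf.setInt(
        "dfs.client.block.write.replace-datanode-on-failure.min-replication",
        2);
  }
}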
+
+
+---
+
+* [HDFS-12486](https://issues.apache.org/jira/browse/HDFS-12486) | *Major* | **GetConf to get journalnodeslist**
+
+Adds a getconf command option to list the journal nodes.
+Usage: hdfs getconf -journalnodes
+
+
+---
+
+* [HADOOP-14840](https://issues.apache.org/jira/browse/HADOOP-14840) | *Major* | **Tool to estimate resource requirements of an application pipeline based on prior executions**
+
+The first version of the Resource Estimator service, a tool that captures the historical resource usage of an app and predicts its future resource requirement.
+
+
+---
+
+* [YARN-5079](https://issues.apache.org/jira/browse/YARN-5079) | *Major* | **[Umbrella] Native YARN framework layer for services and beyond**
+
+A framework is implemented to orchestrate containers on YARN.
+
+
+---
+
+* [YARN-4757](https://issues.apache.org/jira/browse/YARN-4757) | *Major* | **[Umbrella] Simplified discovery of services via DNS mechanisms**
+
+A DNS server backed by the YARN service registry is implemented to enable service discovery on YARN using standard DNS lookup.
+
+
+---
+
+* [YARN-4793](https://issues.apache.org/jira/browse/YARN-4793) | *Major* | **[Umbrella] Simplified API layer for services and beyond**
+
+A REST API service is implemented to enable users to launch and manage container-based services on YARN via REST API.
+
+
+---
+
+* [HADOOP-15008](https://issues.apache.org/jira/browse/HADOOP-15008) | *Minor* | **Metrics sinks may emit too frequently if multiple sink periods are configured**
+
+Previously, if multiple metrics sinks were configured with different periods, they could emit more frequently than configured, at a period as low as the GCD of the configured periods (for example, sinks configured at 10 and 25 seconds could both emit every 5 seconds). This change makes all metrics sinks emit at their configured period.
+
+
+---
+
+* [HDFS-12825](https://issues.apache.org/jira/browse/HDFS-12825) | *Minor* | **Fsck report shows config key name for min replication issues**
+
+**WARNING: No release note provided for this change.**
+
+
+---
+
+* [HDFS-12883](https://issues.apache.org/jira/browse/HDFS-12883) | *Major* | **RBF: Document Router and State Store metrics**
+
+This JIRA makes the following change:
+Change the Router metrics context from 'router' to 'dfs'.
+
+
+---
+
+* [HDFS-12895](https://issues.apache.org/jira/browse/HDFS-12895) | *Major* | **RBF: Add ACL support for mount table**
+
+Mount tables now support ACLs. Users cannot modify mount table entries they do not own; pre-existing entries, which had no permissions before this change, are assumed to default to owner:superuser, group:supergroup, permission:755. To modify such entries, log in as the superuser.
+
+
+---
+
+* [YARN-7190](https://issues.apache.org/jira/browse/YARN-7190) | *Major* | **Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath**
+
+Ensure only NM classpath in 2.x gets TSv2 related hbase jars, not the user classpath.
+
+
+---
+
+* [HDFS-9806](https://issues.apache.org/jira/browse/HDFS-9806) | *Major* | **Allow HDFS block replicas to be provided by an external storage system**
+
+Provided storage allows data stored outside HDFS to be mapped to and addressed from HDFS. It builds on heterogeneous storage by introducing a new storage type, PROVIDED, to the set of media in a datanode. Clients accessing data in PROVIDED storages can cache replicas in local media, enforce HDFS invariants (e.g.,

[48/50] [abbrv] hadoop git commit: YARN-5473. Expose per-application over-allocation info in the Resource Manager. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/eca4df88/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
index ed71ea2..0243443 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
@@ -56,8 +57,9 @@ public class ApplicationAttemptStateDataPBImpl extends
   private Container masterContainer = null;
   private ByteBuffer appAttemptTokens = null;
 
-  private Map<String, Long> resourceSecondsMap;
+  private Map<String, Long> guaranteedResourceSecondsMap;
   private Map<String, Long> preemptedResourceSecondsMap;
+  private Map<String, Long> opportunisticResourceSecondsMap;
 
   public ApplicationAttemptStateDataPBImpl() {
 builder = ApplicationAttemptStateDataProto.newBuilder();
@@ -243,30 +245,72 @@ public class ApplicationAttemptStateDataPBImpl extends
   }
 
   @Override
+  @Deprecated
   public long getMemorySeconds() {
-ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
-return p.getMemorySeconds();
+return getGuaranteedMemorySeconds();
   }
  
   @Override
+  @Deprecated
   public long getVcoreSeconds() {
-ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
-return p.getVcoreSeconds();
+return getGuaranteedVcoreSeconds();
   }
 
   @Override
+  @Deprecated
   public void setMemorySeconds(long memorySeconds) {
-maybeInitBuilder();
-builder.setMemorySeconds(memorySeconds);
+setGuaranteedMemorySeconds(memorySeconds);
   }
  
   @Override
+  @Deprecated
   public void setVcoreSeconds(long vcoreSeconds) {
+setGuaranteedVcoreSeconds(vcoreSeconds);
+  }
+
+  @Override
+  public long getGuaranteedMemorySeconds() {
+ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+return p.getMemorySeconds();
+  }
+
+  @Override
+  public void setGuaranteedMemorySeconds(long memorySeconds) {
+maybeInitBuilder();
+builder.setMemorySeconds(memorySeconds);
+  }
+
+  @Override
+  public long getGuaranteedVcoreSeconds() {
+ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+return p.getVcoreSeconds();
+  }
+
+  @Override
+  public void setGuaranteedVcoreSeconds(long vcoreSeconds) {
 maybeInitBuilder();
 builder.setVcoreSeconds(vcoreSeconds);
   }
 
   @Override
+  public long getOpportunisticMemorySeconds() {
+    Map<String, Long> tmp = getOpportunisticResourceSecondsMap();
+    if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) {
+      return tmp.get(ResourceInformation.MEMORY_MB.getName());
+    }
+    return 0;
+  }
+
+  @Override
+  public long getOpportunisticVcoreSeconds() {
+    Map<String, Long> tmp = getOpportunisticResourceSecondsMap();
+    if (tmp.containsKey(ResourceInformation.VCORES.getName())) {
+      return tmp.get(ResourceInformation.VCORES.getName());
+    }
+    return 0;
+  }
+
+  @Override
   public long getPreemptedMemorySeconds() {
 ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
 return p.getPreemptedMemorySeconds();
@@ -410,21 +454,35 @@ public class ApplicationAttemptStateDataPBImpl extends
   }
 
   @Override
+  @Deprecated
   public Map<String, Long> getResourceSecondsMap() {
-    if (this.resourceSecondsMap != null) {
-      return this.resourceSecondsMap;
+    return getGuaranteedResourceSecondsMap();
+  }
+
+  @Override
+  @Deprecated
+  public void setResourceSecondsMap(Map<String, Long> resourceSecondsMap) {
+

[43/50] [abbrv] hadoop git commit: YARN-6921. Allow resource request to opt out of oversubscription in Fair Scheduler. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
YARN-6921. Allow resource request to opt out of oversubscription in Fair 
Scheduler. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86a6c26f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86a6c26f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86a6c26f

Branch: refs/heads/YARN-1011
Commit: 86a6c26fbf9bb9c27e3993058672edda2367c77d
Parents: 00aed24
Author: Miklos Szegedi 
Authored: Wed Nov 22 09:03:05 2017 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:19:02 2018 -0700

--
 .../scheduler/common/PendingAsk.java| 15 +++-
 .../scheduler/fair/FSAppAttempt.java|  5 ++
 .../LocalityAppPlacementAllocator.java  | 27 ++-
 .../scheduler/fair/FairSchedulerTestBase.java   | 32 +++-
 .../scheduler/fair/TestFairScheduler.java   | 77 
 5 files changed, 149 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a6c26f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
index 2ed3e83..470dbbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/PendingAsk.java
@@ -30,16 +30,21 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 public class PendingAsk {
   private final Resource perAllocationResource;
   private final int count;
-  public final static PendingAsk ZERO = new PendingAsk(Resources.none(), 0);
+  public final static PendingAsk ZERO =
+  new PendingAsk(Resources.none(), 0, false);
+
+  private final boolean isGuaranteedTypeEnforced;
 
   public PendingAsk(ResourceSizing sizing) {
 this.perAllocationResource = sizing.getResources();
 this.count = sizing.getNumAllocations();
+this.isGuaranteedTypeEnforced = true;
   }
 
-  public PendingAsk(Resource res, int num) {
+  public PendingAsk(Resource res, int num, boolean guaranteedTypeEnforced) {
 this.perAllocationResource = res;
 this.count = num;
+this.isGuaranteedTypeEnforced = guaranteedTypeEnforced;
   }
 
   public Resource getPerAllocationResource() {
@@ -50,11 +55,17 @@ public class PendingAsk {
 return count;
   }
 
+  public boolean isGuaranteedTypeEnforced() {
+return isGuaranteedTypeEnforced;
+  }
+
   @Override
   public String toString() {
 StringBuilder sb = new StringBuilder();
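An illustrative use of the new three-argument constructor shown above (PendingAsk is an RM-internal class; this sketch only demonstrates the intent of the opt-out flag):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
import org.apache.hadoop.yarn.util.resource.Resources;

public class PendingAskSketch {
  public static void main(String[] args) {
    Resource perContainer = Resources.createResource(1024, 1); // 1 GB, 1 vcore
    // Third argument true: only GUARANTEED containers may satisfy this ask,
    // i.e. the request opts out of oversubscription.
    PendingAsk ask = new PendingAsk(perContainer, 3, true);
    System.out.println(ask.isGuaranteedTypeEnforced()); // prints: true
  }
}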
 

[50/50] [abbrv] hadoop git commit: YARN-6739. Crash NM at start time if oversubscription is on but LinuxContainerExecutor or cgroup is off. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
YARN-6739. Crash NM at start time if oversubscription is on but
LinuxContainerExecutor or cgroup is off. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72534c37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72534c37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72534c37

Branch: refs/heads/YARN-1011
Commit: 72534c37a91e656095bc0010918aa4271d065363
Parents: eca4df8
Author: Miklos Szegedi 
Authored: Mon Apr 2 15:09:52 2018 -0700
Committer: Haibo Chen 
Committed: Mon Apr 9 17:48:29 2018 -0700

--
 .../monitor/ContainersMonitorImpl.java  | 27 
 1 file changed, 27 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72534c37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 2fa7bca..acc256f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -20,6 +20,9 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -184,6 +187,7 @@ public class ContainersMonitorImpl extends AbstractService 
implements
 
 initializeOverAllocation(conf);
 if (context.isOverAllocationEnabled()) {
+  checkOverAllocationPrerequisites();
   pmemCheckEnabled = true;
   LOG.info("Force enabling physical memory checks because " +
   "overallocation is enabled");
@@ -223,6 +227,29 @@ public class ContainersMonitorImpl extends AbstractService 
implements
 super.serviceInit(this.conf);
   }
 
+  /**
+   * Check all prerequisites for NM over-allocation.
+   */
+  private void checkOverAllocationPrerequisites() throws YarnException {
+    // LinuxContainerExecutor is required to enable overallocation
+    if (!(containerExecutor instanceof LinuxContainerExecutor)) {
+      throw new YarnException(LinuxContainerExecutor.class.getName() +
+          " is required for overallocation");
+    }
+    if (ResourceHandlerModule.getCGroupsHandler() == null) {
+      throw new YarnException("CGroups must be enabled to support" +
+          " overallocation");
+    }
+    if (ResourceHandlerModule.getCpuResourceHandler() == null) {
+      throw new YarnException(
+          "CGroups cpu isolation must be enabled to support overallocation");
+    }
+    if (ResourceHandlerModule.getMemoryResourceHandler() == null) {
+      throw new YarnException(
+          "CGroups memory isolation must be enabled for overallocation");
+    }
+  }
+
   private boolean isContainerMonitorEnabled() {
 return conf.getBoolean(YarnConfiguration.NM_CONTAINER_MONITOR_ENABLED,
 YarnConfiguration.DEFAULT_NM_CONTAINER_MONITOR_ENABLED);





[40/50] [abbrv] hadoop git commit: YARN-7337. Expose per-node over-allocation info in Node Report. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
YARN-7337. Expose per-node over-allocation info in Node Report. Contributed by
Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00aed24b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00aed24b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00aed24b

Branch: refs/heads/YARN-1011
Commit: 00aed24b1baa888d4ed4f45a8f2dd02bcaa8e58a
Parents: 1ac1486
Author: Miklos Szegedi 
Authored: Wed Nov 22 08:39:03 2017 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:13:04 2018 -0700

--
 .../hadoop/mapreduce/v2/TestRMNMInfo.java   | 14 ++-
 .../hadoop/yarn/api/records/NodeReport.java | 98 +++-
 .../src/main/proto/yarn_protos.proto|  7 +-
 .../applications/distributedshell/Client.java   |  2 +-
 .../apache/hadoop/yarn/client/cli/NodeCLI.java  | 49 +++---
 .../hadoop/yarn/client/cli/TestYarnCLI.java | 43 ++---
 .../api/records/impl/pb/NodeReportPBImpl.java   | 94 +++
 .../hadoop/yarn/server/utils/BuilderUtils.java  | 37 
 .../server/resourcemanager/ClientRMService.java | 21 +++--
 .../resourcemanager/DefaultAMSProcessor.java| 22 +++--
 .../yarn/server/resourcemanager/RMNMInfo.java   | 45 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  2 +-
 .../scheduler/SchedulerNodeReport.java  | 50 +++---
 .../webapp/dao/FifoSchedulerInfo.java   |  9 +-
 .../resourcemanager/webapp/dao/NodeInfo.java| 17 ++--
 .../server/resourcemanager/NodeManager.java |  4 +-
 .../capacity/TestApplicationPriority.java   | 66 -
 .../capacity/TestCapacityScheduler.java | 32 ---
 .../TestNodeLabelContainerAllocation.java   | 40 
 .../scheduler/fifo/TestFifoScheduler.java   | 50 ++
 .../webapp/TestRMWebServicesNodes.java  | 13 +--
 21 files changed, 491 insertions(+), 224 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00aed24b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
index efea709..76b5493 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
@@ -124,11 +124,17 @@ public class TestRMNMInfo {
   Assert.assertNotNull(n.get("NodeManagerVersion"));
   Assert.assertNotNull(n.get("NumContainers"));
   Assert.assertEquals(
-  n.get("NodeId") + ": Unexpected number of used containers",
-  0, n.get("NumContainers").asInt());
+  n.get("NodeId") + ": Unexpected number of guaranteed containers used",
+  0, n.get("NumContainers").asInt());
+  Assert.assertEquals(n.get("NodeId") +
+  ": Unexpected number of opportunistic containers used",
+  0, n.get("NumOpportunisticContainers").asInt());
   Assert.assertEquals(
-  n.get("NodeId") + ": Unexpected amount of used memory",
+  n.get("NodeId") + ": Unexpected amount of guaranteed memory used",
   0, n.get("UsedMemoryMB").asInt());
+  Assert.assertEquals(
+  n.get("NodeId") + ": Unexpected amount of used opportunistic memory",
+  0, n.get("UsedOpportunisticMemoryMB").asInt());
   Assert.assertNotNull(n.get("AvailableMemoryMB"));
 }
   }
@@ -161,6 +167,8 @@ public class TestRMNMInfo {
   Assert.assertNotNull(n.get("NodeManagerVersion"));
   Assert.assertNull(n.get("NumContainers"));
   Assert.assertNull(n.get("UsedMemoryMB"));
+  Assert.assertNull(n.get("NumOpportunisticContainers"));
+  Assert.assertNull(n.get("UsedOpportunisticMemoryMB"));
   Assert.assertNull(n.get("AvailableMemoryMB"));
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00aed24b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
 

[36/50] [abbrv] hadoop git commit: YARN-4511. Common scheduler changes to support scheduler-specific oversubscription implementations.

2018-04-10 Thread haibochen
YARN-4511. Common scheduler changes to support scheduler-specific 
oversubscription implementations.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b237095d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b237095d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b237095d

Branch: refs/heads/YARN-1011
Commit: b237095dbb5fd880b89c08aa6ad50e7fdf3ef4b9
Parents: da3021d
Author: Haibo Chen 
Authored: Thu Nov 2 09:12:19 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 9 17:07:06 2018 -0700

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |   6 +
 .../yarn/sls/scheduler/RMNodeWrapper.java   |   6 +
 .../resourcemanager/ResourceTrackerService.java |   3 +-
 .../monitor/capacity/TempSchedulerNode.java |   2 +-
 .../server/resourcemanager/rmnode/RMNode.java   |   7 +
 .../resourcemanager/rmnode/RMNodeImpl.java  |  13 +-
 .../scheduler/AbstractYarnScheduler.java|   4 +-
 .../scheduler/ClusterNodeTracker.java   |   6 +-
 .../scheduler/SchedulerNode.java| 317 +++
 .../scheduler/SchedulerNodeReport.java  |   4 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +-
 .../allocator/RegularContainerAllocator.java|   4 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +-
 .../common/fica/FiCaSchedulerNode.java  |  11 +-
 .../scheduler/fair/FSPreemptionThread.java  |   2 +-
 .../scheduler/fair/FSSchedulerNode.java |   9 +-
 .../yarn/server/resourcemanager/MockNodes.java  |   6 +
 .../TestWorkPreservingRMRestart.java|  39 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   2 +-
 ...alCapacityPreemptionPolicyMockFramework.java |   6 +-
 .../scheduler/TestAbstractYarnScheduler.java|   4 +-
 .../scheduler/TestSchedulerNode.java| 393 +++
 .../capacity/TestCapacityScheduler.java |   2 +-
 .../TestCapacitySchedulerAsyncScheduling.java   |   8 +-
 .../scheduler/capacity/TestLeafQueue.java   |   4 +-
 .../TestNodeLabelContainerAllocation.java   |  14 +-
 .../fair/TestContinuousScheduling.java  |  42 +-
 .../scheduler/fair/TestFSSchedulerNode.java |  18 +-
 .../scheduler/fair/TestFairScheduler.java   |  14 +-
 .../scheduler/fifo/TestFifoScheduler.java   |   4 +-
 30 files changed, 787 insertions(+), 167 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index 0c99139..4b9800f 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
+import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo;
 import 
org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
@@ -200,6 +201,11 @@ public class NodeInfo {
 }
 
 @Override
+public OverAllocationInfo getOverAllocationInfo() {
+  return null;
+}
+
+@Override
 public long getUntrackedTimeStamp() {
   return 0;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b237095d/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 78645e9..a652ac8 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceUtilization;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
 import 

[27/50] [abbrv] hadoop git commit: YARN-7905. Parent directory permission incorrect during public localization. Contributed by Bilwa S T.

2018-04-10 Thread haibochen
YARN-7905. Parent directory permission incorrect during public localization. 
Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb47c3de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb47c3de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb47c3de

Branch: refs/heads/YARN-1011
Commit: eb47c3de74ba4b8b3ef47eaf3a44e5562fd22fc9
Parents: 70590cd
Author: bibinchundatt 
Authored: Sat Apr 7 12:13:00 2018 +0530
Committer: bibinchundatt 
Committed: Sat Apr 7 12:26:29 2018 +0530

--
 .../localizer/ResourceLocalizationService.java  |  20 +++
 .../TestResourceLocalizationService.java| 125 +++
 2 files changed, 145 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
old mode 100644
new mode 100755
index 29fc747..ddae2ae
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -152,6 +152,8 @@ public class ResourceLocalizationService extends 
CompositeService
LoggerFactory.getLogger(ResourceLocalizationService.class);
   public static final String NM_PRIVATE_DIR = "nmPrivate";
   public static final FsPermission NM_PRIVATE_PERM = new FsPermission((short) 0700);
+  private static final FsPermission PUBLIC_FILECACHE_FOLDER_PERMS =
+      new FsPermission((short) 0755);
 
   private Server server;
   private InetSocketAddress localizationServerAddress;
@@ -881,6 +883,7 @@ public class ResourceLocalizationService extends 
CompositeService
 publicRsrc.getPathForLocalization(key, publicRootPath,
 delService);
 if (!publicDirDestPath.getParent().equals(publicRootPath)) {
+  createParentDirs(publicDirDestPath, publicRootPath);
   if (diskValidator != null) {
 diskValidator.checkStatus(
 new File(publicDirDestPath.toUri().getPath()));
@@ -932,6 +935,23 @@ public class ResourceLocalizationService extends 
CompositeService
   }
 }
 
+    private void createParentDirs(Path destDirPath, Path destDirRoot)
+        throws IOException {
+      if (destDirPath == null || destDirPath.equals(destDirRoot)) {
+        return;
+      }
+      createParentDirs(destDirPath.getParent(), destDirRoot);
+      createDir(destDirPath, PUBLIC_FILECACHE_FOLDER_PERMS);
+    }
+
+    private void createDir(Path dirPath, FsPermission perms)
+        throws IOException {
+      lfs.mkdir(dirPath, perms, false);
+      if (!perms.equals(perms.applyUMask(lfs.getUMask()))) {
+        lfs.setPermission(dirPath, perms);
+      }
+    }
+
 @Override
 public void run() {
   try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb47c3de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
old mode 100644
new mode 100755
index d863c6a..4d03f15
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ 

[28/50] [abbrv] hadoop git commit: HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean shutdown. Contributed by Shashikant Banerjee.

2018-04-10 Thread haibochen
HADOOP-15366. Add a helper shutdown routine in HadoopExecutor to ensure clean 
shutdown. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b345b76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b345b76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b345b76

Branch: refs/heads/YARN-1011
Commit: 0b345b765370515d7222154ad5cae9b86f137a76
Parents: eb47c3d
Author: Mukul Kumar Singh 
Authored: Sat Apr 7 16:29:01 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Sat Apr 7 16:29:01 2018 +0530

--
 .../hadoop/util/concurrent/HadoopExecutors.java | 34 +++-
 1 file changed, 33 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b345b76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
index 1bc6976..7a04c30 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/HadoopExecutors.java
@@ -27,7 +27,7 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
-
+import org.slf4j.Logger;
 
 /** Factory methods for ExecutorService, ScheduledExecutorService instances.
  * These executor service instances provide additional functionality (e.g
@@ -91,6 +91,38 @@ public final class HadoopExecutors {
 return Executors.newSingleThreadScheduledExecutor(threadFactory);
   }
 
+  /**
+   * Helper routine to shut down an ExecutorService.
+   *
+   * @param executorService - the ExecutorService to shut down
+   * @param logger  - Logger
+   * @param timeout - Timeout
+   * @param unit    - TimeUnit, generally seconds.
+   */
+  public static void shutdown(ExecutorService executorService, Logger logger,
+  long timeout, TimeUnit unit) {
+try {
+  if (executorService != null) {
+executorService.shutdown();
+try {
+  if (!executorService.awaitTermination(timeout, unit)) {
+executorService.shutdownNow();
+  }
+
+  if (!executorService.awaitTermination(timeout, unit)) {
+logger.error("Unable to shutdown properly.");
+  }
+} catch (InterruptedException e) {
+  logger.error("Error attempting to shutdown.", e);
+  executorService.shutdownNow();
+}
+  }
+} catch (Exception e) {
+  logger.error("Error during shutdown: ", e);
+  throw e;
+}
+  }
+
   //disable instantiation
   private HadoopExecutors() { }
 }
\ No newline at end of file
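
The helper wraps the usual two-phase shutdown (shutdown, await, shutdownNow, await again) behind a single call. A minimal caller sketch; the fixed-size pool, logger, and 30-second timeout here are illustrative, not part of the patch.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ShutdownExample {
  private static final Logger LOG =
      LoggerFactory.getLogger(ShutdownExample.class);

  public static void main(String[] args) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    pool.submit(() -> LOG.info("doing some work"));
    // One call replaces the shutdown/awaitTermination/shutdownNow dance,
    // waiting up to the given timeout twice before logging an error.
    HadoopExecutors.shutdown(pool, LOG, 30, TimeUnit.SECONDS);
  }
}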





[46/50] [abbrv] hadoop git commit: MAPREDUCE-6926. Allow MR jobs to opt out of oversubscription. Contributed by Haibo Chen.

2018-04-10 Thread haibochen
MAPREDUCE-6926. Allow MR jobs to opt out of oversubscription. Contributed by 
Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fd21fc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fd21fc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fd21fc5

Branch: refs/heads/YARN-1011
Commit: 8fd21fc51d588b9bc77e88011ab867a8cc92a6a5
Parents: c9133fa
Author: Miklos Szegedi 
Authored: Wed Jan 10 13:21:11 2018 -0800
Committer: Haibo Chen 
Committed: Mon Apr 9 17:42:19 2018 -0700

--
 .../v2/app/rm/RMContainerRequestor.java |  48 ++---
 .../v2/app/rm/TestRMContainerAllocator.java | 192 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   6 +
 .../src/main/resources/mapred-default.xml   |   8 +
 4 files changed, 231 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fd21fc5/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index bb3e1fa..d996690 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -111,6 +111,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
   .newSetFromMap(new ConcurrentHashMap<String, Boolean>());
   private final Set<String> blacklistRemovals = Collections
   .newSetFromMap(new ConcurrentHashMap<String, Boolean>());
+  private boolean optOutOfOversubscription;
 
   public RMContainerRequestor(ClientService clientService, AppContext context) {
 super(clientService, context);
@@ -136,9 +137,11 @@ public abstract class RMContainerRequestor extends RMCommunicator {
 public ContainerRequest(ContainerRequestEvent event, Priority priority,
 String nodeLabelExpression) {
   this(event.getAttemptID(), event.getCapability(), event.getHosts(),
-  event.getRacks(), priority, nodeLabelExpression);
+  event.getRacks(), priority, System.currentTimeMillis(),
+  nodeLabelExpression);
 }
 
+@VisibleForTesting
 public ContainerRequest(ContainerRequestEvent event, Priority priority,
 long requestTimeMs) {
   this(event.getAttemptID(), event.getCapability(), event.getHosts(),
@@ -146,13 +149,6 @@ public abstract class RMContainerRequestor extends RMCommunicator {
 }
 
 public ContainerRequest(TaskAttemptId attemptID,
-Resource capability, String[] hosts, String[] racks,
-Priority priority, String nodeLabelExpression) {
-  this(attemptID, capability, hosts, racks, priority,
-  System.currentTimeMillis(), nodeLabelExpression);
-}
-
-public ContainerRequest(TaskAttemptId attemptID,
 Resource capability, String[] hosts, String[] racks,
 Priority priority, long requestTimeMs, String nodeLabelExpression) {
   this.attemptID = attemptID;
@@ -186,6 +182,10 @@ public abstract class RMContainerRequestor extends RMCommunicator {
 MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT,
 MRJobConfig.DEFAULT_MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERCENT);
 LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
+optOutOfOversubscription = conf.getBoolean(
+MRJobConfig.MR_OVERSUBSCRIPTION_OPT_OUT,
+MRJobConfig.DEFAULT_MR_OVERSUBSCRIPTION_OPT_OUT);
+LOG.info("optOutOfOversubscription is " + optOutOfOversubscription);
 if (blacklistDisablePercent < -1 || blacklistDisablePercent > 100) {
   throw new YarnRuntimeException("Invalid blacklistDisablePercent: "
   + blacklistDisablePercent
@@ -398,20 +398,20 @@ public abstract class RMContainerRequestor extends RMCommunicator {
 for (String host : req.hosts) {
   // Data-local
   if (!isNodeBlacklisted(host)) {
-addResourceRequest(req.priority, host, req.capability,
+addGuaranteedResourceRequest(req.priority, host, req.capability,
 null);
   }
 }
 
 // Nothing Rack-local for now
 for (String rack : 
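
Per the hunk above, the AM reads the opt-out flag from job configuration at startup and then issues only guaranteed resource requests. A sketch of a job setting the flag, assuming the MRJobConfig.MR_OVERSUBSCRIPTION_OPT_OUT constant introduced by this patch (present on the YARN-1011 branch only); mapper/reducer and path wiring are elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class OptOutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Ask the AM to request only guaranteed containers for this job.
    conf.setBoolean(MRJobConfig.MR_OVERSUBSCRIPTION_OPT_OUT, true);
    Job job = Job.getInstance(conf, "opt-out-example");
    // ... set mapper/reducer, input and output paths as usual ...
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}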

[22/50] [abbrv] hadoop git commit: YARN-8107. Give an informative message when incorrect format is used in ATSv2 filter attributes. (Rohith Sharma K S via Haibo Chen)

2018-04-10 Thread haibochen
YARN-8107. Give an informative message when incorrect format is used in ATSv2 
filter attributes. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/024d7c08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/024d7c08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/024d7c08

Branch: refs/heads/YARN-1011
Commit: 024d7c08704e6a5fcc1f53a8f56a44c84c8d5fa0
Parents: b17dc9f
Author: Haibo Chen 
Authored: Fri Apr 6 09:37:21 2018 -0700
Committer: Haibo Chen 
Committed: Fri Apr 6 09:39:01 2018 -0700

--
 .../reader/TimelineParserForCompareExpr.java|  7 +-
 .../reader/TimelineParserForEqualityExpr.java   |  7 +-
 .../TestTimelineReaderWebServicesUtils.java | 25 
 3 files changed, 37 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
index 1b020d9..a582956 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForCompareExpr.java
@@ -282,7 +282,12 @@ abstract class TimelineParserForCompareExpr implements TimelineParser {
   parseValue(expr.substring(kvStartOffset, offset)));
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
index 7451713..2bdce38 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineParserForEqualityExpr.java
@@ -325,7 +325,12 @@ abstract class TimelineParserForEqualityExpr implements TimelineParser {
   }
 }
 if (filterList == null || filterList.getFilterList().isEmpty()) {
-  filterList = new TimelineFilterList(currentFilter);
+  if (currentFilter == null) {
+throw new TimelineParseException(
+"Invalid expression provided for " + exprName);
+  } else {
+filterList = new TimelineFilterList(currentFilter);
+  }
 } else if (currentFilter != null) {
   filterList.addFilter(currentFilter);
 }
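
Both parsers now fail fast with the expression name when an expression yields no filter, instead of wrapping a null filter and surfacing an opaque error later. A self-contained distillation of that guard, using stand-in types: ParseException here stands in for TimelineParseException, strings stand in for timeline filters, and "metricfilters" is a placeholder expression name.

import java.util.ArrayList;
import java.util.List;

// Stand-in for TimelineParseException.
class ParseException extends Exception {
  ParseException(String m) { super(m); }
}

public class GuardExample {
  static List<String> finishFilterList(List<String> filterList,
      String currentFilter, String exprName) throws ParseException {
    if (filterList == null || filterList.isEmpty()) {
      if (currentFilter == null) {
        // Previously a null filter was wrapped here and failed later with
        // an opaque error; now the parser names the offending expression.
        throw new ParseException("Invalid expression provided for " + exprName);
      }
      filterList = new ArrayList<>();
      filterList.add(currentFilter);
    } else if (currentFilter != null) {
      filterList.add(currentFilter);
    }
    return filterList;
  }

  public static void main(String[] args) {
    try {
      finishFilterList(null, null, "metricfilters");
    } catch (ParseException e) {
      System.out.println(e.getMessage());
    }
  }
}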

http://git-wip-us.apache.org/repos/asf/hadoop/blob/024d7c08/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesUtils.java
--
diff --git 

[30/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 6c6ac20..addec66 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.security.TestGroupsCaching;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
@@ -65,6 +66,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 .SimpleGroupsMapping;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.YarnVersionInfo;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
 import org.junit.Assert;
@@ -89,6 +92,8 @@ import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.DOT;
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
 .capacity.CapacitySchedulerConfiguration.FAIR_APP_ORDERING_POLICY;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler
+.capacity.CapacitySchedulerConfiguration.ROOT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -99,7 +104,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   private static final Log LOG = LogFactory.getLog(
   TestCapacitySchedulerAutoCreatedQueueBase.class);
   public static final int GB = 1024;
-  public final static ContainerUpdates NULL_UPDATE_REQUESTS =
+  public static final ContainerUpdates NULL_UPDATE_REQUESTS =
   new ContainerUpdates();
 
   public static final String A = CapacitySchedulerConfiguration.ROOT + ".a";
@@ -112,9 +117,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public static final String B3 = B + ".b3";
-  public static final String C1 = C + ".c1";
-  public static final String C2 = C + ".c2";
-  public static final String C3 = C + ".c3";
   public static final float A_CAPACITY = 20f;
   public static final float B_CAPACITY = 40f;
   public static final float C_CAPACITY = 20f;
@@ -124,8 +126,6 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final float B1_CAPACITY = 60f;
   public static final float B2_CAPACITY = 20f;
   public static final float B3_CAPACITY = 20f;
-  public static final float C1_CAPACITY = 20f;
-  public static final float C2_CAPACITY = 20f;
 
   public static final int NODE_MEMORY = 16;
 
@@ -147,12 +147,14 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String NODEL_LABEL_GPU = "GPU";
   public static final String NODEL_LABEL_SSD = "SSD";
 
+  public static final float NODE_LABEL_GPU_TEMPLATE_CAPACITY = 30.0f;
+  public static final float NODEL_LABEL_SSD_TEMPLATE_CAPACITY = 40.0f;
+
   protected MockRM mockRM = null;
   protected MockNM nm1 = null;
   protected MockNM nm2 = null;
   protected MockNM nm3 = null;
   protected CapacityScheduler cs;
-  private final TestCapacityScheduler tcs = new TestCapacityScheduler();
   protected SpyDispatcher dispatcher;
   private static EventHandler rmAppEventEventHandler;
 
@@ -215,15 +217,29 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   }
 
   protected void setupNodes(MockRM newMockRM) throws Exception {
+NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
+ssdLabel.setName(NODEL_LABEL_SSD);
+ssdLabel.setExclusivity(true);
+
 nm1 
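
The test setup above builds NodeLabel records through the Records factory before registering nodes against them. A minimal sketch of that pattern; the label names echo the test's "SSD"/"GPU" constants, and the exclusivity flags are illustrative.

import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.util.Records;

public class NodeLabelExample {
  public static void main(String[] args) {
    NodeLabel ssdLabel = Records.newRecord(NodeLabel.class);
    ssdLabel.setName("SSD");
    ssdLabel.setExclusivity(true);   // only "SSD" requests may run there

    NodeLabel gpuLabel = Records.newRecord(NodeLabel.class);
    gpuLabel.setName("GPU");
    gpuLabel.setExclusivity(false);  // non-exclusive: shareable partition

    System.out.println(ssdLabel + " / " + gpuLabel);
  }
}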

[31/50] [abbrv] hadoop git commit: YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. Contributed by Suma Shivaprasad.

2018-04-10 Thread haibochen
YARN-7574. Add support for Node Labels on Auto Created Leaf Queue Template. 
Contributed by Suma Shivaprasad.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/821b0de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/821b0de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/821b0de4

Branch: refs/heads/YARN-1011
Commit: 821b0de4c59156d4a65112de03ba3e7e1c88e309
Parents: 5700556
Author: Sunil G 
Authored: Mon Apr 9 21:17:22 2018 +0530
Committer: Sunil G 
Committed: Mon Apr 9 21:17:22 2018 +0530

--
 .../server/resourcemanager/RMServerUtils.java   |   5 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |  47 ++
 .../resourcemanager/scheduler/Allocation.java   |  12 +
 .../scheduler/SchedulerUtils.java   |  33 +-
 .../capacity/AutoCreatedLeafQueue.java  |   3 +-
 .../AutoCreatedQueueManagementPolicy.java   |  12 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../CapacitySchedulerConfiguration.java |  28 +
 .../scheduler/capacity/LeafQueue.java   |  11 +
 .../scheduler/capacity/ManagedParentQueue.java  |   5 +-
 .../GuaranteedOrZeroCapacityOverTimePolicy.java | 573 +++
 .../placement/PendingAskUpdateResult.java   |   8 +
 .../yarn/server/resourcemanager/MockNM.java |  15 +
 .../server/resourcemanager/TestAppManager.java  |  20 +-
 ...stCapacitySchedulerAutoCreatedQueueBase.java | 241 +---
 .../TestCapacitySchedulerAutoQueueCreation.java | 233 +---
 .../TestQueueManagementDynamicEditPolicy.java   |  30 +-
 17 files changed, 834 insertions(+), 444 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
index 33451295..ab6bbcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java
@@ -236,13 +236,14 @@ public class RMServerUtils {
*/
   public static void normalizeAndValidateRequests(List<ResourceRequest> ask,
   Resource maximumResource, String queueName, YarnScheduler scheduler,
-  RMContext rmContext)
-  throws InvalidResourceRequestException {
+  RMContext rmContext) throws InvalidResourceRequestException {
 // Get queue from scheduler
 QueueInfo queueInfo = null;
 try {
   queueInfo = scheduler.getQueueInfo(queueName, false, false);
 } catch (IOException e) {
+  //Queue may not exist since it could be auto-created in case of
+  // dynamic queues
 }
 
 for (ResourceRequest resReq : ask) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/821b0de4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index c23b135..1b1e2c4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
 

[25/50] [abbrv] hadoop git commit: YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. (Xuan Gong via wangda)

2018-04-10 Thread haibochen
YARN-1151. Ability to configure auxiliary services from HDFS-based JAR files. 
(Xuan Gong via wangda)

Change-Id: Ied37ff11e507fc86847753ba79486652c8fadfe9


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00ebec89
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00ebec89
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00ebec89

Branch: refs/heads/YARN-1011
Commit: 00ebec89f101347a5da44657e388b30c57ed9deb
Parents: d4e63cc
Author: Wangda Tan 
Authored: Fri Apr 6 21:25:57 2018 -0700
Committer: Wangda Tan 
Committed: Fri Apr 6 21:25:57 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   3 +
 .../containermanager/AuxServices.java   | 160 +-
 .../containermanager/ContainerManagerImpl.java  |   3 +-
 .../containermanager/TestAuxServices.java   | 167 +--
 4 files changed, 313 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 7a2a3ce..2590b6f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2106,6 +2106,9 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_AUX_SERVICES_CLASSPATH =
   NM_AUX_SERVICES + ".%s.classpath";
 
+  public static final String NM_AUX_SERVICE_REMOTE_CLASSPATH =
+  NM_AUX_SERVICES + ".%s.remote-classpath";
+
   public static final String NM_AUX_SERVICES_SYSTEM_CLASSES =
   NM_AUX_SERVICES + ".%s.system-classes";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00ebec89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
index 57cca50..c8b7a76 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.server.nodemanager.containermanager;
 
+import java.io.IOException;
+import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
@@ -29,45 +31,70 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.service.ServiceStateChangeListener;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.api.AuxiliaryLocalPathHandler;
 import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
 

[21/50] [abbrv] hadoop git commit: YARN-8083. [UI2] All YARN related configurations are paged together in conf page. Contributed by Gergely Novák.

2018-04-10 Thread haibochen
YARN-8083. [UI2] All YARN related configurations are paged together in conf 
page. Contributed by Gergely Novák.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b17dc9f5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b17dc9f5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b17dc9f5

Branch: refs/heads/YARN-1011
Commit: b17dc9f5f54fd91defc1d8646f8229da5fe7ccbb
Parents: ea3849f
Author: Sunil G 
Authored: Fri Apr 6 21:53:14 2018 +0530
Committer: Sunil G 
Committed: Fri Apr 6 21:53:14 2018 +0530

--
 .../main/webapp/app/controllers/yarn-tools/yarn-conf.js   | 10 +-
 .../main/webapp/app/templates/yarn-tools/yarn-conf.hbs|  6 +++---
 2 files changed, 12 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
index 2984346..cc3be2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-tools/yarn-conf.js
@@ -24,7 +24,15 @@ import ColumnDef from 'em-table/utils/column-definition';
 import YarnConf from '../../models/yarn-conf';
 
 export default Ember.Controller.extend({
-  tableDefinition: TableDef.create({
+  coreTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  mapredTableDefinition: TableDef.create({
+searchType: 'manual',
+  }),
+
+  yarnTableDefinition: TableDef.create({
 searchType: 'manual',
   }),
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b17dc9f5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
index 09a1410..c2108a2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-tools/yarn-conf.hbs
@@ -17,12 +17,12 @@
 }}
 
 Core Configuration
-{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForCoreColumnsFromModel rowCount=10 definition=coreTableDefinition}}
 
 YARN Configuration
-{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForYarnColumnsFromModel rowCount=10 definition=yarnTableDefinition}}
 
 MapReduce Configuration
-{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel rowCount=10 definition=tableDefinition}}
+{{em-table columns=columnsFromModel rows=rowsForMapredColumnsFromModel rowCount=10 definition=mapredTableDefinition}}
 
 {{outlet}}





[03/50] [abbrv] hadoop git commit: YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before using to access NM. Contributed by Sreenath Somarajapuram.

2018-04-10 Thread haibochen
YARN-8115. [UI2] URL data like nodeHTTPAddress must be encoded in UI before 
using to access NM. Contributed by Sreenath Somarajapuram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42cd367c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42cd367c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42cd367c

Branch: refs/heads/YARN-1011
Commit: 42cd367c9308b944bc71de6c07b6c3f028a0d874
Parents: b779f4f
Author: Sunil G 
Authored: Wed Apr 4 22:13:14 2018 +0530
Committer: Sunil G 
Committed: Wed Apr 4 22:13:14 2018 +0530

--
 .../webapp/app/components/node-menu-panel.js| 25 
 .../webapp/app/controllers/yarn-node-app.js |  3 ++-
 .../webapp/app/controllers/yarn-node-apps.js|  3 ++-
 .../app/controllers/yarn-node-container.js  |  3 ++-
 .../app/controllers/yarn-node-containers.js |  3 ++-
 .../main/webapp/app/controllers/yarn-node.js|  3 ++-
 .../webapp/app/controllers/yarn-nodes/table.js  |  2 +-
 .../src/main/webapp/app/helpers/node-link.js|  2 +-
 .../src/main/webapp/app/initializers/loader.js  |  1 +
 .../main/webapp/app/routes/yarn-node-apps.js|  8 ---
 .../webapp/app/routes/yarn-node-containers.js   |  8 ---
 .../src/main/webapp/app/routes/yarn-node.js |  8 ---
 .../templates/components/node-menu-panel.hbs|  8 +++
 13 files changed, 57 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
new file mode 100644
index 000..31457be
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/node-menu-panel.js
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import Ember from 'ember';
+
+export default Ember.Component.extend({
+  encodedAddr : Ember.computed("nodeAddr", function(){
+return encodeURIComponent(this.get('nodeAddr'));
+  })
+});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
index 3dc09fc..e0d58ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-app.js
@@ -22,6 +22,7 @@ export default Ember.Controller.extend({
 
   breadcrumbs: Ember.computed('model.nodeInfo', function () {
 var nodeInfo = this.get('model.nodeInfo');
+var addr = encodeURIComponent(nodeInfo.addr);
 return [{
   text: "Home",
   routeName: 'application'
@@ -30,7 +31,7 @@ export default Ember.Controller.extend({
   routeName: 'yarn-nodes.table'
 }, {
   text: `Node [ ${nodeInfo.id} ]`,
-  href: `#/yarn-node/${nodeInfo.id}/${nodeInfo.addr}`,
+  href: `#/yarn-node/${nodeInfo.id}/${addr}/info`,
 }, {
   text: `Application [ ${nodeInfo.appId} ]`,
 }];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/42cd367c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-node-apps.js
 
