[hadoop] branch trunk updated: YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by Adam Antal.

2019-07-03 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2a8be1  YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. 
Contributed by Adam Antal.
a2a8be1 is described below

commit a2a8be18cb5e912c8de0ea6beec1de4a99de656b
Author: Szilard Nemeth 
AuthorDate: Wed Jul 3 13:44:27 2019 +0200

YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by 
Adam Antal.
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  8 +++
 .../src/main/resources/yarn-default.xml| 17 -
 .../logaggregation/LogAggregationService.java  | 79 ++
 .../logaggregation/TestLogAggregationService.java  | 39 +++
 4 files changed, 112 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 3fb4a37..548d868 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1412,6 +1412,14 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = -1;
 
   /**
+   * The allowed hard minimum limit for {@link
+   * YarnConfiguration#NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS}.
+   */
+  public static final String MIN_LOG_ROLLING_INTERVAL_SECONDS = NM_PREFIX
+  + "log-aggregation.roll-monitoring-interval-seconds.min";
+  public static final long MIN_LOG_ROLLING_INTERVAL_SECONDS_DEFAULT = 3600;
+
+  /**
* Define how many aggregated log files per application per NM we can have
* in remote file system.
*/
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 7e52dd1..8582522 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3219,15 +3219,26 @@
   
 Defines how often NMs wake up to upload log files.
 The default value is -1. By default, the logs will be uploaded when
-the application is finished. By setting this configure, logs can be 
uploaded
-periodically when the application is running. The minimum 
rolling-interval-seconds
-can be set is 3600.
+the application is finished. By setting this configuration, logs can
+be uploaded periodically while the application is running.
+The minimum positive accepted value can be configured by the setting
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min".
 
 
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
 -1
   
 
   
+Defines the positive minimum hard limit for
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds".
+If this configuration is set to a value less than its default (3600),
+the NodeManager may raise a warning.
+
+
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min
+3600
+  
+
+  
 Define how many aggregated log files per application per NM
 we can have in remote file system. By default, the total number of
 aggregated log files per application per NM is 30.
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index d8db967..2280e75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -71,7 +71,6 @@ public class LogAggregationService extends AbstractService 
implements
 
   private static final Logger LOG =
LoggerFactory.getLogger(LogAggregationService.class);
-  private static final long MIN_LOG_ROLLING_INTERVAL = 3600;
   // This configuration is for debug and test purpose. By setting
   // this configuration as true. W

[hadoop] branch branch-3.1 updated: YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by Adam Antal.

2019-07-03 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 46177ad  YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. 
Contributed by Adam Antal.
46177ad is described below

commit 46177ade8bf3595f134b9509c6c0fa136c86c52a
Author: Szilard Nemeth 
AuthorDate: Wed Jul 3 13:44:27 2019 +0200

YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by 
Adam Antal.

(cherry picked from commit a2a8be18cb5e912c8de0ea6beec1de4a99de656b)
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  8 +++
 .../src/main/resources/yarn-default.xml| 17 -
 .../logaggregation/LogAggregationService.java  | 79 ++
 .../logaggregation/TestLogAggregationService.java  | 39 +++
 4 files changed, 112 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index c0ca231..0977f34 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1364,6 +1364,14 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = -1;
 
   /**
+   * The allowed hard minimum limit for {@link
+   * YarnConfiguration#NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS}.
+   */
+  public static final String MIN_LOG_ROLLING_INTERVAL_SECONDS = NM_PREFIX
+  + "log-aggregation.roll-monitoring-interval-seconds.min";
+  public static final long MIN_LOG_ROLLING_INTERVAL_SECONDS_DEFAULT = 3600;
+
+  /**
* Define how many aggregated log files per application per NM we can have
* in remote file system.
*/
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 7804ff7..641baf8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3025,15 +3025,26 @@
   
 Defines how often NMs wake up to upload log files.
 The default value is -1. By default, the logs will be uploaded when
-the application is finished. By setting this configure, logs can be 
uploaded
-periodically when the application is running. The minimum 
rolling-interval-seconds
-can be set is 3600.
+the application is finished. By setting this configuration, logs can
+be uploaded periodically while the application is running.
+The minimum positive accepted value can be configured by the setting
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min".
 
 
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
 -1
   
 
   
+Defines the positive minimum hard limit for
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds".
+If this configuration is set to a value less than its default (3600),
+the NodeManager may raise a warning.
+
+
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min
+3600
+  
+
+  
 Define how many aggregated log files per application per NM
 we can have in remote file system. By default, the total number of
 aggregated log files per application per NM is 30.
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index d8db967..2280e75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -71,7 +71,6 @@ public class LogAggregationService extends AbstractService 
implements
 
   private static final Logger LOG =
LoggerFactory.getLogger(LogAggregationService.class);
-  private static final long MIN_LOG_ROLLING_INTERVAL = 3600;
   // This configuration is for debu

[hadoop] branch branch-3.2 updated: YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by Adam Antal.

2019-07-04 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4638fa0  YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. 
Contributed by Adam Antal.
4638fa0 is described below

commit 4638fa00fc715469d8a07657ccf61261ae13
Author: Szilard Nemeth 
AuthorDate: Wed Jul 3 13:44:27 2019 +0200

YARN-9629. Support configurable MIN_LOG_ROLLING_INTERVAL. Contributed by 
Adam Antal.

(cherry picked from commit a2a8be18cb5e912c8de0ea6beec1de4a99de656b)
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  8 +++
 .../src/main/resources/yarn-default.xml| 17 -
 .../logaggregation/LogAggregationService.java  | 79 ++
 .../logaggregation/TestLogAggregationService.java  | 39 +++
 4 files changed, 112 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9774bde..d8dc7c0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1384,6 +1384,14 @@ public class YarnConfiguration extends Configuration {
   DEFAULT_NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS = -1;
 
   /**
+   * The allowed hard minimum limit for {@link
+   * YarnConfiguration#NM_LOG_AGGREGATION_ROLL_MONITORING_INTERVAL_SECONDS}.
+   */
+  public static final String MIN_LOG_ROLLING_INTERVAL_SECONDS = NM_PREFIX
+  + "log-aggregation.roll-monitoring-interval-seconds.min";
+  public static final long MIN_LOG_ROLLING_INTERVAL_SECONDS_DEFAULT = 3600;
+
+  /**
* Define how many aggregated log files per application per NM we can have
* in remote file system.
*/
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 72436f8..76ae2be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3134,15 +3134,26 @@
   
 Defines how often NMs wake up to upload log files.
 The default value is -1. By default, the logs will be uploaded when
-the application is finished. By setting this configure, logs can be 
uploaded
-periodically when the application is running. The minimum 
rolling-interval-seconds
-can be set is 3600.
+the application is finished. By setting this configuration, logs can
+be uploaded periodically while the application is running.
+The minimum positive accepted value can be configured by the setting
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min".
 
 
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds
 -1
   
 
   
+Defines the positive minimum hard limit for
+"yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds".
+If this configuration is set to a value less than its default (3600),
+the NodeManager may raise a warning.
+
+
yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds.min
+3600
+  
+
+  
 Define how many aggregated log files per application per NM
 we can have in remote file system. By default, the total number of
 aggregated log files per application per NM is 30.
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index d8db967..2280e75 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -71,7 +71,6 @@ public class LogAggregationService extends AbstractService 
implements
 
   private static final Logger LOG =
LoggerFactory.getLogger(LogAggregationService.class);
-  private static final long MIN_LOG_ROLLING_INTERVAL = 3600;
   // This configuration is for debu

[hadoop] branch trunk updated: YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. Contributed by Zoltan Siegl

2019-07-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9cec023  YARN-9625. UI2 - No link to a queue on the Queues page for 
Fair Scheduler. Contributed by Zoltan Siegl
9cec023 is described below

commit 9cec02318644c8430cbf65bcc3096ffe45992a8e
Author: Szilard Nemeth 
AuthorDate: Thu Jul 11 16:11:21 2019 +0200

YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. 
Contributed by Zoltan Siegl
---
 .../app/templates/components/yarn-queue/fair-queue.hbs   | 12 
 1 file changed, 12 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
index 85670da..4ef22fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -25,6 +25,18 @@
   
   
 
+
+  
+  Queue details for {{model.selected}}
+  
+  
+
+  {{model.selected}}
+
+  
+
+
+
   
   {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}}
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9573. DistributedShell cannot specify LogAggregationContext. Contributed by Adam Antal.

2019-07-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4216090  YARN-9573. DistributedShell cannot specify 
LogAggregationContext. Contributed by Adam Antal.
4216090 is described below

commit 4216090f19f17fa6dd850fdfdde308006bf50631
Author: Szilard Nemeth 
AuthorDate: Thu Jul 11 19:24:11 2019 +0200

YARN-9573. DistributedShell cannot specify LogAggregationContext. 
Contributed by Adam Antal.
---
 .../yarn/applications/distributedshell/Client.java | 47 --
 .../distributedshell/TestDistributedShell.java | 29 -
 2 files changed, 62 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 408afe3..7a5ec96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -93,6 +94,7 @@ import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -198,7 +200,9 @@ public class Client {
   private String nodeAttributeSpec = "";
   // log4j.properties file 
   // if available, add to local resources and set into classpath 
-  private String log4jPropFile = "";   
+  private String log4jPropFile = "";
+  // rolling
+  private String rollingFilesPattern = "";
 
   // Start time for client
   private final long clientStartTime = System.currentTimeMillis();
@@ -273,7 +277,7 @@ public class Client {
 }
 if (result) {
   LOG.info("Application completed successfully");
-  System.exit(0);  
+  System.exit(0);
 } 
 LOG.error("Application failed to complete successfully");
 System.exit(2);
@@ -337,6 +341,8 @@ public class Client {
 opts.addOption("enforce_execution_type", false,
 "Flag to indicate whether to enforce execution type of containers");
 opts.addOption("log_properties", true, "log4j.properties file");
+opts.addOption("rolling_log_pattern", true,
+"pattern for files that should be aggregated in a rolling fashion");
 opts.addOption("keep_containers_across_application_attempts", false,
 "Flag to indicate whether to keep containers across application "
 + "attempts."
@@ -434,6 +440,10 @@ public class Client {
   }
 }
 
+if (cliParser.hasOption("rolling_log_pattern")) {
+  rollingFilesPattern = cliParser.getOptionValue("rolling_log_pattern");
+}
+
 if (cliParser.hasOption("help")) {
   printUsage();
   return false;
@@ -479,7 +489,7 @@ public class Client {
 
 if (!cliParser.hasOption("jar")) {
   throw new IllegalArgumentException("No jar file specified for 
application master");
-}  
+}
 
 appMasterJar = cliParser.getOptionValue("jar");
 
@@ -669,16 +679,16 @@ public class Client {
 + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity()
 + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
 + ", queueApplicationCount=" + queueInfo.getApplications().size()
-+ ", queueChildQueueCount=" + queueInfo.getChildQueues().size());  

++ ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
 
 List listAclInfo = yarnClient.getQueueAclsInfo();
 for (QueueUs

[hadoop] branch branch-3.1 updated: YARN-9573. DistributedShell cannot specify LogAggregationContext. Contributed by Adam Antal.

2019-07-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new d590745  YARN-9573. DistributedShell cannot specify 
LogAggregationContext. Contributed by Adam Antal.
d590745 is described below

commit d5907450463ab2a3436ff4f28918d6253159cd76
Author: Szilard Nemeth 
AuthorDate: Thu Jul 11 19:54:31 2019 +0200

YARN-9573. DistributedShell cannot specify LogAggregationContext. 
Contributed by Adam Antal.
---
 .../yarn/applications/distributedshell/Client.java | 47 --
 .../distributedshell/TestDistributedShell.java | 29 -
 2 files changed, 62 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
index 9c1d8fc..1d225d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
@@ -65,6 +65,7 @@ import 
org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.LogAggregationContext;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -93,6 +94,7 @@ import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -196,7 +198,9 @@ public class Client {
   private String placementSpec = "";
   // log4j.properties file 
   // if available, add to local resources and set into classpath 
-  private String log4jPropFile = "";   
+  private String log4jPropFile = "";
+  // rolling
+  private String rollingFilesPattern = "";
 
   // Start time for client
   private final long clientStartTime = System.currentTimeMillis();
@@ -271,7 +275,7 @@ public class Client {
 }
 if (result) {
   LOG.info("Application completed successfully");
-  System.exit(0);  
+  System.exit(0);
 } 
 LOG.error("Application failed to complete successfully");
 System.exit(2);
@@ -335,6 +339,8 @@ public class Client {
 opts.addOption("enforce_execution_type", false,
 "Flag to indicate whether to enforce execution type of containers");
 opts.addOption("log_properties", true, "log4j.properties file");
+opts.addOption("rolling_log_pattern", true,
+"pattern for files that should be aggregated in a rolling fashion");
 opts.addOption("keep_containers_across_application_attempts", false,
 "Flag to indicate whether to keep containers across application "
 + "attempts."
@@ -432,6 +438,10 @@ public class Client {
   }
 }
 
+if (cliParser.hasOption("rolling_log_pattern")) {
+  rollingFilesPattern = cliParser.getOptionValue("rolling_log_pattern");
+}
+
 if (cliParser.hasOption("help")) {
   printUsage();
   return false;
@@ -476,7 +486,7 @@ public class Client {
 
 if (!cliParser.hasOption("jar")) {
   throw new IllegalArgumentException("No jar file specified for 
application master");
-}  
+}
 
 appMasterJar = cliParser.getOptionValue("jar");
 
@@ -666,16 +676,16 @@ public class Client {
 + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity()
 + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
 + ", queueApplicationCount=" + queueInfo.getApplications().size()
-+ ", queueChildQueueCount=" + queueInfo.getChildQueues().size());  

++ ", queueChildQueueCount=" + queueInfo.getChildQueues().size());
 
 List listAclInfo = yarnClient.getQueueAclsInfo();
 for (QueueUs

[hadoop] branch branch-3.2 updated: YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. Contributed by Zoltan Siegl

2019-07-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3e3bbb7  YARN-9625. UI2 - No link to a queue on the Queues page for 
Fair Scheduler. Contributed by Zoltan Siegl
3e3bbb7 is described below

commit 3e3bbb7f5e67aea70010a2fec7edb5b726c28c52
Author: Szilard Nemeth 
AuthorDate: Thu Jul 11 16:11:21 2019 +0200

YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. 
Contributed by Zoltan Siegl

(cherry picked from commit 9cec02318644c8430cbf65bcc3096ffe45992a8e)
---
 .../app/templates/components/yarn-queue/fair-queue.hbs   | 12 
 1 file changed, 12 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
index 85670da..4ef22fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -25,6 +25,18 @@
   
   
 
+
+  
+  Queue details for {{model.selected}}
+  
+  
+
+  {{model.selected}}
+
+  
+
+
+
   
   {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}}
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. Contributed by Zoltan Siegl

2019-07-11 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 872a039  YARN-9625. UI2 - No link to a queue on the Queues page for 
Fair Scheduler. Contributed by Zoltan Siegl
872a039 is described below

commit 872a039bacfc06cf09896f433a2d5b36d3ceaa16
Author: Szilard Nemeth 
AuthorDate: Thu Jul 11 16:11:21 2019 +0200

YARN-9625. UI2 - No link to a queue on the Queues page for Fair Scheduler. 
Contributed by Zoltan Siegl

(cherry picked from commit 9cec02318644c8430cbf65bcc3096ffe45992a8e)
---
 .../app/templates/components/yarn-queue/fair-queue.hbs   | 12 
 1 file changed, 12 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
index 85670da..4ef22fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue.hbs
@@ -25,6 +25,18 @@
   
   
 
+
+  
+  Queue details for {{model.selected}}
+  
+  
+
+  {{model.selected}}
+
+  
+
+
+
   
   {{yarn-queue.fair-queue-conf-table queue=model.selectedQueue}}
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9235. If linux container executor is not set for a GPU cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by Antal Balint Steinba

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c61c969  YARN-9235. If linux container executor is not set for a GPU 
cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. 
Contributed by Antal Balint Steinbach, Adam Antal
c61c969 is described below

commit c61c9696689399e339c0d4a45e588d9f39f8d819
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 16:51:58 2019 +0200

YARN-9235. If linux container executor is not set for a GPU cluster 
GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by 
Antal Balint Steinbach, Adam Antal

(cherry picked from commit c416284bb7581747beef36d7899d8680fe33abbd)
---
 .../resourceplugin/gpu/GpuResourcePlugin.java  | 22 +
 .../resourceplugin/gpu/TestGpuResourcePlugin.java  | 54 ++
 2 files changed, 76 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 393d76e..1ac6f83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -18,6 +18,7 @@
 
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
 
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
@@ -33,8 +34,14 @@ import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInforma
 import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo;
 
 import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GpuResourcePlugin implements ResourcePlugin {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(GpuResourcePlugin.class);
+
   private final GpuNodeResourceUpdateHandler resourceDiscoverHandler;
   private final GpuDiscoverer gpuDiscoverer;
   private GpuResourceHandlerImpl gpuResourceHandler = null;
@@ -84,6 +91,10 @@ public class GpuResourcePlugin implements ResourcePlugin {
   public synchronized NMResourceInfo getNMResourceInfo() throws YarnException {
 GpuDeviceInformation gpuDeviceInformation =
 gpuDiscoverer.getGpuDeviceInformation();
+
+//At this point the gpu plugin is already enabled
+checkGpuResourceHandler();
+
 GpuResourceAllocator gpuResourceAllocator =
 gpuResourceHandler.getGpuAllocator();
 List totalGpus = gpuResourceAllocator.getAllowedGpusCopy();
@@ -94,6 +105,17 @@ public class GpuResourcePlugin implements ResourcePlugin {
 assignedGpuDevices);
   }
 
+  private void checkGpuResourceHandler() throws YarnException {
+if(gpuResourceHandler == null) {
+  String errorMsg =
+  "Linux Container Executor is not configured for the NodeManager. "
+  + "To fully enable GPU feature on the node also set "
+  + YarnConfiguration.NM_CONTAINER_EXECUTOR + " properly.";
+  LOG.warn(errorMsg);
+  throw new YarnException(errorMsg);
+}
+  }
+
   @Override
   public String toString() {
 return GpuResourcePlugin.class.getName();
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
new file mode 100644
index 000..888f899
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.

[hadoop] branch branch-3.1 updated: YARN-9235. If linux container executor is not set for a GPU cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by Antal Balint Steinbach, Adam Antal

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 43c89d1  YARN-9235. If linux container executor is not set for a GPU 
cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. 
Contributed by Antal Balint Steinbach, Adam Antal
43c89d1 is described below

commit 43c89d1e2b65cda26cbf037f0cd45db617d9145a
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 16:51:58 2019 +0200

YARN-9235. If linux container executor is not set for a GPU cluster 
GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by 
Antal Balint Steinbach, Adam Antal

(cherry picked from commit c416284bb7581747beef36d7899d8680fe33abbd)
---
 .../resourceplugin/gpu/GpuResourcePlugin.java  | 22 +
 .../resourceplugin/gpu/TestGpuResourcePlugin.java  | 54 ++
 2 files changed, 76 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 393d76e..1ac6f83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -18,6 +18,7 @@
 
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
 
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
@@ -33,8 +34,14 @@ import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInforma
 import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo;
 
 import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GpuResourcePlugin implements ResourcePlugin {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(GpuResourcePlugin.class);
+
   private final GpuNodeResourceUpdateHandler resourceDiscoverHandler;
   private final GpuDiscoverer gpuDiscoverer;
   private GpuResourceHandlerImpl gpuResourceHandler = null;
@@ -84,6 +91,10 @@ public class GpuResourcePlugin implements ResourcePlugin {
   public synchronized NMResourceInfo getNMResourceInfo() throws YarnException {
 GpuDeviceInformation gpuDeviceInformation =
 gpuDiscoverer.getGpuDeviceInformation();
+
+//At this point the gpu plugin is already enabled
+checkGpuResourceHandler();
+
 GpuResourceAllocator gpuResourceAllocator =
 gpuResourceHandler.getGpuAllocator();
 List totalGpus = gpuResourceAllocator.getAllowedGpusCopy();
@@ -94,6 +105,17 @@ public class GpuResourcePlugin implements ResourcePlugin {
 assignedGpuDevices);
   }
 
+  private void checkGpuResourceHandler() throws YarnException {
+if(gpuResourceHandler == null) {
+  String errorMsg =
+  "Linux Container Executor is not configured for the NodeManager. "
+  + "To fully enable GPU feature on the node also set "
+  + YarnConfiguration.NM_CONTAINER_EXECUTOR + " properly.";
+  LOG.warn(errorMsg);
+  throw new YarnException(errorMsg);
+}
+  }
+
   @Override
   public String toString() {
 return GpuResourcePlugin.class.getName();
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
new file mode 100644
index 000..888f899
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.

[hadoop] branch trunk updated: YARN-9235. If linux container executor is not set for a GPU cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by Antal Balint Steinbach, Adam Antal

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c416284  YARN-9235. If linux container executor is not set for a GPU 
cluster GpuResourceHandlerImpl is not initialized and NPE is thrown. 
Contributed by Antal Balint Steinbach, Adam Antal
c416284 is described below

commit c416284bb7581747beef36d7899d8680fe33abbd
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 16:51:58 2019 +0200

YARN-9235. If linux container executor is not set for a GPU cluster 
GpuResourceHandlerImpl is not initialized and NPE is thrown. Contributed by 
Antal Balint Steinbach, Adam Antal
---
 .../resourceplugin/gpu/GpuResourcePlugin.java  | 22 +
 .../resourceplugin/gpu/TestGpuResourcePlugin.java  | 54 ++
 2 files changed, 76 insertions(+)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 393d76e..1ac6f83 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -18,6 +18,7 @@
 
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu;
 
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.privileged.PrivilegedOperationExecutor;
@@ -33,8 +34,14 @@ import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.GpuDeviceInforma
 import 
org.apache.hadoop.yarn.server.nodemanager.webapp.dao.gpu.NMGpuResourceInfo;
 
 import java.util.List;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class GpuResourcePlugin implements ResourcePlugin {
+
+  private static final Logger LOG =
+  LoggerFactory.getLogger(GpuResourcePlugin.class);
+
   private final GpuNodeResourceUpdateHandler resourceDiscoverHandler;
   private final GpuDiscoverer gpuDiscoverer;
   private GpuResourceHandlerImpl gpuResourceHandler = null;
@@ -84,6 +91,10 @@ public class GpuResourcePlugin implements ResourcePlugin {
   public synchronized NMResourceInfo getNMResourceInfo() throws YarnException {
 GpuDeviceInformation gpuDeviceInformation =
 gpuDiscoverer.getGpuDeviceInformation();
+
+//At this point the gpu plugin is already enabled
+checkGpuResourceHandler();
+
 GpuResourceAllocator gpuResourceAllocator =
 gpuResourceHandler.getGpuAllocator();
 List totalGpus = gpuResourceAllocator.getAllowedGpusCopy();
@@ -94,6 +105,17 @@ public class GpuResourcePlugin implements ResourcePlugin {
 assignedGpuDevices);
   }
 
+  private void checkGpuResourceHandler() throws YarnException {
+if(gpuResourceHandler == null) {
+  String errorMsg =
+  "Linux Container Executor is not configured for the NodeManager. "
+  + "To fully enable GPU feature on the node also set "
+  + YarnConfiguration.NM_CONTAINER_EXECUTOR + " properly.";
+  LOG.warn(errorMsg);
+  throw new YarnException(errorMsg);
+}
+  }
+
   @Override
   public String toString() {
 return GpuResourcePlugin.class.getName();
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
new file mode 100644
index 000..888f899
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuResourcePlugin.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the

[hadoop] branch trunk updated: YARN-9135. NM State store ResourceMappings serialization are tested with Strings instead of real Device objects. Contributed by Peter Bacsko

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 8b3c679  YARN-9135. NM State store ResourceMappings serialization are 
tested with Strings instead of real Device objects. Contributed by Peter Bacsko
8b3c679 is described below

commit 8b3c6791b13fc57891cf81e83d4b626b4f2932e6
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:20:42 2019 +0200

YARN-9135. NM State store ResourceMappings serialization are tested with 
Strings instead of real Device objects. Contributed by Peter Bacsko
---
 .../resources/numa/NumaResourceAllocation.java | 59 ++
 .../resources/numa/NumaResourceAllocator.java  | 34 -
 .../recovery/NMLeveldbStateStoreService.java   |  5 +-
 .../recovery/TestNMLeveldbStateStoreService.java   | 52 +++
 4 files changed, 91 insertions(+), 59 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
index f8d4739..e91ac3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
@@ -17,9 +17,11 @@
  */
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.numa;
 
+import com.google.common.collect.ImmutableMap;
+
 import java.io.Serializable;
-import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 
 /**
@@ -28,27 +30,18 @@ import java.util.Set;
  */
 public class NumaResourceAllocation implements Serializable {
   private static final long serialVersionUID = 6339719798446595123L;
-  private Map nodeVsMemory;
-  private Map nodeVsCpus;
+  private final ImmutableMap nodeVsMemory;
+  private final ImmutableMap nodeVsCpus;
 
-  public NumaResourceAllocation() {
-nodeVsMemory = new HashMap<>();
-nodeVsCpus = new HashMap<>();
+  public NumaResourceAllocation(Map memoryAllocations,
+  Map cpuAllocations) {
+nodeVsMemory = ImmutableMap.copyOf(memoryAllocations);
+nodeVsCpus = ImmutableMap.copyOf(cpuAllocations);
   }
 
   public NumaResourceAllocation(String memNodeId, long memory, String 
cpuNodeId,
   int cpus) {
-this();
-nodeVsMemory.put(memNodeId, memory);
-nodeVsCpus.put(cpuNodeId, cpus);
-  }
-
-  public void addMemoryNode(String memNodeId, long memory) {
-nodeVsMemory.put(memNodeId, memory);
-  }
-
-  public void addCpuNode(String cpuNodeId, int cpus) {
-nodeVsCpus.put(cpuNodeId, cpus);
+this(ImmutableMap.of(memNodeId, memory), ImmutableMap.of(cpuNodeId, cpus));
   }
 
   public Set getMemNodes() {
@@ -59,11 +52,37 @@ public class NumaResourceAllocation implements Serializable 
{
 return nodeVsCpus.keySet();
   }
 
-  public Map getNodeVsMemory() {
+  public ImmutableMap getNodeVsMemory() {
 return nodeVsMemory;
   }
 
-  public Map getNodeVsCpus() {
+  public ImmutableMap getNodeVsCpus() {
 return nodeVsCpus;
   }
-}
+
+  @Override
+  public String toString() {
+return "NumaResourceAllocation{" +
+"nodeVsMemory=" + nodeVsMemory +
+", nodeVsCpus=" + nodeVsCpus +
+'}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+NumaResourceAllocation that = (NumaResourceAllocation) o;
+return Objects.equals(nodeVsMemory, that.nodeVsMemory) &&
+Objects.equals(nodeVsCpus, that.nodeVsCpus);
+  }
+
+  @Override
+  public int hashCode() {
+return Objects.hash(nodeVsMemory, nodeVsCpus);
+  }
+}
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
index 08c3282..ac55e2f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-se

[hadoop] branch trunk updated: YARN-9337. GPU auto-discovery script runs even when the resource is given by hand. Contributed by Adam Antal

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 61b0c2b  YARN-9337. GPU auto-discovery script runs even when the 
resource is given by hand. Contributed by Adam Antal
61b0c2b is described below

commit 61b0c2bb7c0f18c4a666b96ca1603cbd4d27eb6d
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:28:14 2019 +0200

YARN-9337. GPU auto-discovery script runs even when the resource is given 
by hand. Contributed by Adam Antal
---
 .../resourceplugin/gpu/GpuDiscoverer.java  | 60 +-
 .../resourceplugin/gpu/TestGpuDiscoverer.java  | 19 ++-
 2 files changed, 53 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 0c55478..b52d767 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -69,6 +69,8 @@ public class GpuDiscoverer {
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
 
+  private List gpuDevicesFromUser;
+
   private void validateConfOrThrowException() throws YarnException {
 if (conf == null) {
   throw new YarnException("Please initialize (call initialize) before use "
@@ -141,6 +143,14 @@ public class GpuDiscoverer {
 }
   }
 
+  private boolean IsAutoDiscoveryEnabled() {
+String allowedDevicesStr = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+return allowedDevicesStr.equals(
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+  }
+
   /**
* Get list of GPU devices usable by YARN.
*
@@ -151,15 +161,13 @@ public class GpuDiscoverer {
   throws YarnException {
 validateConfOrThrowException();
 
-String allowedDevicesStr = conf.get(
-YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
-
-if (allowedDevicesStr.equals(
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES)) {
+if (IsAutoDiscoveryEnabled()) {
   return parseGpuDevicesFromAutoDiscoveredGpuInfo();
 } else {
-  return parseGpuDevicesFromUserDefinedValues(allowedDevicesStr);
+  if (gpuDevicesFromUser == null) {
+gpuDevicesFromUser = parseGpuDevicesFromUserDefinedValues();
+  }
+  return gpuDevicesFromUser;
 }
   }
 
@@ -191,16 +199,16 @@ public class GpuDiscoverer {
   }
 
   /**
-   * @param devices allowed devices coming from the config.
-   *  Individual devices should be separated by commas.
-   *  The format of individual devices should be:
-   *   <index:><minorNumber>
* @return List of GpuDevices
* @throws YarnException when a GPU device is defined as a duplicate.
* The first duplicate GPU device will be added to the exception message.
*/
-  private List parseGpuDevicesFromUserDefinedValues(String devices)
+  private List parseGpuDevicesFromUserDefinedValues()
   throws YarnException {
+String devices = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+
 if (devices.trim().isEmpty()) {
   throw GpuDeviceSpecificationException.createWithEmptyValueSpecified();
 }
@@ -242,19 +250,21 @@ public class GpuDiscoverer {
   public synchronized void initialize(Configuration config)
   throws YarnException {
 this.conf = config;
-numOfErrorExecutionSinceLastSucceed = 0;
-lookUpAutoDiscoveryBinary(config);
-
-// Try to discover GPU information once and print
-try {
-  LOG.info("Trying to discover GPU information ...");
-  GpuDeviceInformation info = getGpuDeviceInformation();
-  LOG.info("Discovered GPU information: " + info.toString());
-} catch (YarnException e) {
-  String msg =
-  "Failed to discover GPU information from system, exception message:"
-  + e.getMessage() + " continue...";
-  LOG.warn(msg);
+if (IsAutoDiscoveryEnabled()) {
+  numOfErrorE

[hadoop] branch branch-3.2 updated: YARN-9337. GPU auto-discovery script runs even when the resource is given by hand. Contributed by Adam Antal

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 0ede873  YARN-9337. GPU auto-discovery script runs even when the 
resource is given by hand. Contributed by Adam Antal
0ede873 is described below

commit 0ede873090f7b7c0163288b4cec748afd9ae2f4a
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:28:14 2019 +0200

YARN-9337. GPU auto-discovery script runs even when the resource is given 
by hand. Contributed by Adam Antal

(cherry picked from commit 61b0c2bb7c0f18c4a666b96ca1603cbd4d27eb6d)
---
 .../resourceplugin/gpu/GpuDiscoverer.java  | 60 +-
 .../resourceplugin/gpu/TestGpuDiscoverer.java  | 19 ++-
 2 files changed, 53 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 6cf6a8d..27a4ea1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -69,6 +69,8 @@ public class GpuDiscoverer {
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
 
+  private List gpuDevicesFromUser;
+
   private void validateConfOrThrowException() throws YarnException {
 if (conf == null) {
   throw new YarnException("Please initialize (call initialize) before use "
@@ -143,6 +145,14 @@ public class GpuDiscoverer {
 }
   }
 
+  private boolean IsAutoDiscoveryEnabled() {
+String allowedDevicesStr = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+return allowedDevicesStr.equals(
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+  }
+
   /**
* Get list of GPU devices usable by YARN.
*
@@ -153,15 +163,13 @@ public class GpuDiscoverer {
   throws YarnException {
 validateConfOrThrowException();
 
-String allowedDevicesStr = conf.get(
-YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
-
-if (allowedDevicesStr.equals(
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES)) {
+if (IsAutoDiscoveryEnabled()) {
   return parseGpuDevicesFromAutoDiscoveredGpuInfo();
 } else {
-  return parseGpuDevicesFromUserDefinedValues(allowedDevicesStr);
+  if (gpuDevicesFromUser == null) {
+gpuDevicesFromUser = parseGpuDevicesFromUserDefinedValues();
+  }
+  return gpuDevicesFromUser;
 }
   }
 
@@ -193,16 +201,16 @@ public class GpuDiscoverer {
   }
 
   /**
-   * @param devices allowed devices coming from the config.
-   *  Individual devices should be separated by commas.
-   *  The format of individual devices should be:
-   *   <index:><minorNumber>
* @return List of GpuDevices
* @throws YarnException when a GPU device is defined as a duplicate.
* The first duplicate GPU device will be added to the exception message.
*/
-  private List parseGpuDevicesFromUserDefinedValues(String devices)
+  private List parseGpuDevicesFromUserDefinedValues()
   throws YarnException {
+String devices = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+
 if (devices.trim().isEmpty()) {
   throw GpuDeviceSpecificationException.createWithEmptyValueSpecified();
 }
@@ -244,19 +252,21 @@ public class GpuDiscoverer {
   public synchronized void initialize(Configuration config)
   throws YarnException {
 this.conf = config;
-numOfErrorExecutionSinceLastSucceed = 0;
-lookUpAutoDiscoveryBinary(config);
-
-// Try to discover GPU information once and print
-try {
-  LOG.info("Trying to discover GPU information ...");
-  GpuDeviceInformation info = getGpuDeviceInformation();
-  LOG.info("Discovered GPU information: " + info.toString());
-} catch (YarnException e) {
-  String msg =
-  "Failed to discover GPU information from system, exception message:"
-  + e.getMessage() + " continue..

[hadoop] branch branch-3.1 updated: YARN-9337. GPU auto-discovery script runs even when the resource is given by hand. Contributed by Adam Antal

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 531e0c0  YARN-9337. GPU auto-discovery script runs even when the 
resource is given by hand. Contributed by Adam Antal
531e0c0 is described below

commit 531e0c0bc1ac93863bdfa43529751ee57c4a5cbe
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:28:14 2019 +0200

YARN-9337. GPU auto-discovery script runs even when the resource is given 
by hand. Contributed by Adam Antal

(cherry picked from commit 61b0c2bb7c0f18c4a666b96ca1603cbd4d27eb6d)
---
 .../resourceplugin/gpu/GpuDiscoverer.java  | 60 +-
 .../resourceplugin/gpu/TestGpuDiscoverer.java  | 19 ++-
 2 files changed, 53 insertions(+), 26 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 6cf6a8d..27a4ea1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -69,6 +69,8 @@ public class GpuDiscoverer {
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
 
+  private List gpuDevicesFromUser;
+
   private void validateConfOrThrowException() throws YarnException {
 if (conf == null) {
   throw new YarnException("Please initialize (call initialize) before use "
@@ -143,6 +145,14 @@ public class GpuDiscoverer {
 }
   }
 
+  private boolean IsAutoDiscoveryEnabled() {
+String allowedDevicesStr = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+return allowedDevicesStr.equals(
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+  }
+
   /**
* Get list of GPU devices usable by YARN.
*
@@ -153,15 +163,13 @@ public class GpuDiscoverer {
   throws YarnException {
 validateConfOrThrowException();
 
-String allowedDevicesStr = conf.get(
-YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
-
-if (allowedDevicesStr.equals(
-YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES)) {
+if (IsAutoDiscoveryEnabled()) {
   return parseGpuDevicesFromAutoDiscoveredGpuInfo();
 } else {
-  return parseGpuDevicesFromUserDefinedValues(allowedDevicesStr);
+  if (gpuDevicesFromUser == null) {
+gpuDevicesFromUser = parseGpuDevicesFromUserDefinedValues();
+  }
+  return gpuDevicesFromUser;
 }
   }
 
@@ -193,16 +201,16 @@ public class GpuDiscoverer {
   }
 
   /**
-   * @param devices allowed devices coming from the config.
-   *  Individual devices should be separated by commas.
-   *  The format of individual devices should be:
-   *   <index:><minorNumber>
* @return List of GpuDevices
* @throws YarnException when a GPU device is defined as a duplicate.
* The first duplicate GPU device will be added to the exception message.
*/
-  private List parseGpuDevicesFromUserDefinedValues(String devices)
+  private List parseGpuDevicesFromUserDefinedValues()
   throws YarnException {
+String devices = conf.get(
+YarnConfiguration.NM_GPU_ALLOWED_DEVICES,
+YarnConfiguration.AUTOMATICALLY_DISCOVER_GPU_DEVICES);
+
 if (devices.trim().isEmpty()) {
   throw GpuDeviceSpecificationException.createWithEmptyValueSpecified();
 }
@@ -244,19 +252,21 @@ public class GpuDiscoverer {
   public synchronized void initialize(Configuration config)
   throws YarnException {
 this.conf = config;
-numOfErrorExecutionSinceLastSucceed = 0;
-lookUpAutoDiscoveryBinary(config);
-
-// Try to discover GPU information once and print
-try {
-  LOG.info("Trying to discover GPU information ...");
-  GpuDeviceInformation info = getGpuDeviceInformation();
-  LOG.info("Discovered GPU information: " + info.toString());
-} catch (YarnException e) {
-  String msg =
-  "Failed to discover GPU information from system, exception message:"
-  + e.getMessage() + " continue..

[hadoop] branch trunk updated: YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by Zoltan Siegl

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 557056e  YARN-9626. UI2 - Fair scheduler queue apps page issues. 
Contributed by Zoltan Siegl
557056e is described below

commit 557056e18ea3d5b3fe3046f0ea4b4c7345ea21c5
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:40:14 2019 +0200

YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by 
Zoltan Siegl
---
 .../yarn-queue/fair-queue-conf-table.hbs   |  2 +-
 .../src/main/webapp/app/templates/yarn-queue.hbs   | 44 +++---
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
index b2448cf..fba7a55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
@@ -38,7 +38,7 @@
 
 
   Pending, Allocated, Reserved Containers
-  {{queue.pendingContainers}} , {{queue.allocatedContainers}} , 
{{queue.reservedContainers}}
+  {{#if 
queue.pendingContainers}}{{queue.pendingContainers}}{{else}}0{{/if}}, {{#if 
queue.allocatedContainers}}{{queue.allocatedContainers}}{{else}}0{{/if}}, {{#if 
queue.reservedContainers}}{{queue.reservedContainers}}{{else}}0{{/if}}
 
 
   Scheduling Policy
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 756ef69..8024ffa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -29,32 +29,34 @@
 {{em-table-simple-status-cell content=model.selectedQueue.state}}
   
   {{/if}}
-  
-
-  configured capacity
-  {{model.selectedQueue.capacity}}%
-
-
-  configured max capacity
-  {{model.selectedQueue.maxCapacity}}%
-
-{{#if model.selectedQueue.isLeafQueue}}
-
-  user limit
-  {{model.selectedQueue.userLimit}}%
-
-
-  user limit factor
-  {{model.selectedQueue.userLimitFactor}}
-
-{{/if}}
-  
+  {{#if (eq model.queues.firstObject.type "capacity")}}
+  
+
+  configured capacity
+  {{model.selectedQueue.capacity}}%
+
+
+  configured max capacity
+  {{model.selectedQueue.maxCapacity}}%
+
+{{#if model.selectedQueue.isLeafQueue}}
+
+  user limit
+  {{model.selectedQueue.userLimit}}%
+
+
+  user limit factor
+  {{model.selectedQueue.userLimitFactor}}
+
+{{/if}}
+  
+  {{/if}}
 
 
   {{#each model.selectedQueue.capacitiesBarChartData as |item|}}
 
{{lower item.label}}
-  {{item.value}}%
+{{item.value}}{{#if (eq 
model.queues.firstObject.type "fair")}} MB{{else}}%{{/if}}
 
   {{/each}}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by Zoltan Siegl

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4fa0de9  YARN-9626. UI2 - Fair scheduler queue apps page issues. 
Contributed by Zoltan Siegl
4fa0de9 is described below

commit 4fa0de9f04877b60137f4995124cee4ca0acf707
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:40:14 2019 +0200

YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by 
Zoltan Siegl

(cherry picked from commit 557056e18ea3d5b3fe3046f0ea4b4c7345ea21c5)
---
 .../yarn-queue/fair-queue-conf-table.hbs   |  2 +-
 .../src/main/webapp/app/templates/yarn-queue.hbs   | 44 +++---
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
index b2448cf..fba7a55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
@@ -38,7 +38,7 @@
 
 
   Pending, Allocated, Reserved Containers
-  {{queue.pendingContainers}} , {{queue.allocatedContainers}} , 
{{queue.reservedContainers}}
+  {{#if 
queue.pendingContainers}}{{queue.pendingContainers}}{{else}}0{{/if}}, {{#if 
queue.allocatedContainers}}{{queue.allocatedContainers}}{{else}}0{{/if}}, {{#if 
queue.reservedContainers}}{{queue.reservedContainers}}{{else}}0{{/if}}
 
 
   Scheduling Policy
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 756ef69..8024ffa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -29,32 +29,34 @@
 {{em-table-simple-status-cell content=model.selectedQueue.state}}
   
   {{/if}}
-  
-
-  configured capacity
-  {{model.selectedQueue.capacity}}%
-
-
-  configured max capacity
-  {{model.selectedQueue.maxCapacity}}%
-
-{{#if model.selectedQueue.isLeafQueue}}
-
-  user limit
-  {{model.selectedQueue.userLimit}}%
-
-
-  user limit factor
-  {{model.selectedQueue.userLimitFactor}}
-
-{{/if}}
-  
+  {{#if (eq model.queues.firstObject.type "capacity")}}
+  
+
+  configured capacity
+  {{model.selectedQueue.capacity}}%
+
+
+  configured max capacity
+  {{model.selectedQueue.maxCapacity}}%
+
+{{#if model.selectedQueue.isLeafQueue}}
+
+  user limit
+  {{model.selectedQueue.userLimit}}%
+
+
+  user limit factor
+  {{model.selectedQueue.userLimitFactor}}
+
+{{/if}}
+  
+  {{/if}}
 
 
   {{#each model.selectedQueue.capacitiesBarChartData as |item|}}
 
{{lower item.label}}
-  {{item.value}}%
+{{item.value}}{{#if (eq 
model.queues.firstObject.type "fair")}} MB{{else}}%{{/if}}
 
   {{/each}}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by Zoltan Siegl

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 773591e  YARN-9626. UI2 - Fair scheduler queue apps page issues. 
Contributed by Zoltan Siegl
773591e is described below

commit 773591ee42089a3dbf7f71324acbcfc68758dbd2
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:40:14 2019 +0200

YARN-9626. UI2 - Fair scheduler queue apps page issues. Contributed by 
Zoltan Siegl

(cherry picked from commit 557056e18ea3d5b3fe3046f0ea4b4c7345ea21c5)
---
 .../yarn-queue/fair-queue-conf-table.hbs   |  2 +-
 .../src/main/webapp/app/templates/yarn-queue.hbs   | 44 +++---
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
index b2448cf..fba7a55 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/components/yarn-queue/fair-queue-conf-table.hbs
@@ -38,7 +38,7 @@
 
 
   Pending, Allocated, Reserved Containers
-  {{queue.pendingContainers}} , {{queue.allocatedContainers}} , 
{{queue.reservedContainers}}
+  {{#if 
queue.pendingContainers}}{{queue.pendingContainers}}{{else}}0{{/if}}, {{#if 
queue.allocatedContainers}}{{queue.allocatedContainers}}{{else}}0{{/if}}, {{#if 
queue.reservedContainers}}{{queue.reservedContainers}}{{else}}0{{/if}}
 
 
   Scheduling Policy
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
index 756ef69..8024ffa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-queue.hbs
@@ -29,32 +29,34 @@
 {{em-table-simple-status-cell content=model.selectedQueue.state}}
   
   {{/if}}
-  
-
-  configured capacity
-  {{model.selectedQueue.capacity}}%
-
-
-  configured max capacity
-  {{model.selectedQueue.maxCapacity}}%
-
-{{#if model.selectedQueue.isLeafQueue}}
-
-  user limit
-  {{model.selectedQueue.userLimit}}%
-
-
-  user limit factor
-  {{model.selectedQueue.userLimitFactor}}
-
-{{/if}}
-  
+  {{#if (eq model.queues.firstObject.type "capacity")}}
+  
+
+  configured capacity
+  {{model.selectedQueue.capacity}}%
+
+
+  configured max capacity
+  {{model.selectedQueue.maxCapacity}}%
+
+{{#if model.selectedQueue.isLeafQueue}}
+
+  user limit
+  {{model.selectedQueue.userLimit}}%
+
+
+  user limit factor
+  {{model.selectedQueue.userLimitFactor}}
+
+{{/if}}
+  
+  {{/if}}
 
 
   {{#each model.selectedQueue.capacitiesBarChartData as |item|}}
 
{{lower item.label}}
-  {{item.value}}%
+{{item.value}}{{#if (eq 
model.queues.firstObject.type "fair")}} MB{{else}}%{{/if}}
 
   {{/each}}
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated (2a1451a -> bb37c6c)

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 2a1451a  HDFS-13791. Amend fix LOG string format.
 add bb37c6c  YARN-9337. Addendum to fix compilation error due to mockito 
spy call

No new revisions were added by this update.

Summary of changes:
 .../containermanager/resourceplugin/gpu/TestGpuDiscoverer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9337. Addendum to fix compilation error due to mockito spy call

2019-07-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2fcbdf4  YARN-9337. Addendum to fix compilation error due to mockito 
spy call
2fcbdf4 is described below

commit 2fcbdf4131ec21e785a31c6ac13bf05e6c6a3007
Author: Szilard Nemeth 
AuthorDate: Sat Jul 13 00:42:14 2019 +0200

YARN-9337. Addendum to fix compilation error due to mockito spy call

(cherry picked from commit bb37c6cb7ff2b810efd139525ad0a37937baa93c)
---
 .../containermanager/resourceplugin/gpu/TestGpuDiscoverer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
index a70e668..f0f100c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/TestGpuDiscoverer.java
@@ -505,7 +505,7 @@ public class TestGpuDiscoverer {
 Configuration conf = new Configuration();
 conf.set(YarnConfiguration.NM_GPU_ALLOWED_DEVICES, "0:1,2:3");
 
-GpuDiscoverer gpuSpy = spy(GpuDiscoverer.class);
+GpuDiscoverer gpuSpy = spy(new GpuDiscoverer());
 
 gpuSpy.initialize(conf);
 gpuSpy.getGpusUsableByYarn();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9360. Do not expose innards of QueueMetrics object into FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 91ce09e  YARN-9360. Do not expose innards of QueueMetrics object into 
FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko
91ce09e is described below

commit 91ce09e7065bacd7b4f09696fff35b789c52bcd7
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 10:47:10 2019 +0200

YARN-9360. Do not expose innards of QueueMetrics object into 
FSLeafQueue#computeMaxAMResource. Contributed by Peter Bacsko
---
 .../resourcemanager/scheduler/QueueMetrics.java| 34 --
 .../scheduler/fair/FSLeafQueue.java| 33 -
 2 files changed, 38 insertions(+), 29 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index d126f09..c126338 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -833,7 +833,37 @@ public class QueueMetrics implements MetricsSource {
 return aggregateContainersPreempted.value();
   }
 
-  public QueueMetricsForCustomResources getQueueMetricsForCustomResources() {
-return queueMetricsForCustomResources;
+  /**
+   * Fills in Resource values from available metrics values of custom resources
+   * to @code{targetResource}, only if the corresponding
+   * value of @code{targetResource} is zero.
+   * If @code{fromResource} has a value less than the available metrics value
+   * for a particular resource, it will be set to the @code{targetResource}
+   * instead.
+   *
+   * @param fromResource The resource to compare available resource values 
with.
+   * @param targetResource The resource to save the values into.
+   */
+  public void fillInValuesFromAvailableResources(Resource fromResource,
+  Resource targetResource) {
+if (queueMetricsForCustomResources != null) {
+  QueueMetricsCustomResource availableResources =
+  queueMetricsForCustomResources.getAvailable();
+
+  // We expect all custom resources contained in availableResources,
+  // so we will loop through all of them.
+  for (Map.Entry<String, Long> availableEntry : availableResources
+  .getValues().entrySet()) {
+String resourceName = availableEntry.getKey();
+
+// We only update the value if fairshare is 0 for that resource.
+if (targetResource.getResourceValue(resourceName) == 0) {
+  Long availableValue = availableEntry.getValue();
+  long value = Math.min(availableValue,
+  fromResource.getResourceValue(resourceName));
+  targetResource.setResourceValue(resourceName, value);
+}
+  }
+}
   }
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 361355b..afea3d5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -23,7 +23,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -43,8 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsCustomResource;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetricsForCustomResources;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
 import

[hadoop] branch trunk updated: SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new be784de  SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in 
RunJobCli. Contributed by Adam Antal
be784de is described below

commit be784de2d4c8d7ae2724cf348925a0fbdbe0c503
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:17:16 2019 +0200

SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. 
Contributed by Adam Antal
---
 .../submarine/client/cli/runjob/RunJobCli.java | 149 -
 1 file changed, 88 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
index 7b544c1..dfd951f 100644
--- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
+++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
@@ -71,13 +71,22 @@ import java.util.Map;
 public class RunJobCli extends AbstractCli {
   private static final Logger LOG =
   LoggerFactory.getLogger(RunJobCli.class);
+
+  private static final String TENSORFLOW = "TensorFlow";
+  private static final String PYTORCH = "PyTorch";
+  private static final String PS = "PS";
+  private static final String WORKER = "worker";
+  private static final String TENSORBOARD = "TensorBoard";
+
   private static final String CAN_BE_USED_WITH_TF_PYTORCH =
-  "Can be used with TensorFlow or PyTorch frameworks.";
-  private static final String CAN_BE_USED_WITH_TF_ONLY =
-  "Can only be used with TensorFlow framework.";
+  String.format("Can be used with %s or %s frameworks.",
+  TENSORFLOW, PYTORCH);
+  private static final String TENSORFLOW_ONLY =
+  String.format("Can only be used with %s framework.", TENSORFLOW);
   public static final String YAML_PARSE_FAILED = "Failed to parse " +
   "YAML config";
-
+  private static final String LOCAL_OR_ANY_FS_DIRECTORY = "Could be a local " +
+  "directory or any other directory on the file system.";
 
   private Options options;
   private JobSubmitter jobSubmitter;
@@ -112,50 +121,55 @@ public class RunJobCli extends AbstractCli {
 Framework.getValues()));
 options.addOption(CliConstants.NAME, true, "Name of the job");
 options.addOption(CliConstants.INPUT_PATH, true,
-"Input of the job, could be local or other FS directory");
+"Input of the job. " + LOCAL_OR_ANY_FS_DIRECTORY);
 options.addOption(CliConstants.CHECKPOINT_PATH, true,
-"Training output directory of the job, "
-+ "could be local or other FS directory. This typically includes "
-+ "checkpoint files and exported model ");
+"Training output directory of the job. " + LOCAL_OR_ANY_FS_DIRECTORY +
+"This typically includes checkpoint files and exported model");
 options.addOption(CliConstants.SAVED_MODEL_PATH, true,
-"Model exported path (savedmodel) of the job, which is needed when "
-+ "exported model is not placed under ${checkpoint_path}"
-+ "could be local or other FS directory. " +
-"This will be used to serve.");
+"Model exported path (saved model) of the job, which is needed when " +
+"exported model is not placed under ${checkpoint_path}. " +
+LOCAL_OR_ANY_FS_DIRECTORY + "This will be used to serve");
 options.addOption(CliConstants.DOCKER_IMAGE, true, "Docker image 
name/tag");
+options.addOption(CliConstants.PS_DOCKER_IMAGE, true,
+getDockerImageMessage(PS));
+options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
+getDockerImageMessage(WORKER));
 options.addOption(CliConstants.QUEUE, true,
-"Name of queue to run the job, by default it uses default queue");
+"Name of queue to run the job. By default, the default queue is used");
 
 addWorkerOptions(options);
 addPSOptions(options);
 addTensorboardOptions(options);
 
 options.addOption(CliConstants.ENV, true,
-"Common environment variable of worker/ps");
+"Common environment variable passed to worker / PS");
 options.addOption(CliConstants.VERBOSE, false,
 "Print verbose log for troubleshooting");
 options.addOption(CliConstan

[hadoop] branch trunk updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 18ee109  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
18ee109 is described below

commit 18ee1092b471c5337f05809f8f01dae415e51a3a
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/fpga
+ 
+
+ 
+   yarn.resource-types.yarn.io/fpga.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/gpu
+ 
+
+ 
+   yarn.resource-types.yarn.io/gpu.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index b52d767..986f84a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map<String, String> environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
+++ 
b/hadoop-yarn-project/hado

[hadoop] branch branch-3.2 updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 28d6a45  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
28d6a45 is described below

commit 28d6a453a9d5b1ec12a1b5ec4f21cf275f01d3d4
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko

(cherry picked from commit 18ee1092b471c5337f05809f8f01dae415e51a3a)
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/fpga
+ 
+
+ 
+   yarn.resource-types.yarn.io/fpga.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/gpu
+ 
+
+ 
+   yarn.resource-types.yarn.io/gpu.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 27a4ea1..ce76722 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/

[hadoop] branch branch-3.1 updated: YARN-9127. Create more tests to verify GpuDeviceInformationParser. Contributed by Peter Bacsko

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 30c7b43  YARN-9127. Create more tests to verify 
GpuDeviceInformationParser. Contributed by Peter Bacsko
30c7b43 is described below

commit 30c7b432276bf66cc6b8a88305cbb0cee87f62e9
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:59:11 2019 +0200

YARN-9127. Create more tests to verify GpuDeviceInformationParser. 
Contributed by Peter Bacsko

(cherry picked from commit 18ee1092b471c5337f05809f8f01dae415e51a3a)
---
 .../resource-types-error-redefine-fpga-unit.xml|  45 ++
 .../resource-types-error-redefine-gpu-unit.xml |  45 ++
 .../resourceplugin/gpu/GpuDiscoverer.java  |   2 +-
 .../webapp/dao/gpu/GpuDeviceInformation.java   |  18 +-
 .../webapp/dao/gpu/GpuDeviceInformationParser.java |  48 +-
 .../webapp/dao/gpu/PerGpuDeviceInformation.java|   1 -
 .../webapp/dao/gpu/PerGpuMemoryUsage.java  |   7 +-
 .../dao/gpu/TestGpuDeviceInformationParser.java| 157 +-
 .../test/resources/nvidia-smi-output-excerpt.xml   |  71 +++
 .../resources/nvidia-smi-output-missing-tags.xml   |  28 ++
 .../resources/nvidia-smi-output-missing-tags2.xml  |  61 +++
 .../test/resources/nvidia-smi-sample-output.xml| 547 +
 .../test/resources/nvidia-smi-sample-xml-output| 547 -
 13 files changed, 977 insertions(+), 600 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
new file mode 100644
index 000..72cfb98
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-fpga-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/fpga
+ 
+
+ 
+   yarn.resource-types.yarn.io/fpga.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
new file mode 100644
index 000..aa61b5f
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/resource-types/resource-types-error-redefine-gpu-unit.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+ 
+   yarn.resource-types
+   yarn.io/gpu
+ 
+
+ 
+   yarn.resource-types.yarn.io/gpu.units
+   G
+ 
+
+
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 27a4ea1..ce76722 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -64,7 +64,6 @@ public class GpuDiscoverer {
   private Configuration conf = null;
   private String pathOfGpuBinary = null;
   private Map environment = new HashMap<>();
-  private GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
 
   private int numOfErrorExecutionSinceLastSucceed = 0;
   private GpuDeviceInformation lastDiscoveredGpuInformation = null;
@@ -125,6 +124,7 @@ public class GpuDiscoverer {
 try {
   output = Shell.execCommand(environment,
   new String[] { pathOfGpuBinary, "-x", "-q" }, MAX_EXEC_TIMEOUT_MS);
+  GpuDeviceInformationParser parser = new GpuDeviceInformationParser();
   lastDiscoveredGpuInformation = parser.parseXml(output);
   numOfErrorExecutionSinceLastSucceed = 0;
   return lastDiscoveredGpuInformation;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/GpuDeviceInformation.java
index 837d5cc..c830d43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/

[hadoop] branch submarine-0.2 updated: SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch submarine-0.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/submarine-0.2 by this push:
 new cc382eb  SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in 
RunJobCli. Contributed by Adam Antal
cc382eb is described below

commit cc382eb163af3a2621c39682e1d6c91ff556fde9
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 11:17:16 2019 +0200

SUBMARINE-62. PS_LAUNCH_CMD CLI description is wrong in RunJobCli. 
Contributed by Adam Antal

(cherry picked from commit be784de2d4c8d7ae2724cf348925a0fbdbe0c503)
---
 .../submarine/client/cli/runjob/RunJobCli.java | 149 -
 1 file changed, 88 insertions(+), 61 deletions(-)

diff --git 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
index 7b544c1..dfd951f 100644
--- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
+++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/runjob/RunJobCli.java
@@ -71,13 +71,22 @@ import java.util.Map;
 public class RunJobCli extends AbstractCli {
   private static final Logger LOG =
   LoggerFactory.getLogger(RunJobCli.class);
+
+  private static final String TENSORFLOW = "TensorFlow";
+  private static final String PYTORCH = "PyTorch";
+  private static final String PS = "PS";
+  private static final String WORKER = "worker";
+  private static final String TENSORBOARD = "TensorBoard";
+
   private static final String CAN_BE_USED_WITH_TF_PYTORCH =
-  "Can be used with TensorFlow or PyTorch frameworks.";
-  private static final String CAN_BE_USED_WITH_TF_ONLY =
-  "Can only be used with TensorFlow framework.";
+  String.format("Can be used with %s or %s frameworks.",
+  TENSORFLOW, PYTORCH);
+  private static final String TENSORFLOW_ONLY =
+  String.format("Can only be used with %s framework.", TENSORFLOW);
   public static final String YAML_PARSE_FAILED = "Failed to parse " +
   "YAML config";
-
+  private static final String LOCAL_OR_ANY_FS_DIRECTORY = "Could be a local " +
+  "directory or any other directory on the file system.";
 
   private Options options;
   private JobSubmitter jobSubmitter;
@@ -112,50 +121,55 @@ public class RunJobCli extends AbstractCli {
 Framework.getValues()));
 options.addOption(CliConstants.NAME, true, "Name of the job");
 options.addOption(CliConstants.INPUT_PATH, true,
-"Input of the job, could be local or other FS directory");
+"Input of the job. " + LOCAL_OR_ANY_FS_DIRECTORY);
 options.addOption(CliConstants.CHECKPOINT_PATH, true,
-"Training output directory of the job, "
-+ "could be local or other FS directory. This typically includes "
-+ "checkpoint files and exported model ");
+"Training output directory of the job. " + LOCAL_OR_ANY_FS_DIRECTORY +
+"This typically includes checkpoint files and exported model");
 options.addOption(CliConstants.SAVED_MODEL_PATH, true,
-"Model exported path (savedmodel) of the job, which is needed when "
-+ "exported model is not placed under ${checkpoint_path}"
-+ "could be local or other FS directory. " +
-"This will be used to serve.");
+"Model exported path (saved model) of the job, which is needed when " +
+"exported model is not placed under ${checkpoint_path}. " +
+LOCAL_OR_ANY_FS_DIRECTORY + "This will be used to serve");
 options.addOption(CliConstants.DOCKER_IMAGE, true, "Docker image 
name/tag");
+options.addOption(CliConstants.PS_DOCKER_IMAGE, true,
+getDockerImageMessage(PS));
+options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
+getDockerImageMessage(WORKER));
 options.addOption(CliConstants.QUEUE, true,
-"Name of queue to run the job, by default it uses default queue");
+"Name of queue to run the job. By default, the default queue is used");
 
 addWorkerOptions(options);
 addPSOptions(options);
 addTensorboardOptions(options);
 
 options.addOption(CliConstants.ENV, true,
-"Common environment variable of worker/ps");
+"Common environment variable passed to worker / PS");
 options.addOption(CliConstants.VERBOS

[hadoop] branch trunk updated: YARN-9326. Fair Scheduler configuration defaults are not documented in case of min and maxResources. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 5446308  YARN-9326. Fair Scheduler configuration defaults are not 
documented in case of min and maxResources. Contributed by Adam Antal
5446308 is described below

commit 5446308360f57cb98c54c416231788ba9ae332f8
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 13:28:01 2019 +0200

YARN-9326. Fair Scheduler configuration defaults are not documented in case 
of min and maxResources. Contributed by Adam Antal
---
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 5f9e779..991796a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,13 +86,19 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an 
optional attribute 'type', which when set to 'parent' makes it a parent queue. 
This is useful when we want to create a parent queue without configuring any 
leaf queues. Each queue element may contain the following properties:
 
-* **minResources**: minimum resources the queue is entitled to, in the 
form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is 
required when specifying resources other than memory and CPU. For the 
single-resource fairness policy, the vcores value is ignored. If a queue's 
minimum share is not satisfied, it will be offered available resources before 
any other queue under the same parent. Under the single-resource fairness 
policy, a queue is considered unsatisfied if its [...]
+* **minResources**: minimum resources the queue is entitled to. For the 
single-resource fairness policy, only the memory is used, other resources are 
ignored. If a queue's minimum share is not satisfied, it will be offered 
available resources before any other queue under the same parent. Under the 
single-resource fairness policy, a queue is considered unsatisfied if its 
memory usage is below its minimum memory share. Under dominant resource 
fairness, a queue is considered unsatisfied [...]
 
-* **maxResources**: maximum resources a queue will allocated, expressed in 
the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, 
memory-mb=Y". The last form is required when specifying resources other than 
memory and CPU. In the last form, X and Y can either be a percentage or an 
integer resource value without units. In the latter case the units will be 
inferred from the default units configured for that resource. A queue will not 
be assigned a container that would p [...]
+* **maxResources**: maximum resources a queue can be allocated. A queue 
will not be assigned a container that would put its aggregate usage over this 
limit. This limit is enforced recursively, the queue will not be assigned a 
container if that assignment would put the queue or its parent(s) over the 
maximum resources.
 
-* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container, expressed in the form of "X mb, Y vcores" or "vcores=X, 
memory-mb=Y". The latter form is required when specifying resources other than 
memory and CPU. If the property is not set it's value is inherited from a 
parent queue. It's default value is **yarn.scheduler.maximum-allocation-mb**. 
Cannot be higher than **maxResources**. This property is invalid for root queue.
+* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container. If the property is not set, its value is inherited from a 
parent queue. The default values are **yarn.scheduler.maximum-allocation-mb** 
and **yarn.scheduler.maximum-allocation-vcores**. Cannot be higher than 
**maxResources**. This property is invalid for root queue.
 
-* **maxChildResources**: maximum resources an ad hoc child queue will 
allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y 
vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying 
resources other than memory and CPU. In the last form, X and Y can either be a 
percentage or an integer resource value without units. In the latter case the 
units will be inferred from the default units configured for that resource. An 
ad hoc child queue will not be  [...]
+* **maxChildResources**:

[hadoop] branch branch-3.2 updated: YARN-9326. Fair Scheduler configuration defaults are not documented in case of min and maxResources. Contributed by Adam Antal

2019-07-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 7c9cfc0  YARN-9326. Fair Scheduler configuration defaults are not 
documented in case of min and maxResources. Contributed by Adam Antal
7c9cfc0 is described below

commit 7c9cfc0996316c08f23a4fb3a53e10fd20521d7b
Author: Szilard Nemeth 
AuthorDate: Mon Jul 15 13:28:01 2019 +0200

YARN-9326. Fair Scheduler configuration defaults are not documented in case 
of min and maxResources. Contributed by Adam Antal

(cherry picked from commit 5446308360f57cb98c54c416231788ba9ae332f8)
---
 .../hadoop-yarn-site/src/site/markdown/FairScheduler.md| 14 ++
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
index 5f9e779..991796a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/FairScheduler.md
@@ -86,13 +86,19 @@ The allocation file must be in XML format. The format 
contains five types of ele
 
 * **Queue elements**: which represent queues. Queue elements can take an 
optional attribute 'type', which when set to 'parent' makes it a parent queue. 
This is useful when we want to create a parent queue without configuring any 
leaf queues. Each queue element may contain the following properties:
 
-* **minResources**: minimum resources the queue is entitled to, in the 
form of "X mb, Y vcores" or "vcores=X, memory-mb=Y". The latter form is 
required when specifying resources other than memory and CPU. For the 
single-resource fairness policy, the vcores value is ignored. If a queue's 
minimum share is not satisfied, it will be offered available resources before 
any other queue under the same parent. Under the single-resource fairness 
policy, a queue is considered unsatisfied if its [...]
+* **minResources**: minimum resources the queue is entitled to. For the 
single-resource fairness policy, only the memory is used, other resources are 
ignored. If a queue's minimum share is not satisfied, it will be offered 
available resources before any other queue under the same parent. Under the 
single-resource fairness policy, a queue is considered unsatisfied if its 
memory usage is below its minimum memory share. Under dominant resource 
fairness, a queue is considered unsatisfied [...]
 
-* **maxResources**: maximum resources a queue will allocated, expressed in 
the form of "X%", "X% cpu, Y% memory", "X mb, Y vcores", or "vcores=X, 
memory-mb=Y". The last form is required when specifying resources other than 
memory and CPU. In the last form, X and Y can either be a percentage or an 
integer resource value without units. In the latter case the units will be 
inferred from the default units configured for that resource. A queue will not 
be assigned a container that would p [...]
+* **maxResources**: maximum resources a queue can be allocated. A queue 
will not be assigned a container that would put its aggregate usage over this 
limit. This limit is enforced recursively, the queue will not be assigned a 
container if that assignment would put the queue or its parent(s) over the 
maximum resources.
 
-* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container, expressed in the form of "X mb, Y vcores" or "vcores=X, 
memory-mb=Y". The latter form is required when specifying resources other than 
memory and CPU. If the property is not set it's value is inherited from a 
parent queue. It's default value is **yarn.scheduler.maximum-allocation-mb**. 
Cannot be higher than **maxResources**. This property is invalid for root queue.
+* **maxContainerAllocation**: maximum resources a queue can allocate for a 
single container. If the property is not set, its value is inherited from a 
parent queue. The default values are **yarn.scheduler.maximum-allocation-mb** 
and **yarn.scheduler.maximum-allocation-vcores**. Cannot be higher than 
**maxResources**. This property is invalid for root queue.
 
-* **maxChildResources**: maximum resources an ad hoc child queue will 
allocated, expressed in the form of "X%", "X% cpu, Y% memory", "X mb, Y 
vcores", or "vcores=X, memory-mb=Y". The last form is required when specifying 
resources other than memory and CPU. In the last form, X and Y can either be a 
percentage or an integer resource value without units. In the latter case the 
units will be inferred from the default units configure

[hadoop] branch trunk updated: MAPREDUCE-7225: Fix broken current folder expansion during MR job start. Contributed by Peter Bacsko.

2019-08-01 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a7371a7  MAPREDUCE-7225: Fix broken current folder expansion during MR 
job start. Contributed by Peter Bacsko.
a7371a7 is described below

commit a7371a779c591893700df1df279330589474960c
Author: Szilard Nemeth 
AuthorDate: Thu Aug 1 13:01:30 2019 +0200

MAPREDUCE-7225: Fix broken current folder expansion during MR job start. 
Contributed by Peter Bacsko.
---
 .../hadoop/mapreduce/JobResourceUploader.java  | 25 -
 .../hadoop/mapreduce/TestJobResourceUploader.java  | 64 +-
 2 files changed, 85 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index e106a54..c8686d7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -59,6 +59,8 @@ import com.google.common.annotations.VisibleForTesting;
 class JobResourceUploader {
   protected static final Logger LOG =
   LoggerFactory.getLogger(JobResourceUploader.class);
+  private static final String ROOT_PATH = "/";
+
   private final boolean useWildcard;
   private final FileSystem jtFs;
   private SharedCacheClient scClient = null;
@@ -674,9 +676,30 @@ class JobResourceUploader {
 if (FileUtil.compareFs(remoteFs, jtFs)) {
   return originalPath;
 }
+
+boolean root = false;
+if (ROOT_PATH.equals(originalPath.toUri().getPath())) {
+  // "/" needs special treatment
+  root = true;
+} else {
+  // If originalPath ends in a "/", then remove it so
+  // that originalPath.getName() does not return an empty string
+  String uriString = originalPath.toUri().toString();
+  if (uriString.endsWith("/")) {
+try {
+  URI strippedURI =
+  new URI(uriString.substring(0, uriString.length() - 1));
+  originalPath = new Path(strippedURI);
+} catch (URISyntaxException e) {
+  throw new IllegalArgumentException("Error processing URI", e);
+}
+  }
+}
+
 // this might have name collisions. copy will throw an exception
 // parse the original path to create new path
-Path newPath = new Path(parentDir, originalPath.getName());
+Path newPath = root ?
+parentDir : new Path(parentDir, originalPath.getName());
 FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
 jtFs.setReplication(newPath, replication);
 jtFs.makeQualified(newPath);
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index 8033897..bbfe2fb 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -18,16 +18,19 @@
 
 package org.apache.hadoop.mapreduce;
 
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.spy;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -46,8 +49,10 @@ import 
org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapred.JobConf;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import org.mockito.verification.VerificationMode;
 
+
 /**
  * A class for unit testing JobResourceUploader.
  */
@@ -375,6 +380,50 @@ public class TestJobResourceUploader {
 testErasureCodingSetting(false);
   }
 
+  @Test
+  public void testOriginalPathEndsInSlash()
+  throws IOException, URISyntaxException {
+testOriginalPathWithTrailingSl

[hadoop] branch branch-3.2 updated: MAPREDUCE-7225: Fix broken current folder expansion during MR job start. Contributed by Peter Bacsko.

2019-08-01 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e611fb8  MAPREDUCE-7225: Fix broken current folder expansion during MR 
job start. Contributed by Peter Bacsko.
e611fb8 is described below

commit e611fb878bd34b5083f7988e28a86933b181db2c
Author: Szilard Nemeth 
AuthorDate: Thu Aug 1 16:05:28 2019 +0200

MAPREDUCE-7225: Fix broken current folder expansion during MR job start. 
Contributed by Peter Bacsko.
---
 .../hadoop/mapreduce/JobResourceUploader.java  | 25 -
 .../hadoop/mapreduce/TestJobResourceUploader.java  | 64 +-
 2 files changed, 85 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index e106a54..c8686d7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -59,6 +59,8 @@ import com.google.common.annotations.VisibleForTesting;
 class JobResourceUploader {
   protected static final Logger LOG =
   LoggerFactory.getLogger(JobResourceUploader.class);
+  private static final String ROOT_PATH = "/";
+
   private final boolean useWildcard;
   private final FileSystem jtFs;
   private SharedCacheClient scClient = null;
@@ -674,9 +676,30 @@ class JobResourceUploader {
 if (FileUtil.compareFs(remoteFs, jtFs)) {
   return originalPath;
 }
+
+boolean root = false;
+if (ROOT_PATH.equals(originalPath.toUri().getPath())) {
+  // "/" needs special treatment
+  root = true;
+} else {
+  // If originalPath ends in a "/", then remove it so
+  // that originalPath.getName() does not return an empty string
+  String uriString = originalPath.toUri().toString();
+  if (uriString.endsWith("/")) {
+try {
+  URI strippedURI =
+  new URI(uriString.substring(0, uriString.length() - 1));
+  originalPath = new Path(strippedURI);
+} catch (URISyntaxException e) {
+  throw new IllegalArgumentException("Error processing URI", e);
+}
+  }
+}
+
 // this might have name collisions. copy will throw an exception
 // parse the original path to create new path
-Path newPath = new Path(parentDir, originalPath.getName());
+Path newPath = root ?
+parentDir : new Path(parentDir, originalPath.getName());
 FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
 jtFs.setReplication(newPath, replication);
 jtFs.makeQualified(newPath);
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index d347da5..8ab54a6 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce;
 
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.never;
@@ -25,9 +26,11 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.times;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -46,8 +49,10 @@ import 
org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapred.JobConf;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import org.mockito.verification.VerificationMode;
 
+
 /**
  * A class for unit testing JobResourceUploader.
  */
@@ -375,6 +380,50 @@ public class TestJobResourceUploader {
 testErasureCodingSetting(false);
   }
 
+  @Test
+  public void testOriginalPathEndsInSlash()
+  throws IOException, URISyntaxException {
+testOriginalPathWithTra

[hadoop] branch branch-3.1 updated: MAPREDUCE-7225: Fix broken current folder expansion during MR job start. Contributed by Peter Bacsko.

2019-08-01 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 639763c  MAPREDUCE-7225: Fix broken current folder expansion during MR 
job start. Contributed by Peter Bacsko.
639763c is described below

commit 639763c561c7dafd0d502bb246f1d84f3be50c4c
Author: Szilard Nemeth 
AuthorDate: Thu Aug 1 16:07:44 2019 +0200

MAPREDUCE-7225: Fix broken current folder expansion during MR job start. 
Contributed by Peter Bacsko.
---
 .../hadoop/mapreduce/JobResourceUploader.java  | 25 -
 .../hadoop/mapreduce/TestJobResourceUploader.java  | 64 +-
 2 files changed, 85 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index e106a54..c8686d7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -59,6 +59,8 @@ import com.google.common.annotations.VisibleForTesting;
 class JobResourceUploader {
   protected static final Logger LOG =
   LoggerFactory.getLogger(JobResourceUploader.class);
+  private static final String ROOT_PATH = "/";
+
   private final boolean useWildcard;
   private final FileSystem jtFs;
   private SharedCacheClient scClient = null;
@@ -674,9 +676,30 @@ class JobResourceUploader {
 if (FileUtil.compareFs(remoteFs, jtFs)) {
   return originalPath;
 }
+
+boolean root = false;
+if (ROOT_PATH.equals(originalPath.toUri().getPath())) {
+  // "/" needs special treatment
+  root = true;
+} else {
+  // If originalPath ends in a "/", then remove it so
+  // that originalPath.getName() does not return an empty string
+  String uriString = originalPath.toUri().toString();
+  if (uriString.endsWith("/")) {
+try {
+  URI strippedURI =
+  new URI(uriString.substring(0, uriString.length() - 1));
+  originalPath = new Path(strippedURI);
+} catch (URISyntaxException e) {
+  throw new IllegalArgumentException("Error processing URI", e);
+}
+  }
+}
+
 // this might have name collisions. copy will throw an exception
 // parse the original path to create new path
-Path newPath = new Path(parentDir, originalPath.getName());
+Path newPath = root ?
+parentDir : new Path(parentDir, originalPath.getName());
 FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, conf);
 jtFs.setReplication(newPath, replication);
 jtFs.makeQualified(newPath);
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
index d347da5..8ab54a6 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/TestJobResourceUploader.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce;
 
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.never;
@@ -25,9 +26,11 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.times;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
@@ -46,8 +49,10 @@ import 
org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.mapred.JobConf;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
 import org.mockito.verification.VerificationMode;
 
+
 /**
  * A class for unit testing JobResourceUploader.
  */
@@ -375,6 +380,50 @@ public class TestJobResourceUploader {
 testErasureCodingSetting(false);
   }
 
+  @Test
+  public void testOriginalPathEndsInSlash()
+  throws IOException, URISyntaxException {
+testOriginalPathWithTra

[hadoop] branch trunk updated: YARN-9375. Use Configured in GpuDiscoverer and FpgaDiscoverer (#1131)

2019-08-02 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 95fc38f  YARN-9375. Use Configured in GpuDiscoverer and FpgaDiscoverer 
(#1131)
95fc38f is described below

commit 95fc38f2e9011b16ca09ac44843a71a2cac45c99
Author: Adam Antal 
AuthorDate: Fri Aug 2 11:24:09 2019 +0200

YARN-9375. Use Configured in GpuDiscoverer and FpgaDiscoverer (#1131)

Contributed by Adam Antal
---
 .../resourceplugin/fpga/FpgaDiscoverer.java | 17 ++---
 .../resourceplugin/gpu/GpuDiscoverer.java   | 12 ++--
 2 files changed, 12 insertions(+), 17 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
index 180a011..afe190a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDiscoverer.java
@@ -28,6 +28,7 @@ import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -46,11 +47,10 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Sets;
 
-public class FpgaDiscoverer {
+public class FpgaDiscoverer extends Configured {
   private static final Logger LOG = LoggerFactory.getLogger(
   FpgaDiscoverer.class);
 
-  private Configuration conf = null;
   private AbstractFpgaVendorPlugin plugin = null;
   private List currentFpgaInfo = null;
 
@@ -64,11 +64,6 @@ public class FpgaDiscoverer {
 this.scriptRunner = scriptRunner;
   }
 
-  @VisibleForTesting
-  public void setConf(Configuration configuration) {
-this.conf = configuration;
-  }
-
   public List getCurrentFpgaInfo() {
 return currentFpgaInfo;
   }
@@ -82,7 +77,7 @@ public class FpgaDiscoverer {
   }
 
   public void initialize(Configuration config) throws YarnException {
-this.conf = config;
+setConf(config);
 this.plugin.initPlugin(config);
 // Try to diagnose FPGA
 LOG.info("Trying to diagnose FPGA information ...");
@@ -100,11 +95,11 @@ public class FpgaDiscoverer {
   public List discover()
   throws ResourceHandlerException {
 List list;
-String allowed = this.conf.get(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES);
+String allowed = getConf().get(YarnConfiguration.NM_FPGA_ALLOWED_DEVICES);
 
-String availableDevices = conf.get(
+String availableDevices = getConf().get(
 YarnConfiguration.NM_FPGA_AVAILABLE_DEVICES);
-String discoveryScript = conf.get(
+String discoveryScript = getConf().get(
 YarnConfiguration.NM_FPGA_DEVICE_DISCOVERY_SCRIPT);
 
 FPGADiscoveryStrategy discoveryStrategy;
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
index 986f84a..939093f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuDiscoverer.java
@@ -25,6 +25,7 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -45,7 +46,7 @@ import java.util.Set;
 
 @InterfaceAudien

[hadoop] branch trunk updated: YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / checkMandatoryResources work differently (#1121)

2019-08-02 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new cbcada80 YARN-9124. Resolve contradiction in ResourceUtils: 
addMandatoryResources / checkMandatoryResources work differently (#1121)
cbcada80 is described below

commit cbcada804d119b837ad99de71d7f44cb4629026e
Author: Adam Antal 
AuthorDate: Fri Aug 2 13:04:34 2019 +0200

YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / 
checkMandatoryResources work differently (#1121)
---
 .../yarn/api/records/ResourceInformation.java  |  6 +++-
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 35 +-
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 4209ca7..e83ea72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -59,7 +59,11 @@ public class ResourceInformation implements 
Comparable {
   public static final ResourceInformation FPGAS =
   ResourceInformation.newInstance(FPGA_URI);
 
-  public static final Map MANDATORY_RESOURCES =
+  /**
+   * Special resources that should be treated separately
+   * from arbitrary resource types.
+   */
+  public static final Map SPECIAL_RESOURCES =
   ImmutableMap.of(MEMORY_URI, MEMORY_MB, VCORES_URI, VCORES, GPU_URI, 
GPUS, FPGA_URI, FPGAS);
 
   /**
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index 13a872c..9aaff15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -93,14 +93,28 @@ public class ResourceUtils {
   private ResourceUtils() {
   }
 
-  private static void checkMandatoryResources(
+  /**
+   * Ensures that historical resource types (like {@link
+   * ResourceInformation#MEMORY_URI}, {@link ResourceInformation#VCORES_URI})
+   * are not getting overridden in the resourceInformationMap.
+   *
+   * Also checks whether {@link ResourceInformation#SPECIAL_RESOURCES} are not
+   * configured poorly: having their proper units and types.
+   *
+   * @param resourceInformationMap Map object having keys as resources names
+   *   and {@link ResourceInformation} objects as
+   *   values
+   * @throws YarnRuntimeException if either of the two above
+   * conditions do not hold
+   */
+  private static void checkSpecialResources(
   Map resourceInformationMap)
   throws YarnRuntimeException {
 /*
- * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource 
names, in addition to
- * 'MEMORY' for historical reasons
+ * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource
+ * names, in addition to 'MEMORY' for historical reasons
  */
-String keys[] = { "memory", ResourceInformation.MEMORY_URI,
+String[] keys = { "memory", ResourceInformation.MEMORY_URI,
 ResourceInformation.VCORES_URI };
 for(String key : keys) {
   if (resourceInformationMap.containsKey(key)) {
@@ -111,7 +125,7 @@ public class ResourceUtils {
 }
 
 for (Map.Entry mandatoryResourceEntry :
-ResourceInformation.MANDATORY_RESOURCES.entrySet()) {
+ResourceInformation.SPECIAL_RESOURCES.entrySet()) {
   String key = mandatoryResourceEntry.getKey();
   ResourceInformation mandatoryRI = mandatoryResourceEntry.getValue();
 
@@ -134,6 +148,13 @@ public class ResourceUtils {
 }
   }
 
+  /**
+   * Ensures that {@link ResourceUtils#MEMORY} and {@link ResourceUtils#VCORES}
+   * resources are contained in the map received as parameter.
+   *
+   * @param res Map object having keys as resources names
+   *and {@link ResourceInformation} objects as values
+   */
   private static void addMandatoryResources(
   Map res) {
 ResourceInformation ri;
@@ -272,7 +293,7 @@ public class ResourceUtils {
   validateNameOfResourceNameAndThrowException(name);
 }
 
-checkMandatoryResources(resourceInformationMap);
+

[hadoop] branch trunk updated: YARN-9093. Remove commented code block from the beginning of Tes… (#444)

2019-08-02 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1930a7b  YARN-9093. Remove commented code block from the beginning of 
Tes… (#444)
1930a7b is described below

commit 1930a7bf608fef760c4286882f6abb99f47001ff
Author: Vidura Mudalige 
AuthorDate: Fri Aug 2 21:16:19 2019 +1000

YARN-9093. Remove commented code block from the beginning of Tes… (#444)
---
 .../nodemanager/TestDefaultContainerExecutor.java  | 39 --
 1 file changed, 39 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
index dba79b3..a5c1152 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDefaultContainerExecutor.java
@@ -83,45 +83,6 @@ import org.mockito.stubbing.Answer;
 
 public class TestDefaultContainerExecutor {
 
-  /*
-  // XXX FileContext cannot be mocked to do this
-  static FSDataInputStream getRandomStream(Random r, int len)
-  throws IOException {
-byte[] bytes = new byte[len];
-r.nextBytes(bytes);
-DataInputBuffer buf = new DataInputBuffer();
-buf.reset(bytes, 0, bytes.length);
-return new FSDataInputStream(new FakeFSDataInputStream(buf));
-  }
-
-  class PathEndsWith extends ArgumentMatcher {
-final String suffix;
-PathEndsWith(String suffix) {
-  this.suffix = suffix;
-}
-@Override
-public boolean matches(Object o) {
-  return
-  suffix.equals(((Path)o).getName());
-}
-  }
-
-  DataOutputBuffer mockStream(
-  AbstractFileSystem spylfs, Path p, Random r, int len) 
-  throws IOException {
-DataOutputBuffer dob = new DataOutputBuffer();
-doReturn(getRandomStream(r, len)).when(spylfs).open(p);
-doReturn(new FileStatus(len, false, -1, -1L, -1L, p)).when(
-spylfs).getFileStatus(argThat(new PathEndsWith(p.getName(;
-doReturn(new FSDataOutputStream(dob)).when(spylfs).createInternal(
-argThat(new PathEndsWith(p.getName())),
-eq(EnumSet.of(OVERWRITE)),
-Matchers.anyObject(), anyInt(), anyShort(), anyLong(),
-Matchers.anyObject(), anyInt(), anyBoolean());
-return dob;
-  }
-  */
-
   private static Path BASE_TMP_PATH = new Path("target",
   TestDefaultContainerExecutor.class.getSimpleName());
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: Logging fileSize of log files under NM Local Dir. Contributed by Prabhu Joseph

2019-08-02 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 54ac801  Logging fileSize of log files under NM Local Dir. Contributed 
by Prabhu Joseph
54ac801 is described below

commit 54ac80176e8487b7a18cd9e16a11efa289d0b7df
Author: Szilard Nemeth 
AuthorDate: Fri Aug 2 13:38:06 2019 +0200

Logging fileSize of log files under NM Local Dir. Contributed by Prabhu 
Joseph
---
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java |  5 +
 .../src/main/resources/yarn-default.xml|  8 
 .../logaggregation/AppLogAggregatorImpl.java   | 18 +-
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 548d868..917d32b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1370,6 +1370,11 @@ public class YarnConfiguration extends Configuration {
   public static final String LOG_AGGREGATION_RETAIN_SECONDS = YARN_PREFIX
   + "log-aggregation.retain-seconds";
   public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;
+
+  public static final String LOG_AGGREGATION_DEBUG_FILESIZE = YARN_PREFIX
+  + "log-aggregation.debug.filesize";
+  public static final long DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE
+  = 100 * 1024 * 1024;
   
   /**
* How long to wait between aggregated log retention checks. If set to
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 8582522..f379844 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1292,6 +1292,14 @@
   
 
   
+The log files created under NM Local Directories
+will be logged if it exceeds the configured bytes. This
+only takes effect if log4j level is at least Debug.
+yarn.log-aggregation.debug.filesize
+104857600
+  
+
+  
 Specify which log file controllers we will support. The first
 file controller we add will be used to write the aggregated logs.
 This comma separated configuration will work with the configuration:
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index fdac2e4..ef14d2a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -109,7 +109,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   private final AtomicBoolean waiting = new AtomicBoolean(false);
   private int logAggregationTimes = 0;
   private int cleanupOldLogTimes = 0;
-
+  private long logFileSizeThreshold;
   private boolean renameTemporaryLogFileFailed = false;
 
   private final Map 
containerLogAggregators =
@@ -176,6 +176,9 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 this.nodeId = nodeId;
 this.logAggPolicy = getLogAggPolicy(conf);
 this.recoveredLogInitedTime = recoveredLogInitedTime;
+this.logFileSizeThreshold =
+conf.getLong(YarnConfiguration.LOG_AGGREGATION_DEBUG_FILESIZE,
+YarnConfiguration.DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE);
 if (logAggregationFileController == null) {
   // by default, use T-File Controller
   this.logAggregationFileController = new LogAggregationTFileController();
@@ -330,6 +333,19 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   uploadedLogsInThisCycle = true;
   List uploadedFilePathsInThisCycleList = new ArrayList<>();
   
uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsInThisCycle);
+  if (LOG.isDebugEnabled()) {
+for (Path uploadedFilePath : upload

[hadoop] branch trunk updated: YARN-9727: Allowed Origin pattern is discouraged if regex contains *. Contributed by Zoltan Siegl

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new df30d8e  YARN-9727: Allowed Origin pattern is discouraged if regex 
contains *. Contributed by Zoltan Siegl
df30d8e is described below

commit df30d8ea092cddc037482bc60fc790b26b577963
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:34:23 2019 +0200

YARN-9727: Allowed Origin pattern is discouraged if regex contains *. 
Contributed by Zoltan Siegl
---
 .../main/java/org/apache/hadoop/security/http/CrossOriginFilter.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
index 02c168f..60c2864 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/http/CrossOriginFilter.java
@@ -197,7 +197,7 @@ public class CrossOriginFilter implements Filter {
 LOG.info("Allowed Origins: " + StringUtils.join(allowedOrigins, ','));
 LOG.info("Allow All Origins: " + allowAllOrigins);
 List discouragedAllowedOrigins = allowedOrigins.stream()
-.filter(s -> s.length() > 1 && s.contains("*"))
+.filter(s -> s.length() > 1 && s.contains("*") && 
!(s.startsWith(ALLOWED_ORIGINS_REGEX_PREFIX)))
 .collect(Collectors.toList());
 for (String discouragedAllowedOrigin : discouragedAllowedOrigins) {
 LOG.warn("Allowed Origin pattern '" + discouragedAllowedOrigin + "' is 
discouraged, use the 'regex:' prefix and use a Java regular expression 
instead.");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9094: Remove unused interface method: NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 72d7e57  YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak
72d7e57 is described below

commit 72d7e570a73989aa18b737c0e642d570a55c6781
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:49:18 2019 +0200

YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak
---
 .../resourceplugin/NodeResourceUpdaterPlugin.java| 16 +---
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
index 88f77ed..107e43c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
@@ -28,25 +28,11 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 public abstract class NodeResourceUpdaterPlugin {
   /**
* Update configured resource for the given component.
-   * @param res resource passed in by external mododule (such as
+   * @param res resource passed in by external module (such as
*{@link 
org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater}
* @throws YarnException when any issue happens.
*/
   public abstract void updateConfiguredResource(Resource res)
   throws YarnException;
 
-  /**
-   * This method will be called when the node's resource is loaded from
-   * dynamic-resources.xml in ResourceManager.
-   *
-   * @param newResource newResource reported by RM
-   * @throws YarnException when any mismatch between NM/RM
-   */
-  public void handleUpdatedResourceFromRM(Resource newResource) throws
-  YarnException {
-// by default do nothing, subclass should implement this method when any
-// special activities required upon new resource reported by RM.
-  }
-
-  // TODO: add implementation to update node attribute once YARN-3409 merged.
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9094: Remove unused interface method: NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3bcf44f  YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak
3bcf44f is described below

commit 3bcf44f07079002eb2e3b6ab5892f6c96b971041
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:49:18 2019 +0200

YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak

(cherry picked from commit 72d7e570a73989aa18b737c0e642d570a55c6781)
---
 .../resourceplugin/NodeResourceUpdaterPlugin.java| 16 +---
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
index 88f77ed..107e43c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
@@ -28,25 +28,11 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 public abstract class NodeResourceUpdaterPlugin {
   /**
* Update configured resource for the given component.
-   * @param res resource passed in by external mododule (such as
+   * @param res resource passed in by external module (such as
*{@link 
org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater}
* @throws YarnException when any issue happens.
*/
   public abstract void updateConfiguredResource(Resource res)
   throws YarnException;
 
-  /**
-   * This method will be called when the node's resource is loaded from
-   * dynamic-resources.xml in ResourceManager.
-   *
-   * @param newResource newResource reported by RM
-   * @throws YarnException when any mismatch between NM/RM
-   */
-  public void handleUpdatedResourceFromRM(Resource newResource) throws
-  YarnException {
-// by default do nothing, subclass should implement this method when any
-// special activities required upon new resource reported by RM.
-  }
-
-  // TODO: add implementation to update node attribute once YARN-3409 merged.
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9094: Remove unused interface method: NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 943dfc7  YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak
943dfc7 is described below

commit 943dfc78d17c0df4ad398b9b4747562b9d4a9c84
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:49:18 2019 +0200

YARN-9094: Remove unused interface method: 
NodeResourceUpdaterPlugin#handleUpdatedResourceFromRM. Contributed by Gergely 
Pollak

(cherry picked from commit 72d7e570a73989aa18b737c0e642d570a55c6781)
---
 .../resourceplugin/NodeResourceUpdaterPlugin.java| 16 +---
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
index 88f77ed..107e43c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/NodeResourceUpdaterPlugin.java
@@ -28,25 +28,11 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 public abstract class NodeResourceUpdaterPlugin {
   /**
* Update configured resource for the given component.
-   * @param res resource passed in by external mododule (such as
+   * @param res resource passed in by external module (such as
*{@link 
org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater}
* @throws YarnException when any issue happens.
*/
   public abstract void updateConfiguredResource(Resource res)
   throws YarnException;
 
-  /**
-   * This method will be called when the node's resource is loaded from
-   * dynamic-resources.xml in ResourceManager.
-   *
-   * @param newResource newResource reported by RM
-   * @throws YarnException when any mismatch between NM/RM
-   */
-  public void handleUpdatedResourceFromRM(Resource newResource) throws
-  YarnException {
-// by default do nothing, subclass should implement this method when any
-// special activities required upon new resource reported by RM.
-  }
-
-  // TODO: add implementation to update node attribute once YARN-3409 merged.
 }
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are synchronized unnecessarily. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 742e30b  YARN-9096: Some GpuResourcePlugin and ResourcePluginManager 
methods are synchronized unnecessarily. Contributed by Gergely Pollak
742e30b is described below

commit 742e30b47381ad63e2b2fe63738cd0fe6cbce106
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:59:19 2019 +0200

YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are 
synchronized unnecessarily. Contributed by Gergely Pollak
---
 .../containermanager/resourceplugin/ResourcePluginManager.java  | 4 ++--
 .../containermanager/resourceplugin/gpu/GpuResourcePlugin.java  | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index de061d6..1274b64 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -65,7 +65,7 @@ public class ResourcePluginManager {
 
   private DeviceMappingManager deviceMappingManager = null;
 
-  public synchronized void initialize(Context context)
+  public void initialize(Context context)
   throws YarnException, ClassNotFoundException {
 Configuration conf = context.getConf();
 Map pluginMap = new HashMap<>();
@@ -274,7 +274,7 @@ public class ResourcePluginManager {
 return deviceMappingManager;
   }
 
-  public synchronized void cleanup() throws YarnException {
+  public void cleanup() throws YarnException {
 for (ResourcePlugin plugin : configuredPlugins.values()) {
   plugin.cleanup();
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 1ac6f83..7719afb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -54,7 +54,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized void initialize(Context context) throws YarnException {
+  public void initialize(Context context) throws YarnException {
 this.gpuDiscoverer.initialize(context.getConf());
 this.dockerCommandPlugin =
 GpuDockerCommandPluginFactory.createGpuDockerCommandPlugin(
@@ -62,7 +62,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized ResourceHandler createResourceHandler(
+  public ResourceHandler createResourceHandler(
   Context context, CGroupsHandler cGroupsHandler,
   PrivilegedOperationExecutor privilegedOperationExecutor) {
 if (gpuResourceHandler == null) {
@@ -74,7 +74,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized NodeResourceUpdaterPlugin 
getNodeResourceHandlerInstance() {
+  public NodeResourceUpdaterPlugin getNodeResourceHandlerInstance() {
 return resourceDiscoverHandler;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are synchronized unnecessarily. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new f0dfb8b  YARN-9096: Some GpuResourcePlugin and ResourcePluginManager 
methods are synchronized unnecessarily. Contributed by Gergely Pollak
f0dfb8b is described below

commit f0dfb8b8325cea0446924affaf3a4eb0c3be6653
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:59:19 2019 +0200

YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are 
synchronized unnecessarily. Contributed by Gergely Pollak

(cherry picked from commit 742e30b47381ad63e2b2fe63738cd0fe6cbce106)
---
 .../containermanager/resourceplugin/ResourcePluginManager.java  | 4 ++--
 .../containermanager/resourceplugin/gpu/GpuResourcePlugin.java  | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index c1f2910..d6edfdd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -52,7 +52,7 @@ public class ResourcePluginManager {
   private Map configuredPlugins =
   Collections.emptyMap();
 
-  public synchronized void initialize(Context context)
+  public void initialize(Context context)
   throws YarnException {
 Configuration conf = context.getConf();
 
@@ -108,7 +108,7 @@ public class ResourcePluginManager {
 }
   }
 
-  public synchronized void cleanup() throws YarnException {
+  public void cleanup() throws YarnException {
 for (ResourcePlugin plugin : configuredPlugins.values()) {
   plugin.cleanup();
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 1ac6f83..7719afb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -54,7 +54,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized void initialize(Context context) throws YarnException {
+  public void initialize(Context context) throws YarnException {
 this.gpuDiscoverer.initialize(context.getConf());
 this.dockerCommandPlugin =
 GpuDockerCommandPluginFactory.createGpuDockerCommandPlugin(
@@ -62,7 +62,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized ResourceHandler createResourceHandler(
+  public ResourceHandler createResourceHandler(
   Context context, CGroupsHandler cGroupsHandler,
   PrivilegedOperationExecutor privilegedOperationExecutor) {
 if (gpuResourceHandler == null) {
@@ -74,7 +74,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized NodeResourceUpdaterPlugin 
getNodeResourceHandlerInstance() {
+  public NodeResourceUpdaterPlugin getNodeResourceHandlerInstance() {
 return resourceDiscoverHandler;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are synchronized unnecessarily. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new b2f39f8  YARN-9096: Some GpuResourcePlugin and ResourcePluginManager 
methods are synchronized unnecessarily. Contributed by Gergely Pollak
b2f39f8 is described below

commit b2f39f81fe5d6d06501dbd1c5944b8c59483b747
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 09:59:19 2019 +0200

YARN-9096: Some GpuResourcePlugin and ResourcePluginManager methods are 
synchronized unnecessarily. Contributed by Gergely Pollak

(cherry picked from commit 742e30b47381ad63e2b2fe63738cd0fe6cbce106)
---
 .../containermanager/resourceplugin/ResourcePluginManager.java  | 4 ++--
 .../containermanager/resourceplugin/gpu/GpuResourcePlugin.java  | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index c1f2910..d6edfdd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -52,7 +52,7 @@ public class ResourcePluginManager {
   private Map configuredPlugins =
   Collections.emptyMap();
 
-  public synchronized void initialize(Context context)
+  public void initialize(Context context)
   throws YarnException {
 Configuration conf = context.getConf();
 
@@ -108,7 +108,7 @@ public class ResourcePluginManager {
 }
   }
 
-  public synchronized void cleanup() throws YarnException {
+  public void cleanup() throws YarnException {
 for (ResourcePlugin plugin : configuredPlugins.values()) {
   plugin.cleanup();
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
index 1ac6f83..7719afb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/gpu/GpuResourcePlugin.java
@@ -54,7 +54,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized void initialize(Context context) throws YarnException {
+  public void initialize(Context context) throws YarnException {
 this.gpuDiscoverer.initialize(context.getConf());
 this.dockerCommandPlugin =
 GpuDockerCommandPluginFactory.createGpuDockerCommandPlugin(
@@ -62,7 +62,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized ResourceHandler createResourceHandler(
+  public ResourceHandler createResourceHandler(
   Context context, CGroupsHandler cGroupsHandler,
   PrivilegedOperationExecutor privilegedOperationExecutor) {
 if (gpuResourceHandler == null) {
@@ -74,7 +74,7 @@ public class GpuResourcePlugin implements ResourcePlugin {
   }
 
   @Override
-  public synchronized NodeResourceUpdaterPlugin 
getNodeResourceHandlerInstance() {
+  public NodeResourceUpdaterPlugin getNodeResourceHandlerInstance() {
 return resourceDiscoverHandler;
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9092. Create an object for cgroups mount enable and cgroups mount path as they belong together. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e0c21c6  YARN-9092. Create an object for cgroups mount enable and 
cgroups mount path as they belong together. Contributed by Gergely Pollak
e0c21c6 is described below

commit e0c21c6da91776caf661661a19c368939c81fcc4
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 10:18:34 2019 +0200

YARN-9092. Create an object for cgroups mount enable and cgroups mount path 
as they belong together. Contributed by Gergely Pollak
---
 .../linux/resources/CGroupsHandlerImpl.java| 28 +++--
 .../linux/resources/CGroupsMountConfig.java| 72 ++
 .../util/CgroupsLCEResourcesHandler.java   | 21 +++
 3 files changed, 91 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index fab1490..6a87ede 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -66,8 +66,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private final String mtabFile;
   private final String cGroupPrefix;
-  private final boolean enableCGroupMount;
-  private final String cGroupMountPath;
+  private final CGroupsMountConfig cGroupsMountConfig;
   private final long deleteCGroupTimeout;
   private final long deleteCGroupDelay;
   private Map controllerPaths;
@@ -91,10 +90,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 this.cGroupPrefix = conf.get(YarnConfiguration.
 NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn")
 .replaceAll("^/+", "").replaceAll("/+$", "");
-this.enableCGroupMount = conf.getBoolean(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
-this.cGroupMountPath = conf.get(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
+this.cGroupsMountConfig = new CGroupsMountConfig(conf);
 this.deleteCGroupTimeout = conf.getLong(
 YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT,
 YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT) +
@@ -150,9 +146,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 Map> newMtab = null;
 Map cPaths;
 try {
-  if (this.cGroupMountPath != null && !this.enableCGroupMount) {
+  if (this.cGroupsMountConfig.mountDisabledButMountPathDefined()) {
 newMtab = ResourceHandlerModule.
-parseConfiguredCGroupPath(this.cGroupMountPath);
+parseConfiguredCGroupPath(this.cGroupsMountConfig.getMountPath());
   }
 
   if (newMtab == null) {
@@ -282,14 +278,10 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private void mountCGroupController(CGroupController controller)
   throws ResourceHandlerException {
-if (cGroupMountPath == null) {
-  throw new ResourceHandlerException(
-  String.format("Cgroups mount path not specified in %s.",
-  YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH));
-}
 String existingMountPath = getControllerPath(controller);
 String requestedMountPath =
-new File(cGroupMountPath, controller.getName()).getAbsolutePath();
+new File(cGroupsMountConfig.getMountPath(),
+controller.getName()).getAbsolutePath();
 
 if (existingMountPath == null ||
 !requestedMountPath.equals(existingMountPath)) {
@@ -367,7 +359,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   @Override
   public void initializeCGroupController(CGroupController controller) throws
   ResourceHandlerException {
-if (enableCGroupMount) {
+if (this.cGroupsMountConfig.isMountEnabled() &&
+cGroupsMountConfig.ensureMountPathIsDefined()) {
   // We have a controller that needs to be mounted
   mountCGroupController(controller);
 }
@@ -611,7 +604,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   @Override
   public String getCGroupMountPath() {
-return cGroupMountPath;
+return this.cGroupsMountConfig.getMountPath();
   }
 
   @Override
@@ -619,8 +612,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {

[hadoop] branch branch-3.2 updated: YARN-9092. Create an object for cgroups mount enable and cgroups mount path as they belong together. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 02d0e54  YARN-9092. Create an object for cgroups mount enable and 
cgroups mount path as they belong together. Contributed by Gergely Pollak
02d0e54 is described below

commit 02d0e545969e382ded365ddea6391bca41ae707f
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 10:18:34 2019 +0200

YARN-9092. Create an object for cgroups mount enable and cgroups mount path 
as they belong together. Contributed by Gergely Pollak

(cherry picked from commit e0c21c6da91776caf661661a19c368939c81fcc4)
---
 .../linux/resources/CGroupsHandlerImpl.java| 28 +++--
 .../linux/resources/CGroupsMountConfig.java| 72 ++
 .../util/CgroupsLCEResourcesHandler.java   | 21 +++
 3 files changed, 91 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 5045ac3..aa9a60f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -66,8 +66,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private final String mtabFile;
   private final String cGroupPrefix;
-  private final boolean enableCGroupMount;
-  private final String cGroupMountPath;
+  private final CGroupsMountConfig cGroupsMountConfig;
   private final long deleteCGroupTimeout;
   private final long deleteCGroupDelay;
   private Map controllerPaths;
@@ -90,10 +89,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 this.cGroupPrefix = conf.get(YarnConfiguration.
 NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn")
 .replaceAll("^/", "").replaceAll("$/", "");
-this.enableCGroupMount = conf.getBoolean(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
-this.cGroupMountPath = conf.get(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
+this.cGroupsMountConfig = new CGroupsMountConfig(conf);
 this.deleteCGroupTimeout = conf.getLong(
 YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT,
 YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT) +
@@ -149,9 +145,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 Map> newMtab = null;
 Map cPaths;
 try {
-  if (this.cGroupMountPath != null && !this.enableCGroupMount) {
+  if (this.cGroupsMountConfig.mountDisabledButMountPathDefined()) {
 newMtab = ResourceHandlerModule.
-parseConfiguredCGroupPath(this.cGroupMountPath);
+parseConfiguredCGroupPath(this.cGroupsMountConfig.getMountPath());
   }
 
   if (newMtab == null) {
@@ -281,14 +277,10 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private void mountCGroupController(CGroupController controller)
   throws ResourceHandlerException {
-if (cGroupMountPath == null) {
-  throw new ResourceHandlerException(
-  String.format("Cgroups mount path not specified in %s.",
-  YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH));
-}
 String existingMountPath = getControllerPath(controller);
 String requestedMountPath =
-new File(cGroupMountPath, controller.getName()).getAbsolutePath();
+new File(cGroupsMountConfig.getMountPath(),
+controller.getName()).getAbsolutePath();
 
 if (existingMountPath == null ||
 !requestedMountPath.equals(existingMountPath)) {
@@ -367,7 +359,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   @Override
   public void initializeCGroupController(CGroupController controller) throws
   ResourceHandlerException {
-if (enableCGroupMount) {
+if (this.cGroupsMountConfig.isMountEnabled() &&
+cGroupsMountConfig.ensureMountPathIsDefined()) {
   // We have a controller that needs to be mounted
   mountCGroupController(controller);
 }
@@ -619,7 +612,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   @Override
   public String getCGroupMountPath() {
-return cGroupMountPath;
+return this.cGroupsMountConfig.getMountPath();

[hadoop] branch branch-3.1 updated: YARN-9092. Create an object for cgroups mount enable and cgroups mount path as they belong together. Contributed by Gergely Pollak

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 410f7a3  YARN-9092. Create an object for cgroups mount enable and 
cgroups mount path as they belong together. Contributed by Gergely Pollak
410f7a3 is described below

commit 410f7a3069db961a66dd932ebc6aa80e340fbbe0
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 10:18:34 2019 +0200

YARN-9092. Create an object for cgroups mount enable and cgroups mount path 
as they belong together. Contributed by Gergely Pollak

(cherry picked from commit e0c21c6da91776caf661661a19c368939c81fcc4)
---
 .../linux/resources/CGroupsHandlerImpl.java| 28 +++--
 .../linux/resources/CGroupsMountConfig.java| 72 ++
 .../util/CgroupsLCEResourcesHandler.java   | 21 +++
 3 files changed, 91 insertions(+), 30 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index ff2a3b7..bf7d06b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -66,8 +66,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private final String mtabFile;
   private final String cGroupPrefix;
-  private final boolean enableCGroupMount;
-  private final String cGroupMountPath;
+  private final CGroupsMountConfig cGroupsMountConfig;
   private final long deleteCGroupTimeout;
   private final long deleteCGroupDelay;
   private Map controllerPaths;
@@ -90,10 +89,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 this.cGroupPrefix = conf.get(YarnConfiguration.
 NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn")
 .replaceAll("^/", "").replaceAll("$/", "");
-this.enableCGroupMount = conf.getBoolean(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
-this.cGroupMountPath = conf.get(YarnConfiguration.
-NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
+this.cGroupsMountConfig = new CGroupsMountConfig(conf);
 this.deleteCGroupTimeout = conf.getLong(
 YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT,
 YarnConfiguration.DEFAULT_NM_LINUX_CONTAINER_CGROUPS_DELETE_TIMEOUT) +
@@ -149,9 +145,9 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 Map> newMtab = null;
 Map cPaths;
 try {
-  if (this.cGroupMountPath != null && !this.enableCGroupMount) {
+  if (this.cGroupsMountConfig.mountDisabledButMountPathDefined()) {
 newMtab = ResourceHandlerModule.
-parseConfiguredCGroupPath(this.cGroupMountPath);
+parseConfiguredCGroupPath(this.cGroupsMountConfig.getMountPath());
   }
 
   if (newMtab == null) {
@@ -281,14 +277,10 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   private void mountCGroupController(CGroupController controller)
   throws ResourceHandlerException {
-if (cGroupMountPath == null) {
-  throw new ResourceHandlerException(
-  String.format("Cgroups mount path not specified in %s.",
-  YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH));
-}
 String existingMountPath = getControllerPath(controller);
 String requestedMountPath =
-new File(cGroupMountPath, controller.getName()).getAbsolutePath();
+new File(cGroupsMountConfig.getMountPath(),
+controller.getName()).getAbsolutePath();
 
 if (existingMountPath == null ||
 !requestedMountPath.equals(existingMountPath)) {
@@ -367,7 +359,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   @Override
   public void initializeCGroupController(CGroupController controller) throws
   ResourceHandlerException {
-if (enableCGroupMount) {
+if (this.cGroupsMountConfig.isMountEnabled() &&
+cGroupsMountConfig.ensureMountPathIsDefined()) {
   // We have a controller that needs to be mounted
   mountCGroupController(controller);
 }
@@ -615,7 +608,7 @@ class CGroupsHandlerImpl implements CGroupsHandler {
 
   @Override
   public String getCGroupMountPath() {
-return cGroupMountPath;
+return this.cGroupsMountConfig.getMountPath();

[hadoop] branch branch-3.1 updated: YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / checkMandatoryResources work differently (#1121)

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 600a61f  YARN-9124. Resolve contradiction in ResourceUtils: 
addMandatoryResources / checkMandatoryResources work differently (#1121)
600a61f is described below

commit 600a61f4101ed5865fcf4f6b07f8ae643d19d5c7
Author: Adam Antal 
AuthorDate: Fri Aug 2 13:04:34 2019 +0200

YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / 
checkMandatoryResources work differently (#1121)


(cherry picked from commit cbcada804d119b837ad99de71d7f44cb4629026e)
---
 .../yarn/api/records/ResourceInformation.java  |  6 +++-
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 35 +-
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index 904ff4b..d802cdf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -53,7 +53,11 @@ public class ResourceInformation implements 
Comparable {
   public static final ResourceInformation FPGAS =
   ResourceInformation.newInstance(FPGA_URI);
 
-  public static final Map MANDATORY_RESOURCES =
+  /**
+   * Special resources that should be treated separately
+   * from arbitrary resource types.
+   */
+  public static final Map SPECIAL_RESOURCES =
   ImmutableMap.of(MEMORY_URI, MEMORY_MB, VCORES_URI, VCORES, GPU_URI, 
GPUS, FPGA_URI, FPGAS);
 
   /**
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index f211f49..142dafa 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -80,14 +80,28 @@ public class ResourceUtils {
   private ResourceUtils() {
   }
 
-  private static void checkMandatoryResources(
+  /**
+   * Ensures that historical resource types (like {@link
+   * ResourceInformation#MEMORY_URI}, {@link ResourceInformation#VCORES_URI})
+   * are not getting overridden in the resourceInformationMap.
+   *
+   * Also checks whether {@link ResourceInformation#SPECIAL_RESOURCES} are not
+   * configured poorly: having their proper units and types.
+   *
+   * @param resourceInformationMap Map object having keys as resources names
+   *   and {@link ResourceInformation} objects as
+   *   values
+   * @throws YarnRuntimeException if either of the two above
+   * conditions do not hold
+   */
+  private static void checkSpecialResources(
   Map<String, ResourceInformation> resourceInformationMap)
   throws YarnRuntimeException {
 /*
- * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource 
names, in addition to
- * 'MEMORY' for historical reasons
+ * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource
+ * names, in addition to 'MEMORY' for historical reasons
  */
-String keys[] = { "memory", ResourceInformation.MEMORY_URI,
+String[] keys = { "memory", ResourceInformation.MEMORY_URI,
 ResourceInformation.VCORES_URI };
 for(String key : keys) {
   if (resourceInformationMap.containsKey(key)) {
@@ -98,7 +112,7 @@ public class ResourceUtils {
 }
 
 for (Map.Entry<String, ResourceInformation> mandatoryResourceEntry :
-ResourceInformation.MANDATORY_RESOURCES.entrySet()) {
+ResourceInformation.SPECIAL_RESOURCES.entrySet()) {
   String key = mandatoryResourceEntry.getKey();
   ResourceInformation mandatoryRI = mandatoryResourceEntry.getValue();
 
@@ -121,6 +135,13 @@ public class ResourceUtils {
 }
   }
 
+  /**
+   * Ensures that {@link ResourceUtils#MEMORY} and {@link ResourceUtils#VCORES}
+   * resources are contained in the map received as parameter.
+   *
+   * @param res Map object having keys as resources names
+   *and {@link ResourceInformation} objects as values
+   */
   private static void addMandatoryResources(
   Map<String, ResourceInformation> res) {
 ResourceInformation ri;
@@ -258,7 +279,7 @@ public class ResourceUtils {
   validateNameOfResourceNameAndThrowExcept

[hadoop] branch branch-3.2 updated: YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / checkMandatoryResources work differently (#1121)

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4c4f7d9  YARN-9124. Resolve contradiction in ResourceUtils: 
addMandatoryResources / checkMandatoryResources work differently (#1121)
4c4f7d9 is described below

commit 4c4f7d9c80302d18d4abf51b26955b0ae1aabbc2
Author: Adam Antal 
AuthorDate: Fri Aug 2 13:04:34 2019 +0200

YARN-9124. Resolve contradiction in ResourceUtils: addMandatoryResources / 
checkMandatoryResources work differently (#1121)


(cherry picked from commit cbcada804d119b837ad99de71d7f44cb4629026e)
---
 .../yarn/api/records/ResourceInformation.java  |  6 +++-
 .../hadoop/yarn/util/resource/ResourceUtils.java   | 35 +-
 2 files changed, 33 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
index c83c3a2..3f2f4ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java
@@ -53,7 +53,11 @@ public class ResourceInformation implements 
Comparable<ResourceInformation> {
   public static final ResourceInformation FPGAS =
   ResourceInformation.newInstance(FPGA_URI);
 
-  public static final Map<String, ResourceInformation> MANDATORY_RESOURCES =
+  /**
+   * Special resources that should be treated separately
+   * from arbitrary resource types.
+   */
+  public static final Map<String, ResourceInformation> SPECIAL_RESOURCES =
   ImmutableMap.of(MEMORY_URI, MEMORY_MB, VCORES_URI, VCORES, GPU_URI, 
GPUS, FPGA_URI, FPGAS);
 
   /**
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index c2d7201..fd8be24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -81,14 +81,28 @@ public class ResourceUtils {
   private ResourceUtils() {
   }
 
-  private static void checkMandatoryResources(
+  /**
+   * Ensures that historical resource types (like {@link
+   * ResourceInformation#MEMORY_URI}, {@link ResourceInformation#VCORES_URI})
+   * are not getting overridden in the resourceInformationMap.
+   *
+   * Also checks whether {@link ResourceInformation#SPECIAL_RESOURCES} are not
+   * configured poorly: having their proper units and types.
+   *
+   * @param resourceInformationMap Map object having keys as resources names
+   *   and {@link ResourceInformation} objects as
+   *   values
+   * @throws YarnRuntimeException if either of the two above
+   * conditions do not hold
+   */
+  private static void checkSpecialResources(
   Map<String, ResourceInformation> resourceInformationMap)
   throws YarnRuntimeException {
 /*
- * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource 
names, in addition to
- * 'MEMORY' for historical reasons
+ * Supporting 'memory', 'memory-mb', 'vcores' also as invalid resource
+ * names, in addition to 'MEMORY' for historical reasons
  */
-String keys[] = { "memory", ResourceInformation.MEMORY_URI,
+String[] keys = { "memory", ResourceInformation.MEMORY_URI,
 ResourceInformation.VCORES_URI };
 for(String key : keys) {
   if (resourceInformationMap.containsKey(key)) {
@@ -99,7 +113,7 @@ public class ResourceUtils {
 }
 
 for (Map.Entry<String, ResourceInformation> mandatoryResourceEntry :
-ResourceInformation.MANDATORY_RESOURCES.entrySet()) {
+ResourceInformation.SPECIAL_RESOURCES.entrySet()) {
   String key = mandatoryResourceEntry.getKey();
   ResourceInformation mandatoryRI = mandatoryResourceEntry.getValue();
 
@@ -122,6 +136,13 @@ public class ResourceUtils {
 }
   }
 
+  /**
+   * Ensures that {@link ResourceUtils#MEMORY} and {@link ResourceUtils#VCORES}
+   * resources are contained in the map received as parameter.
+   *
+   * @param res Map object having keys as resources names
+   *and {@link ResourceInformation} objects as values
+   */
   private static void addMandatoryResources(
   Map<String, ResourceInformation> res) {
 ResourceInformation ri;
@@ -259,7 +280,7 @@ public class ResourceUtils {
   validateNameOfResourceNameAndThrowExcept

[hadoop] branch trunk updated: SUBMARINE-57. Add more elaborate message if submarine command is not recognized. Contributed by Adam Antal

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5f4cd0  SUBMARINE-57. Add more elaborate message if submarine command 
is not recognized. Contributed by Adam Antal
e5f4cd0 is described below

commit e5f4cd0fdae7e689789dd74bfbcfa6c52895f037
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 12:13:37 2019 +0200

SUBMARINE-57. Add more elaborate message if submarine command is not 
recognized. Contributed by Adam Antal
---
 .../src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
index 593fb89..b1fd90f 100644
--- 
a/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
+++ 
b/hadoop-submarine/hadoop-submarine-core/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
@@ -100,7 +100,7 @@ public class Cli {
   }
 } else {
   printHelp();
-  throw new IllegalArgumentException("Bad parameters ");
+  throw new IllegalArgumentException("Unrecognized option: " + args[0]);
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.2 updated: SUBMARINE-57. Add more elaborate message if submarine command is not recognized. Contributed by Adam Antal

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 3e90712  SUBMARINE-57. Add more elaborate message if submarine command 
is not recognized. Contributed by Adam Antal
3e90712 is described below

commit 3e9071207a60a5b8cb3ad0ca23bbaf75b3e3c6d9
Author: Szilard Nemeth 
AuthorDate: Fri Aug 9 12:13:37 2019 +0200

SUBMARINE-57. Add more elaborate message if submarine command is not 
recognized. Contributed by Adam Antal

(cherry picked from commit e5f4cd0fdae7e689789dd74bfbcfa6c52895f037)
---
 .../src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
index 69189f4..1106b2e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/Cli.java
@@ -97,7 +97,7 @@ public class Cli {
   }
 } else {
   printHelp();
-  throw new IllegalArgumentException("Bad parameters ");
+  throw new IllegalArgumentException("Unrecognized option: " + args[0]);
 }
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[hadoop] branch branch-3.2 updated: Logging fileSize of log files under NM Local Dir. Contributed by Prabhu Joseph

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 2e6beb1  Logging fileSize of log files under NM Local Dir. Contributed 
by Prabhu Joseph
2e6beb1 is described below

commit 2e6beb1550961a3828a96d67e21fea9f33c28c00
Author: Szilard Nemeth 
AuthorDate: Fri Aug 2 13:38:06 2019 +0200

Logging fileSize of log files under NM Local Dir. Contributed by Prabhu 
Joseph

(cherry picked from commit 54ac80176e8487b7a18cd9e16a11efa289d0b7df)
---
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java |  5 +
 .../src/main/resources/yarn-default.xml|  8 
 .../logaggregation/AppLogAggregatorImpl.java   | 18 +-
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 2164e7d..79593ea 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1342,6 +1342,11 @@ public class YarnConfiguration extends Configuration {
   public static final String LOG_AGGREGATION_RETAIN_SECONDS = YARN_PREFIX
   + "log-aggregation.retain-seconds";
   public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;
+
+  public static final String LOG_AGGREGATION_DEBUG_FILESIZE = YARN_PREFIX
+  + "log-aggregation.debug.filesize";
+  public static final long DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE
+  = 100 * 1024 * 1024;
   
   /**
* How long to wait between aggregated log retention checks. If set to
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index bcbb289..887a7c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1279,6 +1279,14 @@
   
 
   
+The log files created under NM Local Directories
+will be logged if it exceeds the configured bytes. This
+only takes effect if log4j level is at least Debug.
+yarn.log-aggregation.debug.filesize
+104857600
+  
+
+  
 Specify which log file controllers we will support. The first
 file controller we add will be used to write the aggregated logs.
 This comma separated configuration will work with the configuration:
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 04503ef..84eec4f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -109,7 +109,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   private final AtomicBoolean waiting = new AtomicBoolean(false);
   private int logAggregationTimes = 0;
   private int cleanupOldLogTimes = 0;
-
+  private long logFileSizeThreshold;
   private boolean renameTemporaryLogFileFailed = false;
 
  private final Map<ContainerId, ContainerLogAggregator> 
containerLogAggregators =
@@ -176,6 +176,9 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 this.nodeId = nodeId;
 this.logAggPolicy = getLogAggPolicy(conf);
 this.recoveredLogInitedTime = recoveredLogInitedTime;
+this.logFileSizeThreshold =
+conf.getLong(YarnConfiguration.LOG_AGGREGATION_DEBUG_FILESIZE,
+YarnConfiguration.DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE);
 if (logAggregationFileController == null) {
   // by default, use T-File Controller
   this.logAggregationFileController = new LogAggregationTFileController();
@@ -330,6 +333,19 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   uploadedLogsInThisCycle = true;
  List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
   
uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsI

[hadoop] branch branch-3.1 updated: Logging fileSize of log files under NM Local Dir. Contributed by Prabhu Joseph

2019-08-09 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new be9ac8a  Logging fileSize of log files under NM Local Dir. Contributed 
by Prabhu Joseph
be9ac8a is described below

commit be9ac8adf9d0e2327bc5d0871bd153bef9ec7694
Author: Szilard Nemeth 
AuthorDate: Fri Aug 2 13:38:06 2019 +0200

Logging fileSize of log files under NM Local Dir. Contributed by Prabhu 
Joseph

(cherry picked from commit 54ac80176e8487b7a18cd9e16a11efa289d0b7df)
---
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java |  5 +
 .../src/main/resources/yarn-default.xml|  8 
 .../logaggregation/AppLogAggregatorImpl.java   | 18 +-
 3 files changed, 30 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9f3136d..abf6fcf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1322,6 +1322,11 @@ public class YarnConfiguration extends Configuration {
   public static final String LOG_AGGREGATION_RETAIN_SECONDS = YARN_PREFIX
   + "log-aggregation.retain-seconds";
   public static final long DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS = -1;
+
+  public static final String LOG_AGGREGATION_DEBUG_FILESIZE = YARN_PREFIX
+  + "log-aggregation.debug.filesize";
+  public static final long DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE
+  = 100 * 1024 * 1024;
   
   /**
* How long to wait between aggregated log retention checks. If set to
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index ebeecdb..dfb6cbe 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1263,6 +1263,14 @@
   
 
   
+The log files created under NM Local Directories
+will be logged if it exceeds the configured bytes. This
+only takes effect if log4j level is at least Debug.
+yarn.log-aggregation.debug.filesize
+104857600
+  
+
+  
 Specify which log file controllers we will support. The first
 file controller we add will be used to write the aggregated logs.
 This comma separated configuration will work with the configuration:
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index c290eb4..5dfe6d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -108,7 +108,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   private final AtomicBoolean waiting = new AtomicBoolean(false);
   private int logAggregationTimes = 0;
   private int cleanupOldLogTimes = 0;
-
+  private long logFileSizeThreshold;
   private boolean renameTemporaryLogFileFailed = false;
 
  private final Map<ContainerId, ContainerLogAggregator> 
containerLogAggregators =
@@ -175,6 +175,9 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 this.nodeId = nodeId;
 this.logAggPolicy = getLogAggPolicy(conf);
 this.recoveredLogInitedTime = recoveredLogInitedTime;
+this.logFileSizeThreshold =
+conf.getLong(YarnConfiguration.LOG_AGGREGATION_DEBUG_FILESIZE,
+YarnConfiguration.DEFAULT_LOG_AGGREGATION_DEBUG_FILESIZE);
 if (logAggregationFileController == null) {
   // by default, use T-File Controller
   this.logAggregationFileController = new LogAggregationTFileController();
@@ -327,6 +330,19 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   uploadedLogsInThisCycle = true;
  List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
   
uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsI

[hadoop] branch branch-3.2 updated: YARN-9135. NM State store ResourceMappings serialization are tested with Strings instead of real Device objects. Contributed by Peter Bacsko

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new b20fd9e  YARN-9135. NM State store ResourceMappings serialization are 
tested with Strings instead of real Device objects. Contributed by Peter Bacsko
b20fd9e is described below

commit b20fd9e21295add7e80f07b471bba5c76e433aed
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 14:02:17 2019 +0200

YARN-9135. NM State store ResourceMappings serialization are tested with 
Strings instead of real Device objects. Contributed by Peter Bacsko
---
 .../node_modules/.bin/apidoc   |   1 +
 .../node_modules/.bin/markdown-it  |   1 +
 .../node_modules/.bin/r.js |   1 +
 .../node_modules/.bin/r_js |   1 +
 .../node_modules/.bin/semver   |   1 +
 .../node_modules/.bin/shjs |   1 +
 .../yarn.lock  | 422 +
 .../resources/numa/NumaResourceAllocation.java |  59 ++-
 .../resources/numa/NumaResourceAllocator.java  |  34 +-
 .../recovery/NMLeveldbStateStoreService.java   |   5 +-
 .../recovery/TestNMLeveldbStateStoreService.java   |  52 +--
 11 files changed, 519 insertions(+), 59 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/apidoc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/apidoc
new file mode 12
index 000..a588095
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/apidoc
@@ -0,0 +1 @@
+../../target/generated-sources/vendor/apidoc/bin/apidoc
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/markdown-it
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/markdown-it
new file mode 12
index 000..1ce5019
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/markdown-it
@@ -0,0 +1 @@
+../../target/generated-sources/vendor/markdown-it/bin/markdown-it.js
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r.js
new file mode 12
index 000..d4c664a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r.js
@@ -0,0 +1 @@
+../../target/generated-sources/vendor/requirejs/bin/r.js
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r_js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r_js
new file mode 12
index 000..d4c664a
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/r_js
@@ -0,0 +1 @@
+../../target/generated-sources/vendor/requirejs/bin/r.js
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/semver
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/semver
new file mode 12
index 000..2bb3fcb
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/semver
@@ -0,0 +1 @@
+../../target/generated-sources/vendor/semver/bin/semver
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/node_modules/.bin/shjs
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog

[hadoop] branch branch-3.1 updated: YARN-9135. NM State store ResourceMappings serialization are tested with Strings instead of real Device objects. Contributed by Peter Bacsko

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 6b4ded7  YARN-9135. NM State store ResourceMappings serialization are 
tested with Strings instead of real Device objects. Contributed by Peter Bacsko
6b4ded7 is described below

commit 6b4ded7647b1ad2c891d8b2ef064d74cc9ba3c75
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 14:03:50 2019 +0200

YARN-9135. NM State store ResourceMappings serialization are tested with 
Strings instead of real Device objects. Contributed by Peter Bacsko
---
 .../resources/numa/NumaResourceAllocation.java | 59 ++
 .../resources/numa/NumaResourceAllocator.java  | 34 -
 .../recovery/NMLeveldbStateStoreService.java   |  5 +-
 .../recovery/TestNMLeveldbStateStoreService.java   | 52 +++
 4 files changed, 91 insertions(+), 59 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
index f8d4739..e91ac3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
@@ -17,9 +17,11 @@
  */
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.numa;
 
+import com.google.common.collect.ImmutableMap;
+
 import java.io.Serializable;
-import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 
 /**
@@ -28,27 +30,18 @@ import java.util.Set;
  */
 public class NumaResourceAllocation implements Serializable {
   private static final long serialVersionUID = 6339719798446595123L;
-  private Map<String, Long> nodeVsMemory;
-  private Map<String, Integer> nodeVsCpus;
+  private final ImmutableMap<String, Long> nodeVsMemory;
+  private final ImmutableMap<String, Integer> nodeVsCpus;
 
-  public NumaResourceAllocation() {
-nodeVsMemory = new HashMap<>();
-nodeVsCpus = new HashMap<>();
+  public NumaResourceAllocation(Map<String, Long> memoryAllocations,
+  Map<String, Integer> cpuAllocations) {
+nodeVsMemory = ImmutableMap.copyOf(memoryAllocations);
+nodeVsCpus = ImmutableMap.copyOf(cpuAllocations);
   }
 
   public NumaResourceAllocation(String memNodeId, long memory, String 
cpuNodeId,
   int cpus) {
-this();
-nodeVsMemory.put(memNodeId, memory);
-nodeVsCpus.put(cpuNodeId, cpus);
-  }
-
-  public void addMemoryNode(String memNodeId, long memory) {
-nodeVsMemory.put(memNodeId, memory);
-  }
-
-  public void addCpuNode(String cpuNodeId, int cpus) {
-nodeVsCpus.put(cpuNodeId, cpus);
+this(ImmutableMap.of(memNodeId, memory), ImmutableMap.of(cpuNodeId, cpus));
   }
 
  public Set<String> getMemNodes() {
@@ -59,11 +52,37 @@ public class NumaResourceAllocation implements Serializable 
{
 return nodeVsCpus.keySet();
   }
 
-  public Map<String, Long> getNodeVsMemory() {
+  public ImmutableMap<String, Long> getNodeVsMemory() {
 return nodeVsMemory;
   }
 
-  public Map<String, Integer> getNodeVsCpus() {
+  public ImmutableMap<String, Integer> getNodeVsCpus() {
 return nodeVsCpus;
   }
-}
+
+  @Override
+  public String toString() {
+return "NumaResourceAllocation{" +
+"nodeVsMemory=" + nodeVsMemory +
+", nodeVsCpus=" + nodeVsCpus +
+'}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+NumaResourceAllocation that = (NumaResourceAllocation) o;
+return Objects.equals(nodeVsMemory, that.nodeVsMemory) &&
+Objects.equals(nodeVsCpus, that.nodeVsCpus);
+  }
+
+  @Override
+  public int hashCode() {
+return Objects.hash(nodeVsMemory, nodeVsCpus);
+  }
+}
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
index e152bda..f95e55e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-

[hadoop] branch trunk updated: YARN-9134. No test coverage for redefining FPGA / GPU resource types in TestResourceUtils. Contributed by Peter Bacsko

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e0517fe  YARN-9134. No test coverage for redefining FPGA / GPU 
resource types in TestResourceUtils. Contributed by Peter Bacsko
e0517fe is described below

commit e0517fea3399946a20852cefff300eb3d4d7ece7
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 14:36:07 2019 +0200

YARN-9134. No test coverage for redefining FPGA / GPU resource types in 
TestResourceUtils. Contributed by Peter Bacsko
---
 .../yarn/util/resource/TestResourceUtils.java  | 164 -
 1 file changed, 98 insertions(+), 66 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index 7a701a4..9cc96b6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -31,9 +31,15 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -43,6 +49,8 @@ import java.util.Map;
  * Test class to verify all resource utility methods.
  */
 public class TestResourceUtils {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestResourceUtils.class);
 
   private File nodeResourcesFile;
   private File resourceTypesFile;
@@ -52,13 +60,16 @@ public class TestResourceUtils {
 int resourceCount;
 Map<String, String> resourceNameUnitsMap;
 
-public ResourceFileInformation(String name, int count) {
+ResourceFileInformation(String name, int count) {
   filename = name;
   resourceCount = count;
   resourceNameUnitsMap = new HashMap<>();
 }
   }
 
+  @Rule
+  public ExpectedException expexted = ExpectedException.none();
+
   @Before
   public void setup() {
 ResourceUtils.resetResourceTypes();
@@ -66,14 +77,60 @@ public class TestResourceUtils {
 
   @After
   public void teardown() {
-if(nodeResourcesFile != null && nodeResourcesFile.exists()) {
+if (nodeResourcesFile != null && nodeResourcesFile.exists()) {
   nodeResourcesFile.delete();
 }
-if(resourceTypesFile != null && resourceTypesFile.exists()) {
+if (resourceTypesFile != null && resourceTypesFile.exists()) {
   resourceTypesFile.delete();
 }
   }
 
+  public static String setupResourceTypes(Configuration conf, String filename)
+  throws Exception {
+File source = new File(
+conf.getClassLoader().getResource(filename).getFile());
+File dest = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, dest);
+try {
+  ResourceUtils.getResourceTypes();
+} catch (Exception e) {
+  if (!dest.delete()) {
+LOG.error("Could not delete {}", dest);
+  }
+  throw e;
+}
+return dest.getAbsolutePath();
+  }
+
+  private Map<String, ResourceInformation> setupResourceTypesInternal(
+  Configuration conf, String srcFileName) throws IOException {
+URL srcFileUrl = conf.getClassLoader().getResource(srcFileName);
+if (srcFileUrl == null) {
+  throw new IllegalArgumentException(
+  "Source file does not exist: " + srcFileName);
+}
+File source = new File(srcFileUrl.getFile());
+File dest = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, dest);
+this.resourceTypesFile = dest;
+return ResourceUtils.getResourceTypes();
+  }
+
+  private Map<String, ResourceInformation> setupNodeResources(
+  Configuration conf, String srcFileName) throws IOException {
+URL srcFileUrl = conf.getClassLoader().getResource(srcFileName);
+if (srcFileUrl == null) {
+  throw new IllegalArgumentException(
+  "Source file does not exist: " + srcFileName);
+}
+File source = new File(srcFileUrl.getFile());
+File dest = new File(source.getParent(), "node-resources.xml");
+FileUtils.copyFile(source, dest);
+this.nodeResourcesFile = dest;
+return ResourceUtils
+.getNodeResourceInformation(conf);
+  }
+
   private void testMemoryAndVcores(Map res) {
 String memory = ResourceInformation.MEMORY_MB.getName();
 String vcores = ResourceInformati

[hadoop] branch trunk updated: YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b91099e  YARN-9451. AggregatedLogsBlock shows wrong NM http port. 
Contributed by Prabhu Joseph
b91099e is described below

commit b91099efd6e1fdcb31ec4ca7142439443c9ae536
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:05:28 2019 +0200

YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by 
Prabhu Joseph
---
 .../apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java | 14 --
 .../yarn/logaggregation/TestAggregatedLogsBlock.java   |  6 +-
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index ef6876a..ef5c324 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationWebUtils;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import com.google.inject.Inject;
 
@@ -104,8 +105,9 @@ public class AggregatedLogsBlock extends HtmlBlock {
 } catch (Exception fnf) {
   html.h1()
   .__("Logs not available for " + logEntity
-  + ". Aggregation may not be complete, "
-  + "Check back later or try the nodemanager at " + nodeId).__();
+  + ". Aggregation may not be complete, Check back later or "
+  + "try to find the container logs in the local directory of "
+  + "nodemanager " + nodeId).__();
   if(nmApplicationLogUrl != null)  {
 html.h1()
 .__("Or see application log at " + nmApplicationLogUrl)
@@ -129,6 +131,14 @@ public class AggregatedLogsBlock extends HtmlBlock {
 StringBuilder sb = new StringBuilder();
 String scheme = YarnConfiguration.useHttps(this.conf) ? "https://":
 "http://";
+
+String webAppURLWithoutScheme =
+WebAppUtils.getNMWebAppURLWithoutScheme(conf);
+if (webAppURLWithoutScheme.contains(":")) {
+  String httpPort = webAppURLWithoutScheme.split(":")[1];
+  nodeId = NodeId.fromString(nodeId).getHost() + ":" + httpPort;
+}
+
 
sb.append(scheme).append(nodeId).append("/node/application/").append(appId);
 return sb.toString();
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 1135f9e..66008a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -145,7 +145,11 @@ public class TestAggregatedLogsBlock {
 block.getWriter().flush();
 String out = data.toString();
 assertTrue(out
-.contains("Logs not available for entity. Aggregation may not be 
complete, Check back later or try the nodemanager at localhost:1234"));
+.contains("Logs not available for entity. Aggregation may not be "
++ "complete, Check back later or try to find the container logs "
++ "in the local directory of nodemanager localhost:1234"));
+assertTrue(out
+.contains("Or see application log at http://localhost:8042"));
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8442592  YARN-9451. AggregatedLogsBlock shows wrong NM http port. 
Contributed by Prabhu Joseph
8442592 is described below

commit 844259203f5bf630cb48b190f86dcac4beba0903
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:05:28 2019 +0200

YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by 
Prabhu Joseph

(cherry picked from commit b91099efd6e1fdcb31ec4ca7142439443c9ae536)
---
 .../apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java | 14 --
 .../yarn/logaggregation/TestAggregatedLogsBlock.java   |  6 +-
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index ef6876a..ef5c324 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationWebUtils;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import com.google.inject.Inject;
 
@@ -104,8 +105,9 @@ public class AggregatedLogsBlock extends HtmlBlock {
 } catch (Exception fnf) {
   html.h1()
   .__("Logs not available for " + logEntity
-  + ". Aggregation may not be complete, "
-  + "Check back later or try the nodemanager at " + nodeId).__();
+  + ". Aggregation may not be complete, Check back later or "
+  + "try to find the container logs in the local directory of "
+  + "nodemanager " + nodeId).__();
   if(nmApplicationLogUrl != null)  {
 html.h1()
 .__("Or see application log at " + nmApplicationLogUrl)
@@ -129,6 +131,14 @@ public class AggregatedLogsBlock extends HtmlBlock {
 StringBuilder sb = new StringBuilder();
 String scheme = YarnConfiguration.useHttps(this.conf) ? "https://":
 "http://";
+
+String webAppURLWithoutScheme =
+WebAppUtils.getNMWebAppURLWithoutScheme(conf);
+if (webAppURLWithoutScheme.contains(":")) {
+  String httpPort = webAppURLWithoutScheme.split(":")[1];
+  nodeId = NodeId.fromString(nodeId).getHost() + ":" + httpPort;
+}
+
 
sb.append(scheme).append(nodeId).append("/node/application/").append(appId);
 return sb.toString();
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 1135f9e..66008a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -145,7 +145,11 @@ public class TestAggregatedLogsBlock {
 block.getWriter().flush();
 String out = data.toString();
 assertTrue(out
-.contains("Logs not available for entity. Aggregation may not be 
complete, Check back later or try the nodemanager at localhost:1234"));
+.contains("Logs not available for entity. Aggregation may not be "
++ "complete, Check back later or try to find the container logs "
++ "in the local directory of nodemanager localhost:1234"));
+assertTrue(out
+.contains("Or see application log at http://localhost:8042"));
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 148121d  YARN-9451. AggregatedLogsBlock shows wrong NM http port. 
Contributed by Prabhu Joseph
148121d is described below

commit 148121d889f04b8e967861b32c08db43c03ee9e4
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:05:28 2019 +0200

YARN-9451. AggregatedLogsBlock shows wrong NM http port. Contributed by 
Prabhu Joseph

(cherry picked from commit b91099efd6e1fdcb31ec4ca7142439443c9ae536)
---
 .../apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java | 14 --
 .../yarn/logaggregation/TestAggregatedLogsBlock.java   |  6 +-
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index ef6876a..ef5c324 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.logaggregation.LogAggregationWebUtils;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileController;
 import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import com.google.inject.Inject;
 
@@ -104,8 +105,9 @@ public class AggregatedLogsBlock extends HtmlBlock {
 } catch (Exception fnf) {
   html.h1()
   .__("Logs not available for " + logEntity
-  + ". Aggregation may not be complete, "
-  + "Check back later or try the nodemanager at " + nodeId).__();
+  + ". Aggregation may not be complete, Check back later or "
+  + "try to find the container logs in the local directory of "
+  + "nodemanager " + nodeId).__();
   if(nmApplicationLogUrl != null)  {
 html.h1()
 .__("Or see application log at " + nmApplicationLogUrl)
@@ -129,6 +131,14 @@ public class AggregatedLogsBlock extends HtmlBlock {
 StringBuilder sb = new StringBuilder();
 String scheme = YarnConfiguration.useHttps(this.conf) ? "https://":
 "http://";
+
+String webAppURLWithoutScheme =
+WebAppUtils.getNMWebAppURLWithoutScheme(conf);
+if (webAppURLWithoutScheme.contains(":")) {
+  String httpPort = webAppURLWithoutScheme.split(":")[1];
+  nodeId = NodeId.fromString(nodeId).getHost() + ":" + httpPort;
+}
+
 
sb.append(scheme).append(nodeId).append("/node/application/").append(appId);
 return sb.toString();
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 1135f9e..66008a8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -145,7 +145,11 @@ public class TestAggregatedLogsBlock {
 block.getWriter().flush();
 String out = data.toString();
 assertTrue(out
-.contains("Logs not available for entity. Aggregation may not be 
complete, Check back later or try the nodemanager at localhost:1234"));
+.contains("Logs not available for entity. Aggregation may not be "
++ "complete, Check back later or try to find the container logs "
++ "in the local directory of nodemanager localhost:1234"));
+assertTrue(out
+.contains("Or see application log at http://localhost:8042"));
 
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9723. ApplicationPlacementContext is not required for terminated jobs during recovery. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e4b538b  YARN-9723. ApplicationPlacementContext is not required for 
terminated jobs during recovery. Contributed by Prabhu Joseph
e4b538b is described below

commit e4b538bbda6dc25d7f45bffd6a4ce49f3f84acdc
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:15:43 2019 +0200

YARN-9723. ApplicationPlacementContext is not required for terminated jobs 
during recovery. Contributed by Prabhu Joseph
---
 .../yarn/server/resourcemanager/RMAppManager.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index bdc68ac..3cf3dd1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -64,6 +64,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
@@ -371,7 +372,7 @@ public class RMAppManager implements 
EventHandler,
 // Passing start time as -1. It will be eventually set in RMAppImpl
 // constructor.
 RMAppImpl application = createAndPopulateNewRMApp(
-submissionContext, submitTime, user, false, -1);
+submissionContext, submitTime, user, false, -1, null);
 try {
   if (UserGroupInformation.isSecurityEnabled()) {
 this.rmContext.getDelegationTokenRenewer()
@@ -408,18 +409,22 @@ public class RMAppManager implements 
EventHandler,
 // create and recover app.
 RMAppImpl application =
 createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
-appState.getUser(), true, appState.getStartTime());
+appState.getUser(), true, appState.getStartTime(),
+appState.getState());
 
 application.handle(new RMAppRecoverEvent(appId, rmState));
   }
 
   private RMAppImpl createAndPopulateNewRMApp(
   ApplicationSubmissionContext submissionContext, long submitTime,
-  String user, boolean isRecovery, long startTime) throws YarnException {
+  String user, boolean isRecovery, long startTime,
+  RMAppState recoveredFinalState) throws YarnException {
 
-ApplicationPlacementContext placementContext =
-placeApplication(rmContext.getQueuePlacementManager(),
-submissionContext, user, isRecovery);
+ApplicationPlacementContext placementContext = null;
+if (recoveredFinalState == null) {
+  placementContext = placeApplication(rmContext.getQueuePlacementManager(),
+  submissionContext, user, isRecovery);
+}
 
 // We only replace the queue when it's a new application
 if (!isRecovery) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9723. ApplicationPlacementContext is not required for terminated jobs during recovery. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new c5aea8c  YARN-9723. ApplicationPlacementContext is not required for 
terminated jobs during recovery. Contributed by Prabhu Joseph
c5aea8c is described below

commit c5aea8ca567fd4e12f8e784343c04a5304ece403
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:15:43 2019 +0200

YARN-9723. ApplicationPlacementContext is not required for terminated jobs 
during recovery. Contributed by Prabhu Joseph

(cherry picked from commit e4b538bbda6dc25d7f45bffd6a4ce49f3f84acdc)
---
 .../yarn/server/resourcemanager/RMAppManager.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index feaff00..c7dcacf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -64,6 +64,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
@@ -367,7 +368,7 @@ public class RMAppManager implements 
EventHandler,
 // Passing start time as -1. It will be eventually set in RMAppImpl
 // constructor.
 RMAppImpl application = createAndPopulateNewRMApp(
-submissionContext, submitTime, user, false, -1);
+submissionContext, submitTime, user, false, -1, null);
 try {
   if (UserGroupInformation.isSecurityEnabled()) {
 this.rmContext.getDelegationTokenRenewer()
@@ -404,18 +405,22 @@ public class RMAppManager implements 
EventHandler,
 // create and recover app.
 RMAppImpl application =
 createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
-appState.getUser(), true, appState.getStartTime());
+appState.getUser(), true, appState.getStartTime(),
+appState.getState());
 
 application.handle(new RMAppRecoverEvent(appId, rmState));
   }
 
   private RMAppImpl createAndPopulateNewRMApp(
   ApplicationSubmissionContext submissionContext, long submitTime,
-  String user, boolean isRecovery, long startTime) throws YarnException {
+  String user, boolean isRecovery, long startTime,
+  RMAppState recoveredFinalState) throws YarnException {
 
-ApplicationPlacementContext placementContext =
-placeApplication(rmContext.getQueuePlacementManager(),
-submissionContext, user, isRecovery);
+ApplicationPlacementContext placementContext = null;
+if (recoveredFinalState == null) {
+  placementContext = placeApplication(rmContext.getQueuePlacementManager(),
+  submissionContext, user, isRecovery);
+}
 
 // We only replace the queue when it's a new application
 if (!isRecovery) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9723. ApplicationPlacementContext is not required for terminated jobs during recovery. Contributed by Prabhu Joseph

2019-08-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 9da9b6d  YARN-9723. ApplicationPlacementContext is not required for 
terminated jobs during recovery. Contributed by Prabhu Joseph
9da9b6d is described below

commit 9da9b6d58e3cf9322a68eb261deab6a1be3e98c3
Author: Szilard Nemeth 
AuthorDate: Mon Aug 12 15:15:43 2019 +0200

YARN-9723. ApplicationPlacementContext is not required for terminated jobs 
during recovery. Contributed by Prabhu Joseph

(cherry picked from commit e4b538bbda6dc25d7f45bffd6a4ce49f3f84acdc)
---
 .../yarn/server/resourcemanager/RMAppManager.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 7f9f51c..46e92e3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -63,6 +63,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
@@ -322,7 +323,7 @@ public class RMAppManager implements 
EventHandler,
 // Passing start time as -1. It will be eventually set in RMAppImpl
 // constructor.
 RMAppImpl application = createAndPopulateNewRMApp(
-submissionContext, submitTime, user, false, -1);
+submissionContext, submitTime, user, false, -1, null);
 try {
   if (UserGroupInformation.isSecurityEnabled()) {
 this.rmContext.getDelegationTokenRenewer()
@@ -359,18 +360,22 @@ public class RMAppManager implements 
EventHandler,
 // create and recover app.
 RMAppImpl application =
 createAndPopulateNewRMApp(appContext, appState.getSubmitTime(),
-appState.getUser(), true, appState.getStartTime());
+appState.getUser(), true, appState.getStartTime(),
+appState.getState());
 
 application.handle(new RMAppRecoverEvent(appId, rmState));
   }
 
   private RMAppImpl createAndPopulateNewRMApp(
   ApplicationSubmissionContext submissionContext, long submitTime,
-  String user, boolean isRecovery, long startTime) throws YarnException {
+  String user, boolean isRecovery, long startTime,
+  RMAppState recoveredFinalState) throws YarnException {
 
-ApplicationPlacementContext placementContext =
-placeApplication(rmContext.getQueuePlacementManager(),
-submissionContext, user, isRecovery);
+ApplicationPlacementContext placementContext = null;
+if (recoveredFinalState == null) {
+  placementContext = placeApplication(rmContext.getQueuePlacementManager(),
+  submissionContext, user, isRecovery);
+}
 
 // We only replace the queue when it's a new application
 if (!isRecovery) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated (b040eb9 -> cb91ab7)

2019-08-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from b040eb9  HDFS-14148. HDFS OIV ReverseXML SnapshotSection parser throws 
exception when there are more than one snapshottable directory (#1274) 
Contributed by Siyao Meng.
 new a762a6b  Revert "YARN-9135. NM State store ResourceMappings 
serialization are tested with Strings instead of real Device objects. 
Contributed by Peter Bacsko"
 new cb91ab7  YARN-9135. NM State store ResourceMappings serialization are 
tested with Strings instead of real Device objects. Contributed by Peter Bacsko

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../containermanager/linux/resources/numa/NumaResourceAllocator.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] 01/02: Revert "YARN-9135. NM State store ResourceMappings serialization are tested with Strings instead of real Device objects. Contributed by Peter Bacsko"

2019-08-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit a762a6be2943ec54f72b294678d93fee6dbd8921
Author: Szilard Nemeth 
AuthorDate: Tue Aug 13 15:44:50 2019 +0200

Revert "YARN-9135. NM State store ResourceMappings serialization are tested 
with Strings instead of real Device objects. Contributed by Peter Bacsko"

This reverts commit b20fd9e21295add7e80f07b471bba5c76e433aed.
Commit is reverted since unnecessary files were added, accidentally.
---
 .../resources/numa/NumaResourceAllocation.java | 59 --
 .../resources/numa/NumaResourceAllocator.java  | 34 +
 .../recovery/NMLeveldbStateStoreService.java   |  5 +-
 .../recovery/TestNMLeveldbStateStoreService.java   | 52 ---
 4 files changed, 59 insertions(+), 91 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
index e91ac3e..f8d4739 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
@@ -17,11 +17,9 @@
  */
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.numa;
 
-import com.google.common.collect.ImmutableMap;
-
 import java.io.Serializable;
+import java.util.HashMap;
 import java.util.Map;
-import java.util.Objects;
 import java.util.Set;
 
 /**
@@ -30,18 +28,27 @@ import java.util.Set;
  */
 public class NumaResourceAllocation implements Serializable {
   private static final long serialVersionUID = 6339719798446595123L;
-  private final ImmutableMap nodeVsMemory;
-  private final ImmutableMap nodeVsCpus;
+  private Map nodeVsMemory;
+  private Map nodeVsCpus;
 
-  public NumaResourceAllocation(Map memoryAllocations,
-  Map cpuAllocations) {
-nodeVsMemory = ImmutableMap.copyOf(memoryAllocations);
-nodeVsCpus = ImmutableMap.copyOf(cpuAllocations);
+  public NumaResourceAllocation() {
+nodeVsMemory = new HashMap<>();
+nodeVsCpus = new HashMap<>();
   }
 
   public NumaResourceAllocation(String memNodeId, long memory, String 
cpuNodeId,
   int cpus) {
-this(ImmutableMap.of(memNodeId, memory), ImmutableMap.of(cpuNodeId, cpus));
+this();
+nodeVsMemory.put(memNodeId, memory);
+nodeVsCpus.put(cpuNodeId, cpus);
+  }
+
+  public void addMemoryNode(String memNodeId, long memory) {
+nodeVsMemory.put(memNodeId, memory);
+  }
+
+  public void addCpuNode(String cpuNodeId, int cpus) {
+nodeVsCpus.put(cpuNodeId, cpus);
   }
 
   public Set getMemNodes() {
@@ -52,37 +59,11 @@ public class NumaResourceAllocation implements Serializable 
{
 return nodeVsCpus.keySet();
   }
 
-  public ImmutableMap getNodeVsMemory() {
+  public Map getNodeVsMemory() {
 return nodeVsMemory;
   }
 
-  public ImmutableMap getNodeVsCpus() {
+  public Map getNodeVsCpus() {
 return nodeVsCpus;
   }
-
-  @Override
-  public String toString() {
-return "NumaResourceAllocation{" +
-"nodeVsMemory=" + nodeVsMemory +
-", nodeVsCpus=" + nodeVsCpus +
-'}';
-  }
-
-  @Override
-  public boolean equals(Object o) {
-if (this == o) {
-  return true;
-}
-if (o == null || getClass() != o.getClass()) {
-  return false;
-}
-NumaResourceAllocation that = (NumaResourceAllocation) o;
-return Objects.equals(nodeVsMemory, that.nodeVsMemory) &&
-Objects.equals(nodeVsCpus, that.nodeVsCpus);
-  }
-
-  @Override
-  public int hashCode() {
-return Objects.hash(nodeVsMemory, nodeVsCpus);
-  }
-}
\ No newline at end of file
+}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
index f95e55e..e152bda 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linu

[hadoop] 02/02: YARN-9135. NM State store ResourceMappings serialization are tested with Strings instead of real Device objects. Contributed by Peter Bacsko

2019-08-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit cb91ab73b088ad68c5757cff3734d2667f5cb71c
Author: Szilard Nemeth 
AuthorDate: Fri Jul 12 17:20:42 2019 +0200

YARN-9135. NM State store ResourceMappings serialization are tested with 
Strings instead of real Device objects. Contributed by Peter Bacsko

(cherry picked from commit 8b3c6791b13fc57891cf81e83d4b626b4f2932e6)
---
 .../resources/numa/NumaResourceAllocation.java | 59 ++
 .../resources/numa/NumaResourceAllocator.java  | 34 -
 .../recovery/NMLeveldbStateStoreService.java   |  5 +-
 .../recovery/TestNMLeveldbStateStoreService.java   | 52 +++
 4 files changed, 91 insertions(+), 59 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
index f8d4739..e91ac3e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocation.java
@@ -17,9 +17,11 @@
  */
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.numa;
 
+import com.google.common.collect.ImmutableMap;
+
 import java.io.Serializable;
-import java.util.HashMap;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 
 /**
@@ -28,27 +30,18 @@ import java.util.Set;
  */
 public class NumaResourceAllocation implements Serializable {
   private static final long serialVersionUID = 6339719798446595123L;
-  private Map nodeVsMemory;
-  private Map nodeVsCpus;
+  private final ImmutableMap nodeVsMemory;
+  private final ImmutableMap nodeVsCpus;
 
-  public NumaResourceAllocation() {
-nodeVsMemory = new HashMap<>();
-nodeVsCpus = new HashMap<>();
+  public NumaResourceAllocation(Map memoryAllocations,
+  Map cpuAllocations) {
+nodeVsMemory = ImmutableMap.copyOf(memoryAllocations);
+nodeVsCpus = ImmutableMap.copyOf(cpuAllocations);
   }
 
   public NumaResourceAllocation(String memNodeId, long memory, String 
cpuNodeId,
   int cpus) {
-this();
-nodeVsMemory.put(memNodeId, memory);
-nodeVsCpus.put(cpuNodeId, cpus);
-  }
-
-  public void addMemoryNode(String memNodeId, long memory) {
-nodeVsMemory.put(memNodeId, memory);
-  }
-
-  public void addCpuNode(String cpuNodeId, int cpus) {
-nodeVsCpus.put(cpuNodeId, cpus);
+this(ImmutableMap.of(memNodeId, memory), ImmutableMap.of(cpuNodeId, cpus));
   }
 
   public Set getMemNodes() {
@@ -59,11 +52,37 @@ public class NumaResourceAllocation implements Serializable 
{
 return nodeVsCpus.keySet();
   }
 
-  public Map getNodeVsMemory() {
+  public ImmutableMap getNodeVsMemory() {
 return nodeVsMemory;
   }
 
-  public Map getNodeVsCpus() {
+  public ImmutableMap getNodeVsCpus() {
 return nodeVsCpus;
   }
-}
+
+  @Override
+  public String toString() {
+return "NumaResourceAllocation{" +
+"nodeVsMemory=" + nodeVsMemory +
+", nodeVsCpus=" + nodeVsCpus +
+'}';
+  }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+NumaResourceAllocation that = (NumaResourceAllocation) o;
+return Objects.equals(nodeVsMemory, that.nodeVsMemory) &&
+Objects.equals(nodeVsCpus, that.nodeVsCpus);
+  }
+
+  @Override
+  public int hashCode() {
+return Objects.hash(nodeVsMemory, nodeVsCpus);
+  }
+}
\ No newline at end of file
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
index e152bda..7b49b1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/numa/NumaResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-

[hadoop] branch branch-3.2 updated: YARN-9134. No test coverage for redefining FPGA / GPU resource types in TestResourceUtils. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 9a87e74  YARN-9134. No test coverage for redefining FPGA / GPU 
resource types in TestResourceUtils. Contributed by Peter Bacsko
9a87e74 is described below

commit 9a87e74e54e45f6542510fa6933d9dcc836b0376
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 16:46:34 2019 +0200

YARN-9134. No test coverage for redefining FPGA / GPU resource types in 
TestResourceUtils. Contributed by Peter Bacsko
---
 .../yarn/util/resource/TestResourceUtils.java  | 160 +
 1 file changed, 97 insertions(+), 63 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
index c96982d..95cf83e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResourceUtils.java
@@ -28,9 +28,15 @@ import 
org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
+import java.net.URL;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -38,6 +44,8 @@ import java.util.Map;
  * Test class to verify all resource utility methods.
  */
 public class TestResourceUtils {
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestResourceUtils.class);
 
   private File nodeResourcesFile;
   private File resourceTypesFile;
@@ -54,6 +62,9 @@ public class TestResourceUtils {
 }
   }
 
+  @Rule
+  public ExpectedException expexted = ExpectedException.none();
+
   public static void addNewTypesToResources(String... resourceTypes) {
 // Initialize resource map
 Map riMap = new HashMap<>();
@@ -78,14 +89,60 @@ public class TestResourceUtils {
 
   @After
   public void teardown() {
-if(nodeResourcesFile != null && nodeResourcesFile.exists()) {
+if (nodeResourcesFile != null && nodeResourcesFile.exists()) {
   nodeResourcesFile.delete();
 }
-if(resourceTypesFile != null && resourceTypesFile.exists()) {
+if (resourceTypesFile != null && resourceTypesFile.exists()) {
   resourceTypesFile.delete();
 }
   }
 
+  public static String setupResourceTypes(Configuration conf, String filename)
+  throws Exception {
+File source = new File(
+conf.getClassLoader().getResource(filename).getFile());
+File dest = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, dest);
+try {
+  ResourceUtils.getResourceTypes();
+} catch (Exception e) {
+  if (!dest.delete()) {
+LOG.error("Could not delete {}", dest);
+  }
+  throw e;
+}
+return dest.getAbsolutePath();
+  }
+
+  private Map setupResourceTypesInternal(
+  Configuration conf, String srcFileName) throws IOException {
+URL srcFileUrl = conf.getClassLoader().getResource(srcFileName);
+if (srcFileUrl == null) {
+  throw new IllegalArgumentException(
+  "Source file does not exist: " + srcFileName);
+}
+File source = new File(srcFileUrl.getFile());
+File dest = new File(source.getParent(), "resource-types.xml");
+FileUtils.copyFile(source, dest);
+this.resourceTypesFile = dest;
+return ResourceUtils.getResourceTypes();
+  }
+
+  private Map setupNodeResources(
+  Configuration conf, String srcFileName) throws IOException {
+URL srcFileUrl = conf.getClassLoader().getResource(srcFileName);
+if (srcFileUrl == null) {
+  throw new IllegalArgumentException(
+  "Source file does not exist: " + srcFileName);
+}
+File source = new File(srcFileUrl.getFile());
+File dest = new File(source.getParent(), "node-resources.xml");
+FileUtils.copyFile(source, dest);
+this.nodeResourcesFile = dest;
+return ResourceUtils
+.getNodeResourceInformation(conf);
+  }
+
   private void testMemoryAndVcores(Map res) {
 String memory = ResourceInformation.MEMORY_MB.getName();
 String vcores = ResourceInformation.VCORES.getName();
@@ -104,8 +161,7 @@ public class TestResourceUtils {
   }
 
   @Test
-  public void testGetResourceTypes() throws Exception {
-
+  public void testGetResourceTypes() {
 Map res = ResourceUtils.getResourc

[hadoop] branch trunk updated: YARN-9140. Code cleanup in ResourcePluginManager.initialize and in TestResourcePluginManager. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e5e6093  YARN-9140. Code cleanup in ResourcePluginManager.initialize 
and in TestResourcePluginManager. Contributed by Peter Bacsko
e5e6093 is described below

commit e5e609384f68cc45b0c2bfbde0a49426c90017d3
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 16:58:22 2019 +0200

YARN-9140. Code cleanup in ResourcePluginManager.initialize and in 
TestResourcePluginManager. Contributed by Peter Bacsko
---
 .../resourceplugin/ResourcePluginManager.java  | 84 +
 .../resourceplugin/TestResourcePluginManager.java  | 86 ++
 2 files changed, 91 insertions(+), 79 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index 1274b64..84cdd7a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -20,6 +20,7 @@ package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugi
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -44,7 +45,6 @@ import org.slf4j.LoggerFactory;
 import java.lang.reflect.Method;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
@@ -68,8 +68,29 @@ public class ResourcePluginManager {
   public void initialize(Context context)
   throws YarnException, ClassNotFoundException {
 Configuration conf = context.getConf();
-Map pluginMap = new HashMap<>();
+String[] plugins = getPluginsFromConfig(conf);
 
+Map pluginMap = Maps.newHashMap();
+if (plugins != null) {
+  pluginMap = initializePlugins(context, plugins);
+}
+
+// Try to load pluggable device plugins
+boolean pluggableDeviceFrameworkEnabled = conf.getBoolean(
+YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED,
+YarnConfiguration.DEFAULT_NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED);
+
+if (pluggableDeviceFrameworkEnabled) {
+  initializePluggableDevicePlugins(context, conf, pluginMap);
+} else {
+  LOG.info("The pluggable device framework is not enabled."
+  + " If you want, please set true to {}",
+  YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_ENABLED);
+}
+configuredPlugins = Collections.unmodifiableMap(pluginMap);
+  }
+
+  private String[] getPluginsFromConfig(Configuration conf) {
 String[] plugins = conf.getStrings(YarnConfiguration.NM_RESOURCE_PLUGINS);
 if (plugins == null || plugins.length == 0) {
   LOG.info("No Resource plugins found from configuration!");
@@ -77,25 +98,18 @@ public class ResourcePluginManager {
 LOG.info("Found Resource plugins from configuration: "
 + Arrays.toString(plugins));
 
-if (plugins != null) {
-  // Initialize each plugins
-  for (String resourceName : plugins) {
-resourceName = resourceName.trim();
-if (!SUPPORTED_RESOURCE_PLUGINS.contains(resourceName)) {
-  String msg =
-  "Trying to initialize resource plugin with name=" + resourceName
-  + ", it is not supported, list of supported plugins:"
-  + StringUtils.join(",", SUPPORTED_RESOURCE_PLUGINS);
-  LOG.error(msg);
-  throw new YarnException(msg);
-}
+return plugins;
+  }
 
-if (pluginMap.containsKey(resourceName)) {
-  LOG.warn("Ignoring duplicate Resource plugin definition: " +
-  resourceName);
-  continue;
-}
+  private Map initializePlugins(
+  Context context, String[] plugins) throws YarnException {
+Map pluginMap = Maps.newHashMap();
 
+for (String resourceName : plugins) {
+  resourceName = resourceName.trim();
+  ensurePluginIsSupported(resourceName);
+
+  if (!isPluginDuplicate(pluginMap, resourceNa

[hadoop] branch branch-3.2 updated: YARN-9140. Code cleanup in ResourcePluginManager.initialize and in TestResourcePluginManager. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4dc477b  YARN-9140. Code cleanup in ResourcePluginManager.initialize 
and in TestResourcePluginManager. Contributed by Peter Bacsko
4dc477b is described below

commit 4dc477b606485226e5bb04f86f2de001434c9c1e
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 17:01:41 2019 +0200

YARN-9140. Code cleanup in ResourcePluginManager.initialize and in 
TestResourcePluginManager. Contributed by Peter Bacsko
---
 .../resourceplugin/ResourcePluginManager.java  | 67 +++---
 .../resourceplugin/TestResourcePluginManager.java  | 48 +++-
 2 files changed, 67 insertions(+), 48 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index d6edfdd..4ace3ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -19,6 +19,7 @@
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin;
 
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,7 +34,6 @@ import org.slf4j.LoggerFactory;
 
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
@@ -56,6 +56,17 @@ public class ResourcePluginManager {
   throws YarnException {
 Configuration conf = context.getConf();
 
+String[] plugins = getPluginsFromConfig(conf);
+
+Map pluginMap = Maps.newHashMap();
+if (plugins != null) {
+  pluginMap = initializePlugins(context, plugins);
+}
+
+configuredPlugins = Collections.unmodifiableMap(pluginMap);
+  }
+
+  private String[] getPluginsFromConfig(Configuration conf) {
 String[] plugins = conf.getStrings(YarnConfiguration.NM_RESOURCE_PLUGINS);
 if (plugins == null || plugins.length == 0) {
   LOG.info("No Resource plugins found from configuration!");
@@ -63,27 +74,19 @@ public class ResourcePluginManager {
 LOG.info("Found Resource plugins from configuration: "
 + Arrays.toString(plugins));
 
-if (plugins != null) {
-  Map pluginMap = new HashMap<>();
-
-  // Initialize each plugins
-  for (String resourceName : plugins) {
-resourceName = resourceName.trim();
-if (!SUPPORTED_RESOURCE_PLUGINS.contains(resourceName)) {
-  String msg =
-  "Trying to initialize resource plugin with name=" + resourceName
-  + ", it is not supported, list of supported plugins:"
-  + StringUtils.join(",", SUPPORTED_RESOURCE_PLUGINS);
-  LOG.error(msg);
-  throw new YarnException(msg);
-}
+return plugins;
+  }
 
-if (pluginMap.containsKey(resourceName)) {
-  LOG.warn("Ignoring duplicate Resource plugin definition: " +
-  resourceName);
-  continue;
-}
 
+  private Map initializePlugins(
+  Context context, String[] plugins) throws YarnException {
+Map pluginMap = Maps.newHashMap();
+
+for (String resourceName : plugins) {
+  resourceName = resourceName.trim();
+  ensurePluginIsSupported(resourceName);
+
+  if (!isPluginDuplicate(pluginMap, resourceName)) {
 ResourcePlugin plugin = null;
 if (resourceName.equals(GPU_URI)) {
   final GpuDiscoverer gpuDiscoverer = new GpuDiscoverer();
@@ -103,11 +106,33 @@ public class ResourcePluginManager {
 LOG.info("Initialized plugin {}", plugin);
 pluginMap.put(resourceName, plugin);
   }
+}
+return pluginMap;
+  }
 
-  configuredPlugins = Collections.unmodifiableMap(pluginMap);
+  private void ensurePluginIsSupported(String resourceName)
+  throws YarnException {
+if (!SUPPORTED_RESOURCE_PLUGINS.contains(resourceName)) {
+  String msg =
+  "Trying to initialize resource plugin with name=" + resourceName
+  +

[hadoop] branch trunk updated: YARN-9133. Make tests more easy to comprehend in TestGpuResourceHandler. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3e04104  YARN-9133. Make tests more easy to comprehend in 
TestGpuResourceHandler. Contributed by Peter Bacsko
3e04104 is described below

commit 3e0410449fb36da098c897e874a18258253eac81
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 17:13:25 2019 +0200

YARN-9133. Make tests more easy to comprehend in TestGpuResourceHandler. 
Contributed by Peter Bacsko
---
 .../resources/gpu/TestGpuResourceHandlerImpl.java  | 352 -
 1 file changed, 204 insertions(+), 148 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java
index 4b50454..392497f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandlerImpl.java
@@ -18,6 +18,8 @@
 
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
@@ -28,6 +30,7 @@ import 
org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@@ -43,9 +46,10 @@ import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreServic
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import 
org.apache.hadoop.yarn.util.resource.CustomResourceTypesConfigurationProvider;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -53,11 +57,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -65,7 +71,6 @@ import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -103,11 +108,14 @@ public class TestGpuResourceHandlerImpl {
   }
 
   private File setupFakeGpuDiscoveryBinary() throws IOException {
-File fakeBinary = new File(getTestParentDirectory() + "/fake-nvidia-smi");
+File fakeBinary = new File(getTestParentDirectory() + "/nvidia-smi");
 touchFile(fakeBinary);
 return fakeBinary;
   }
 
+  @Rule
+  public ExpectedException expected = ExpectedException.none();
+
   @Before
   public void setup() throws IOException {
 createTestDataDirectory();
@@ -120,16 +128,20 @@ public class TestGpuResourceHandlerImpl {
 mockNMStateStore = mock(NMStateStoreService.class);
 
 Configuration conf = new Configuration();
+Context nmContext = createMockNmContext(conf);
+
+gpuDiscoverer = new GpuDiscoverer();
+gpuResourceHandler = new GpuResourceHandlerImpl(nmContext,
+mockCGroupsHandler, mockPrivilegedExecutor, gpuDiscoverer);
+  }
 
+  private Context createMockNmContext(Configuration conf) {
 Context nmctx = mock(Context.class);
 when(nmctx.getNMStateStore()).thenReturn(mockNMStateStore);
   

[hadoop] branch branch-3.2 updated: YARN-9133. Make tests more easy to comprehend in TestGpuResourceHandler. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 4bb238c  YARN-9133. Make tests more easy to comprehend in 
TestGpuResourceHandler. Contributed by Peter Bacsko
4bb238c is described below

commit 4bb238c480581638655f11d94a5979cade1eb33c
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 17:16:54 2019 +0200

YARN-9133. Make tests more easy to comprehend in TestGpuResourceHandler. 
Contributed by Peter Bacsko
---
 .../resources/gpu/TestGpuResourceHandler.java  | 352 -
 1 file changed, 204 insertions(+), 148 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
index dad30ec..93af10a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
@@ -18,6 +18,8 @@
 
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Maps;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
@@ -28,6 +30,7 @@ import 
org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ResourceMappings;
@@ -43,9 +46,10 @@ import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreServic
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
 import org.junit.After;
 import org.apache.hadoop.yarn.util.resource.TestResourceUtils;
-import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -53,11 +57,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyString;
@@ -65,7 +71,6 @@ import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -103,11 +108,14 @@ public class TestGpuResourceHandler {
   }
 
   private File setupFakeGpuDiscoveryBinary() throws IOException {
-File fakeBinary = new File(getTestParentDirectory() + "/fake-nvidia-smi");
+File fakeBinary = new File(getTestParentDirectory() + "/nvidia-smi");
 touchFile(fakeBinary);
 return fakeBinary;
   }
 
+  @Rule
+  public ExpectedException expected = ExpectedException.none();
+
   @Before
   public void setup() throws IOException {
 createTestDataDirectory();
@@ -119,16 +127,20 @@ public class TestGpuResourceHandler {
 mockNMStateStore = mock(NMStateStoreService.class);
 
 Configuration conf = new Configuration();
+Context nmContext = createMockNmContext(conf);
+
+gpuDiscoverer = new GpuDiscoverer();
+gpuResourceHandler = new GpuResourceHandlerImpl(nmContext,
+mockCGroupsHandler, mockPrivilegedExecutor, gpuDiscoverer);
+  }
 
+  private Context createMockNmContext(Configuration conf) {
 Context nmctx = mock(Context.class);
 when(nmctx.getNMStateStore()).thenReturn(mockNMStateStore);
 when(nmctx.getConf()).thenReturn(conf);
 runnin

[hadoop] branch trunk updated: YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl… (#1261)

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c89bdfa  YARN-9676. Add DEBUG and TRACE level messages to 
AppLogAggregatorImpl… (#1261)
c89bdfa is described below

commit c89bdfacc8715fa6d72acd85437ab8cd257c8aad
Author: Adam Antal 
AuthorDate: Wed Aug 14 17:35:16 2019 +0200

YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl… 
(#1261)

* YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl and 
connected classes

* Using {} placeholder, and increasing loglevel if log aggregation failed.
---
 .../logaggregation/AppLogAggregatorImpl.java   | 42 +-
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index ef14d2a..77bec72 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -108,7 +108,6 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   // These variables are only for testing
   private final AtomicBoolean waiting = new AtomicBoolean(false);
   private int logAggregationTimes = 0;
-  private int cleanupOldLogTimes = 0;
   private long logFileSizeThreshold;
   private boolean renameTemporaryLogFileFailed = false;
 
@@ -196,10 +195,16 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
 }
 boolean logAggregationInRolling =
-rollingMonitorInterval <= 0 || this.logAggregationContext == null
-|| this.logAggregationContext.getRolledLogsIncludePattern() == null
-|| this.logAggregationContext.getRolledLogsIncludePattern()
-.isEmpty() ? false : true;
+rollingMonitorInterval > 0 && this.logAggregationContext != null
+&& this.logAggregationContext.getRolledLogsIncludePattern() != null
+&& !this.logAggregationContext.getRolledLogsIncludePattern()
+.isEmpty();
+if (logAggregationInRolling) {
+  LOG.info("Rolling mode is turned on with include pattern {}",
+  this.logAggregationContext.getRolledLogsIncludePattern());
+} else {
+  LOG.debug("Rolling mode is turned off");
+}
 logControllerContext = new LogAggregationFileControllerContext(
 this.remoteNodeLogFileForApp,
 this.remoteNodeTmpLogFileForApp,
@@ -299,11 +304,13 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 }
 
 if (pendingContainerInThisCycle.isEmpty()) {
+  LOG.debug("No pending container in this cycle");
   sendLogAggregationReport(true, "", appFinished);
   return;
 }
 
 logAggregationTimes++;
+LOG.debug("Cycle #{} of log aggregator", logAggregationTimes);
 String diagnosticMessage = "";
 boolean logAggregationSucceedInThisCycle = true;
 DeletionTask deletionTask = null;
@@ -331,6 +338,8 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 appFinished, finishedContainers.contains(container));
 if (uploadedFilePathsInThisCycle.size() > 0) {
   uploadedLogsInThisCycle = true;
+  LOG.trace("Uploaded the following files for {}: {}",
+  container, uploadedFilePathsInThisCycle.toString());
  List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
   
uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsInThisCycle);
   if (LOG.isDebugEnabled()) {
@@ -386,6 +395,13 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   if (logAggregationSucceedInThisCycle && deletionTask != null) {
 delService.delete(deletionTask);
   }
+  if (!diagnosticMessage.isEmpty()) {
+LOG.debug("Sending log aggregation report along with the " +
+"following diagnostic message:\"{}\"", diagnosticMessage);
+  }
+  if (!logAggregationSucceedInThisCycle) {
+LOG.warn("Log aggregation did not succeed in thi

[hadoop] branch branch-3.2 updated: YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl… (#1261)

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new d5446b3  YARN-9676. Add DEBUG and TRACE level messages to 
AppLogAggregatorImpl… (#1261)
d5446b3 is described below

commit d5446b3a23d822a77db0638844faa5032c12d608
Author: Adam Antal 
AuthorDate: Wed Aug 14 17:35:16 2019 +0200

YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl… 
(#1261)

* YARN-9676. Add DEBUG and TRACE level messages to AppLogAggregatorImpl and 
connected classes

* Using {} placeholder, and increasing loglevel if log aggregation failed.

(cherry picked from commit c89bdfacc8715fa6d72acd85437ab8cd257c8aad)
---
 .../logaggregation/AppLogAggregatorImpl.java   | 42 +-
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 84eec4f..965734f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -108,7 +108,6 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   // These variables are only for testing
   private final AtomicBoolean waiting = new AtomicBoolean(false);
   private int logAggregationTimes = 0;
-  private int cleanupOldLogTimes = 0;
   private long logFileSizeThreshold;
   private boolean renameTemporaryLogFileFailed = false;
 
@@ -196,10 +195,16 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   this.remoteNodeTmpLogFileForApp = getRemoteNodeTmpLogFileForApp();
 }
 boolean logAggregationInRolling =
-rollingMonitorInterval <= 0 || this.logAggregationContext == null
-|| this.logAggregationContext.getRolledLogsIncludePattern() == null
-|| this.logAggregationContext.getRolledLogsIncludePattern()
-.isEmpty() ? false : true;
+rollingMonitorInterval > 0 && this.logAggregationContext != null
+&& this.logAggregationContext.getRolledLogsIncludePattern() != null
+&& !this.logAggregationContext.getRolledLogsIncludePattern()
+.isEmpty();
+if (logAggregationInRolling) {
+  LOG.info("Rolling mode is turned on with include pattern {}",
+  this.logAggregationContext.getRolledLogsIncludePattern());
+} else {
+  LOG.debug("Rolling mode is turned off");
+}
 logControllerContext = new LogAggregationFileControllerContext(
 this.remoteNodeLogFileForApp,
 this.remoteNodeTmpLogFileForApp,
@@ -299,11 +304,13 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 }
 
 if (pendingContainerInThisCycle.isEmpty()) {
+  LOG.debug("No pending container in this cycle");
   sendLogAggregationReport(true, "", appFinished);
   return;
 }
 
 logAggregationTimes++;
+LOG.debug("Cycle #{} of log aggregator", logAggregationTimes);
 String diagnosticMessage = "";
 boolean logAggregationSucceedInThisCycle = true;
 DeletionTask deletionTask = null;
@@ -331,6 +338,8 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
 appFinished, finishedContainers.contains(container));
 if (uploadedFilePathsInThisCycle.size() > 0) {
   uploadedLogsInThisCycle = true;
+  LOG.trace("Uploaded the following files for {}: {}",
+  container, uploadedFilePathsInThisCycle.toString());
  List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
   
uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsInThisCycle);
   if (LOG.isDebugEnabled()) {
@@ -386,6 +395,13 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   if (logAggregationSucceedInThisCycle && deletionTask != null) {
 delService.delete(deletionTask);
   }
+  if (!diagnosticMessage.isEmpty()) {
+LOG.debug("Sending log aggregation report along with the " +
+"following diagnostic message:\"{}\"", diagnosticMessage);
+  }
+  if

[hadoop] branch branch-3.1 updated: YARN-9140. Code cleanup in ResourcePluginManager.initialize and in TestResourcePluginManager. Contributed by Peter Bacsko

2019-08-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new aa0631a  YARN-9140. Code cleanup in ResourcePluginManager.initialize 
and in TestResourcePluginManager. Contributed by Peter Bacsko
aa0631a is described below

commit aa0631a042a9e0b546c3b6589dec19d975e19c09
Author: Szilard Nemeth 
AuthorDate: Wed Aug 14 19:04:09 2019 +0200

YARN-9140. Code cleanup in ResourcePluginManager.initialize and in 
TestResourcePluginManager. Contributed by Peter Bacsko
---
 .../resourceplugin/ResourcePluginManager.java  | 67 +++---
 .../resourceplugin/TestResourcePluginManager.java  | 45 +++
 2 files changed, 66 insertions(+), 46 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index d6edfdd..4ace3ae 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -19,6 +19,7 @@
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin;
 
 import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -33,7 +34,6 @@ import org.slf4j.LoggerFactory;
 
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
@@ -56,6 +56,17 @@ public class ResourcePluginManager {
   throws YarnException {
 Configuration conf = context.getConf();
 
+String[] plugins = getPluginsFromConfig(conf);
+
+Map<String, ResourcePlugin> pluginMap = Maps.newHashMap();
+if (plugins != null) {
+  pluginMap = initializePlugins(context, plugins);
+}
+
+configuredPlugins = Collections.unmodifiableMap(pluginMap);
+  }
+
+  private String[] getPluginsFromConfig(Configuration conf) {
 String[] plugins = conf.getStrings(YarnConfiguration.NM_RESOURCE_PLUGINS);
 if (plugins == null || plugins.length == 0) {
   LOG.info("No Resource plugins found from configuration!");
@@ -63,27 +74,19 @@ public class ResourcePluginManager {
 LOG.info("Found Resource plugins from configuration: "
 + Arrays.toString(plugins));
 
-if (plugins != null) {
-  Map<String, ResourcePlugin> pluginMap = new HashMap<>();
-
-  // Initialize each plugins
-  for (String resourceName : plugins) {
-resourceName = resourceName.trim();
-if (!SUPPORTED_RESOURCE_PLUGINS.contains(resourceName)) {
-  String msg =
-  "Trying to initialize resource plugin with name=" + resourceName
-  + ", it is not supported, list of supported plugins:"
-  + StringUtils.join(",", SUPPORTED_RESOURCE_PLUGINS);
-  LOG.error(msg);
-  throw new YarnException(msg);
-}
+return plugins;
+  }
 
-if (pluginMap.containsKey(resourceName)) {
-  LOG.warn("Ignoring duplicate Resource plugin definition: " +
-  resourceName);
-  continue;
-}
 
+  private Map<String, ResourcePlugin> initializePlugins(
+  Context context, String[] plugins) throws YarnException {
+Map<String, ResourcePlugin> pluginMap = Maps.newHashMap();
+
+for (String resourceName : plugins) {
+  resourceName = resourceName.trim();
+  ensurePluginIsSupported(resourceName);
+
+  if (!isPluginDuplicate(pluginMap, resourceName)) {
 ResourcePlugin plugin = null;
 if (resourceName.equals(GPU_URI)) {
   final GpuDiscoverer gpuDiscoverer = new GpuDiscoverer();
@@ -103,11 +106,33 @@ public class ResourcePluginManager {
 LOG.info("Initialized plugin {}", plugin);
 pluginMap.put(resourceName, plugin);
   }
+}
+return pluginMap;
+  }
 
-  configuredPlugins = Collections.unmodifiableMap(pluginMap);
+  private void ensurePluginIsSupported(String resourceName)
+  throws YarnException {
+if (!SUPPORTED_RESOURCE_PLUGINS.contains(resourceName)) {
+  String msg =
+  "Trying to initialize resource plugin with name=" + resourceName
+  +

[hadoop] branch trunk updated: MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3f4f097  MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. 
Contributed by Prabhu Joseph
3f4f097 is described below

commit 3f4f097f1f29249c8ad5d59ccefcddf0a66b2e6b
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:07:43 2019 +0200

MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by 
Prabhu Joseph
---
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
index 287c646..8cce1c3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
@@ -233,8 +233,8 @@ public class TestHSWebApp {
 PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
 verify(spyPw).write(
 "Logs not available for container_10_0001_01_01."
-+ " Aggregation may not be complete, "
-+ "Check back later or try the nodemanager at "
++ " Aggregation may not be complete, Check back later or try to"
++ " find the container logs in the local directory of nodemanager "
 + MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new faafab9  MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. 
Contributed by Prabhu Joseph
faafab9 is described below

commit faafab93885a7b02a9db86719912afa461bb5791
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:07:43 2019 +0200

MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by 
Prabhu Joseph

(cherry picked from commit 3f4f097f1f29249c8ad5d59ccefcddf0a66b2e6b)
---
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
index 287c646..8cce1c3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
@@ -233,8 +233,8 @@ public class TestHSWebApp {
 PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
 verify(spyPw).write(
 "Logs not available for container_10_0001_01_01."
-+ " Aggregation may not be complete, "
-+ "Check back later or try the nodemanager at "
++ " Aggregation may not be complete, Check back later or try to"
++ " find the container logs in the local directory of nodemanager "
 + MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new a3fb032  MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. 
Contributed by Prabhu Joseph
a3fb032 is described below

commit a3fb0320101c2c98bf45e4057cab0f1ae19980de
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:07:43 2019 +0200

MAPREDUCE-7230. TestHSWebApp.testLogsViewSingle fails. Contributed by 
Prabhu Joseph

(cherry picked from commit 3f4f097f1f29249c8ad5d59ccefcddf0a66b2e6b)
---
 .../java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
index 287c646..8cce1c3 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHSWebApp.java
@@ -233,8 +233,8 @@ public class TestHSWebApp {
 PrintWriter spyPw = WebAppTests.getPrintWriter(injector);
 verify(spyPw).write(
 "Logs not available for container_10_0001_01_01."
-+ " Aggregation may not be complete, "
-+ "Check back later or try the nodemanager at "
++ " Aggregation may not be complete, Check back later or try to"
++ " find the container logs in the local directory of nodemanager "
 + MockJobs.NM_HOST + ":" + MockJobs.NM_PORT);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1845a83  YARN-9488. Skip YARNFeatureNotEnabledException from 
ClientRMService. Contributed by Prabhu Joseph
1845a83 is described below

commit 1845a83cec6563482523d8c34b38c4e36c0aa9df
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:15:38 2019 +0200

YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. 
Contributed by Prabhu Joseph
---
 .../apache/hadoop/yarn/server/resourcemanager/ClientRMService.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 298bea7..1e54cd6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -156,6 +156,7 @@ import 
org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
@@ -283,7 +284,8 @@ public class ClientRMService extends AbstractService 
implements
 
 this.server.addTerseExceptions(ApplicationNotFoundException.class,
 ApplicationAttemptNotFoundException.class,
-ContainerNotFoundException.class);
+ContainerNotFoundException.class,
+YARNFeatureNotEnabledException.class);
 
 // Enable service authorization?
 if (conf.getBoolean(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e616037  YARN-9488. Skip YARNFeatureNotEnabledException from 
ClientRMService. Contributed by Prabhu Joseph
e616037 is described below

commit e616037d1fe86449f33425c636905d96fff8e2a1
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:15:38 2019 +0200

YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. 
Contributed by Prabhu Joseph

(cherry picked from commit 1845a83cec6563482523d8c34b38c4e36c0aa9df)
---
 .../apache/hadoop/yarn/server/resourcemanager/ClientRMService.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index e2765a3..6cc7b43 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -156,6 +156,7 @@ import 
org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
@@ -282,7 +283,8 @@ public class ClientRMService extends AbstractService 
implements
 
 this.server.addTerseExceptions(ApplicationNotFoundException.class,
 ApplicationAttemptNotFoundException.class,
-ContainerNotFoundException.class);
+ContainerNotFoundException.class,
+YARNFeatureNotEnabledException.class);
 
 // Enable service authorization?
 if (conf.getBoolean(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. Contributed by Prabhu Joseph

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 9411437  YARN-9488. Skip YARNFeatureNotEnabledException from 
ClientRMService. Contributed by Prabhu Joseph
9411437 is described below

commit 94114378ce919f68dca5b49c14da350975dde828
Author: Szilard Nemeth 
AuthorDate: Thu Aug 15 17:15:38 2019 +0200

YARN-9488. Skip YARNFeatureNotEnabledException from ClientRMService. 
Contributed by Prabhu Joseph

(cherry picked from commit 1845a83cec6563482523d8c34b38c4e36c0aa9df)
---
 .../apache/hadoop/yarn/server/resourcemanager/ClientRMService.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 379251e..e0dead9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -144,6 +144,7 @@ import 
org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.exceptions.YARNFeatureNotEnabledException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
@@ -268,7 +269,8 @@ public class ClientRMService extends AbstractService 
implements
 
 this.server.addTerseExceptions(ApplicationNotFoundException.class,
 ApplicationAttemptNotFoundException.class,
-ContainerNotFoundException.class);
+ContainerNotFoundException.class,
+YARNFeatureNotEnabledException.class);
 
 // Enable service authorization?
 if (conf.getBoolean(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9679. Regular code cleanup in TestResourcePluginManager (#1122)

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 22c4f38  YARN-9679. Regular code cleanup in TestResourcePluginManager 
(#1122)
22c4f38 is described below

commit 22c4f38c4b005a70c9b95d8aaa350763aaec5c5e
Author: Adam Antal 
AuthorDate: Thu Aug 15 17:32:05 2019 +0200

YARN-9679. Regular code cleanup in TestResourcePluginManager (#1122)
---
 .../resourceplugin/TestResourcePluginManager.java  | 86 +++---
 1 file changed, 43 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
index a41edba..28f917f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/TestResourcePluginManager.java
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.TestResourceUtils;
 import org.junit.After;
-import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -57,6 +56,8 @@ import java.util.List;
 import java.util.Map;
 import java.io.File;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.Assert.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyMap;
 import static org.mockito.Mockito.mock;
@@ -67,14 +68,10 @@ import static org.mockito.Mockito.spy;
 
 public class TestResourcePluginManager extends NodeManagerTestBase {
   private NodeManager nm;
-
-  private YarnConfiguration conf;
-
   private String tempResourceTypesFile;
 
   @Before
   public void setup() throws Exception {
-this.conf = createNMConfig();
 // setup resource-types.xml
 ResourceUtils.resetResourceTypes();
 String resourceTypesFile = "resource-types-pluggable-devices.xml";
@@ -82,7 +79,7 @@ public class TestResourcePluginManager extends 
NodeManagerTestBase {
 TestResourceUtils.setupResourceTypes(this.conf, resourceTypesFile);
   }
 
-  ResourcePluginManager stubResourcePluginmanager() {
+  private ResourcePluginManager stubResourcePluginmanager() {
 // Stub ResourcePluginManager
 final ResourcePluginManager rpm = mock(ResourcePluginManager.class);
    Map<String, ResourcePlugin> plugins = new HashMap<>();
@@ -117,7 +114,7 @@ public class TestResourcePluginManager extends 
NodeManagerTestBase {
 // cleanup resource-types.xml
 File dest = new File(this.tempResourceTypesFile);
 if (dest.exists()) {
-  dest.delete();
+  assertThat(dest.delete()).isTrue();
 }
   }
 
@@ -155,10 +152,10 @@ public class TestResourcePluginManager extends 
NodeManagerTestBase {
 }
   }
 
-  private class MyMockNM extends NodeManager {
+  private class ResourcePluginMockNM extends NodeManager {
 private final ResourcePluginManager rpm;
 
-public MyMockNM(ResourcePluginManager rpm) {
+ResourcePluginMockNM(ResourcePluginManager rpm) {
   this.rpm = rpm;
 }
 
@@ -196,28 +193,28 @@ public class TestResourcePluginManager extends 
NodeManagerTestBase {
 }
   }
 
-  /*
-   * Make sure ResourcePluginManager is initialized during NM start up.
+  /**
+   * Make sure {@link ResourcePluginManager} is initialized during NM start up.
*/
  @Test(timeout = 30000)
   public void testResourcePluginManagerInitialization() throws Exception {
 final ResourcePluginManager rpm = stubResourcePluginmanager();
-nm = new MyMockNM(rpm);
+nm = new ResourcePluginMockNM(rpm);
 
 nm.init(conf);
 verify(rpm).initialize(
 any(Context.class));
   }
 
-  /*
-   * Make sure ResourcePluginManager is invoked during NM update.
+  /**
+   * Make sure {@link ResourcePluginManager} is invoked during NM update.
*/
  @Test(timeout = 30000)
   public void testNodeStatusUpdaterWithResourcePluginsEnabled()
   throws Exception {
 final ResourcePluginManager rpm = stubResourcePluginmanager();
 
-nm = new MyMockNM(rpm);
+nm = new ResourcePluginMockNM(rpm);
 
 nm.init(conf);
 nm.start();
@@ -230,8 +227,8 @@ public class TestResourcePluginManager extends 
NodeManagerTestBase {
 .updateConfiguredResource(an

[hadoop] branch trunk updated: YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded fails on trunk. Contributed by Adam Antal

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2a05e0f  YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded 
fails on trunk. Contributed by Adam Antal
2a05e0f is described below

commit 2a05e0ff3b5ab3be8654e9e96c6556865ef26096
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 08:52:09 2019 +0200

YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded fails on trunk. 
Contributed by Adam Antal
---
 .../containermanager/logaggregation/AppLogAggregatorImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 77bec72..245dc10 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -395,7 +395,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   if (logAggregationSucceedInThisCycle && deletionTask != null) {
 delService.delete(deletionTask);
   }
-  if (!diagnosticMessage.isEmpty()) {
+  if (diagnosticMessage != null && !diagnosticMessage.isEmpty()) {
 LOG.debug("Sending log aggregation report along with the " +
 "following diagnostic message:\"{}\"", diagnosticMessage);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded fails on trunk. Contributed by Adam Antal

2019-08-15 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8fee380  YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded 
fails on trunk. Contributed by Adam Antal
8fee380 is described below

commit 8fee3808c5aa89cb96b4ef60a71ca6a2ce9055ad
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 08:52:09 2019 +0200

YARN-9749. TestAppLogAggregatorImpl#testDFSQuotaExceeded fails on trunk. 
Contributed by Adam Antal

(cherry picked from commit 2a05e0ff3b5ab3be8654e9e96c6556865ef26096)
---
 .../containermanager/logaggregation/AppLogAggregatorImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 965734f..94a9700 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -395,7 +395,7 @@ public class AppLogAggregatorImpl implements 
AppLogAggregator {
   if (logAggregationSucceedInThisCycle && deletionTask != null) {
 delService.delete(deletionTask);
   }
-  if (!diagnosticMessage.isEmpty()) {
+  if (diagnosticMessage != null && !diagnosticMessage.isEmpty()) {
 LOG.debug("Sending log aggregation report along with the " +
 "following diagnostic message:\"{}\"", diagnosticMessage);
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2216ec5  YARN-9100. Add tests for GpuResourceAllocator and do minor 
code cleanup. Contributed by Peter Bacsko
2216ec5 is described below

commit 2216ec54e58e24ff09620fc2efa2f1733391d0c3
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 09:13:20 2019 +0200

YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. 
Contributed by Peter Bacsko
---
 .../linux/resources/gpu/GpuResourceAllocator.java  | 106 ++---
 .../resources/gpu/GpuResourceHandlerImpl.java  |   2 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java  |   4 +-
 .../resources/gpu/TestGpuResourceAllocator.java| 442 +
 .../resources/gpu/TestGpuResourceHandlerImpl.java  |   8 +-
 5 files changed, 509 insertions(+), 53 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 0b95ca7..2300776 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -19,6 +19,8 @@
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.slf4j.Logger;
@@ -38,34 +40,44 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 
 /**
- * Allocate GPU resources according to requirements
+ * Allocate GPU resources according to requirements.
  */
 public class GpuResourceAllocator {
   final static Logger LOG = LoggerFactory.
   getLogger(GpuResourceAllocator.class);
+
   private static final int WAIT_MS_PER_LOOP = 1000;
 
  private Set<GpuDevice> allowedGpuDevices = new TreeSet<>();
  private Map<GpuDevice, ContainerId> usedDevices = new TreeMap<>();
   private Context nmContext;
+  private final int waitPeriodForResource;
 
   public GpuResourceAllocator(Context ctx) {
 this.nmContext = ctx;
+// Wait for a maximum of 120 seconds if no available GPU are there which
+// are yet to be released.
+this.waitPeriodForResource = 120 * WAIT_MS_PER_LOOP;
+  }
+
+  @VisibleForTesting
+  GpuResourceAllocator(Context ctx, int waitPeriodForResource) {
+this.nmContext = ctx;
+this.waitPeriodForResource = waitPeriodForResource;
   }
 
   /**
-   * Contains allowed and denied devices
+   * Contains allowed and denied devices.
* Denied devices will be useful for cgroups devices module to do 
blacklisting
*/
   static class GpuAllocation {
@@ -91,20 +103,13 @@ public class GpuResourceAllocator {
   }
 
   /**
-   * Add GPU to allowed list
+   * Add GPU to the allowed list of GPUs.
* @param gpuDevice gpu device
*/
   public synchronized void addGpu(GpuDevice gpuDevice) {
 allowedGpuDevices.add(gpuDevice);
   }
 
-  private String getResourceHandlerExceptionMessage(int numRequestedGpuDevices,
-  ContainerId containerId) {
-return "Failed to find enough GPUs, requestor=" + containerId
-+ ", #RequestedGPUs=" + numRequestedGpuDevices + ", #availableGpus="
-+ getAvailableGpus();
-  }
-
   @VisibleForTesting
   public synchronized int getAvailableGpus() {
 return allowedGpuDevices.size() - usedDevices.size();
@@ -113,10 +118,10 @@ public class GpuResourceAllocator {
   public synchronized void recoverAssignedGpus(ContainerId containerId)
   throws ResourceHandlerException {
 Container c = nmContext.getContainers().get(containerId);
-if (null == c) {
+if (c == null) {
   throw new ResourceHandlerException(
-  "This shouldn't happen, cannot find container with id="
-  + containerId);

[hadoop] branch branch-3.2 updated: YARN-8586. Extract log aggregation related fields and methods from RMAppImpl. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new df61637  YARN-8586. Extract log aggregation related fields and methods 
from RMAppImpl. Contributed by Peter Bacsko
df61637 is described below

commit df616370f01494f8a9abfca73465789e16a8a0d8
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 11:52:51 2019 +0200

YARN-8586. Extract log aggregation related fields and methods from 
RMAppImpl. Contributed by Peter Bacsko
---
 .../server/resourcemanager/rmapp/RMAppImpl.java| 315 ++---
 .../resourcemanager/rmapp/RMAppLogAggregation.java | 383 +
 2 files changed, 410 insertions(+), 288 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 2d22bb9..9d32257 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -19,18 +19,14 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
 
 import java.net.InetAddress;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
@@ -181,19 +177,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   new AppFinishedTransition();
   private Set ranNodes = new ConcurrentSkipListSet();
 
-  private final boolean logAggregationEnabled;
-  private long logAggregationStartTime = 0;
-  private final long logAggregationStatusTimeout;
-  private final Map logAggregationStatus =
-  new ConcurrentHashMap();
-  private volatile LogAggregationStatus logAggregationStatusForAppReport;
-  private int logAggregationSucceed = 0;
-  private int logAggregationFailed = 0;
-  private Map> logAggregationDiagnosticsForNMs =
-  new HashMap>();
-  private Map> logAggregationFailureMessagesForNMs =
-  new HashMap>();
-  private final int maxLogAggregationDiagnosticsInMemory;
+  private final RMAppLogAggregation logAggregation;
   private Map applicationTimeouts =
   new HashMap();
 
@@ -510,26 +494,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 applicationSchedulingEnvs
 .putAll(submissionContext.getApplicationSchedulingPropertiesMap());
 
-long localLogAggregationStatusTimeout =
-conf.getLong(YarnConfiguration.LOG_AGGREGATION_STATUS_TIME_OUT_MS,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS);
-if (localLogAggregationStatusTimeout <= 0) {
-  this.logAggregationStatusTimeout =
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS;
-} else {
-  this.logAggregationStatusTimeout = localLogAggregationStatusTimeout;
-}
-this.logAggregationEnabled =
-conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
-if (this.logAggregationEnabled) {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.NOT_START;
-} else {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.DISABLED;
-}
-maxLogAggregationDiagnosticsInMemory = conf.getInt(
-YarnConfiguration.RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY,
-
YarnConfiguration.DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY);
+this.logAggregation = new RMAppLogAggregation(conf, readLock, writeLock);
 
 // amBlacklistingEnabled can be configured globally
 // Just use the global values
@@ -1087,13 +1052,9 @@ public class RMAppImpl implements RMApp, Recoverable {
   // otherwise, add it to ranNodes for further process
   app.ranNodes.add(nodeAddedEvent.getNodeId());
 
-  if (!app.logAggregationStatus.containsKey(nodeAddedEvent.getNodeId())) {
-app.logAggregationStatus.put(nodeAddedEvent.getNodeId(),
-  LogAggregationReport.newInstance(app.applicationId,
-app.logAggregationEnabled ? LogAggregationS

[hadoop] branch branch-3.1 updated: YARN-8586. Extract log aggregation related fields and methods from RMAppImpl. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 0a379e9  YARN-8586. Extract log aggregation related fields and methods 
from RMAppImpl. Contributed by Peter Bacsko
0a379e9 is described below

commit 0a379e94ba4ac29b9854d225f1a4c533ad2fb187
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 12:15:27 2019 +0200

YARN-8586. Extract log aggregation related fields and methods from 
RMAppImpl. Contributed by Peter Bacsko
---
 .../server/resourcemanager/rmapp/RMAppImpl.java| 314 ++---
 .../resourcemanager/rmapp/RMAppLogAggregation.java | 383 +
 2 files changed, 407 insertions(+), 290 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 9bfb2ec..cb103c6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -19,24 +19,19 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
 
 import java.net.InetAddress;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -181,19 +176,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   new AppFinishedTransition();
   private Set ranNodes = new ConcurrentSkipListSet();
 
-  private final boolean logAggregationEnabled;
-  private long logAggregationStartTime = 0;
-  private final long logAggregationStatusTimeout;
-  private final Map logAggregationStatus =
-  new ConcurrentHashMap();
-  private volatile LogAggregationStatus logAggregationStatusForAppReport;
-  private int logAggregationSucceed = 0;
-  private int logAggregationFailed = 0;
-  private Map> logAggregationDiagnosticsForNMs =
-  new HashMap>();
-  private Map> logAggregationFailureMessagesForNMs =
-  new HashMap>();
-  private final int maxLogAggregationDiagnosticsInMemory;
+  private final RMAppLogAggregation logAggregation;
   private Map applicationTimeouts =
   new HashMap();
 
@@ -510,26 +493,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 applicationSchedulingEnvs
 .putAll(submissionContext.getApplicationSchedulingPropertiesMap());
 
-long localLogAggregationStatusTimeout =
-conf.getLong(YarnConfiguration.LOG_AGGREGATION_STATUS_TIME_OUT_MS,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS);
-if (localLogAggregationStatusTimeout <= 0) {
-  this.logAggregationStatusTimeout =
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS;
-} else {
-  this.logAggregationStatusTimeout = localLogAggregationStatusTimeout;
-}
-this.logAggregationEnabled =
-conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
-if (this.logAggregationEnabled) {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.NOT_START;
-} else {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.DISABLED;
-}
-maxLogAggregationDiagnosticsInMemory = conf.getInt(
-YarnConfiguration.RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY,
-
YarnConfiguration.DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY);
+this.logAggregation = new RMAppLogAggregation(conf, readLock, writeLock);
 
 // amBlacklistingEnabled can be configured globally
 // Just use the global values
@@ -1087,13 +1051,9 @@ public class RMAppImpl implements RMApp, Recoverable {
   // otherwise, add it to ranNodes for further process
   app.ranNodes.add(nodeAddedEvent.getNodeId(

[hadoop] branch trunk updated: YARN-8586. Extract log aggregation related fields and methods from RMAppImpl. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4456ea6  YARN-8586. Extract log aggregation related fields and methods 
from RMAppImpl. Contributed by Peter Bacsko
4456ea6 is described below

commit 4456ea67b949553b85e101e866b4b3f4b335f1f0
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 11:36:14 2019 +0200

YARN-8586. Extract log aggregation related fields and methods from 
RMAppImpl. Contributed by Peter Bacsko
---
 .../server/resourcemanager/rmapp/RMAppImpl.java| 315 ++---
 .../resourcemanager/rmapp/RMAppLogAggregation.java | 383 +
 2 files changed, 406 insertions(+), 292 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 3f9f9c8..d25dddc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -19,24 +19,19 @@
 package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
 
 import java.net.InetAddress;
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListSet;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
-import org.apache.commons.lang3.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -182,19 +177,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   new AppFinishedTransition();
   private Set ranNodes = new ConcurrentSkipListSet();
 
-  private final boolean logAggregationEnabled;
-  private long logAggregationStartTime = 0;
-  private final long logAggregationStatusTimeout;
-  private final Map logAggregationStatus =
-  new ConcurrentHashMap();
-  private volatile LogAggregationStatus logAggregationStatusForAppReport;
-  private int logAggregationSucceed = 0;
-  private int logAggregationFailed = 0;
-  private Map> logAggregationDiagnosticsForNMs =
-  new HashMap>();
-  private Map> logAggregationFailureMessagesForNMs =
-  new HashMap>();
-  private final int maxLogAggregationDiagnosticsInMemory;
+  private final RMAppLogAggregation logAggregation;
   private Map applicationTimeouts =
   new HashMap();
 
@@ -511,26 +494,7 @@ public class RMAppImpl implements RMApp, Recoverable {
 applicationSchedulingEnvs
 .putAll(submissionContext.getApplicationSchedulingPropertiesMap());
 
-long localLogAggregationStatusTimeout =
-conf.getLong(YarnConfiguration.LOG_AGGREGATION_STATUS_TIME_OUT_MS,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS);
-if (localLogAggregationStatusTimeout <= 0) {
-  this.logAggregationStatusTimeout =
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_STATUS_TIME_OUT_MS;
-} else {
-  this.logAggregationStatusTimeout = localLogAggregationStatusTimeout;
-}
-this.logAggregationEnabled =
-conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
-  YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED);
-if (this.logAggregationEnabled) {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.NOT_START;
-} else {
-  this.logAggregationStatusForAppReport = LogAggregationStatus.DISABLED;
-}
-maxLogAggregationDiagnosticsInMemory = conf.getInt(
-YarnConfiguration.RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY,
-
YarnConfiguration.DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY);
+this.logAggregation = new RMAppLogAggregation(conf, readLock, writeLock);
 
 // amBlacklistingEnabled can be configured globally
 // Just use the global values
@@ -1090,13 +1054,9 @@ public class RMAppImpl implements RMApp, Recoverable {
   // otherwise, add it to ranNodes for further process
   app.ranNodes.add(nodeAddedEvent.getNodeId());
 
-  if (!app.logAggregationSta

[hadoop] branch trunk updated: YARN-9461. TestRMWebServicesDelegationTokenAuthentication.testCancelledDelegationToken fails with HTTP 400. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9b8359b  YARN-9461. 
TestRMWebServicesDelegationTokenAuthentication.testCancelledDelegationToken 
fails with HTTP 400. Contributed by Peter Bacsko
9b8359b is described below

commit 9b8359bb085b29b868ec9e704bf655b58d3e36a7
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 12:31:58 2019 +0200

YARN-9461. 
TestRMWebServicesDelegationTokenAuthentication.testCancelledDelegationToken 
fails with HTTP 400. Contributed by Peter Bacsko
---
 ...RMWebServicesDelegationTokenAuthentication.java | 23 --
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
index 41e56ae..6eecaa9f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokenAuthentication.java
@@ -59,8 +59,10 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
 import org.codehaus.jettison.json.JSONObject;
+import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -84,8 +86,8 @@ public class TestRMWebServicesDelegationTokenAuthentication {
 
   private static boolean miniKDCStarted = false;
   private static MiniKdc testMiniKDC;
-  private static MockRM rm;
   private static String sunSecurityKrb5RcacheValue;
+  private MockRM rm;
 
   String delegationTokenHeader;
 
@@ -107,7 +109,7 @@ public class TestRMWebServicesDelegationTokenAuthentication 
{
   System.setProperty(SUN_SECURITY_KRB5_RCACHE_KEY, "none");
   testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
   setupKDC();
-  setupAndStartRM();
+
 } catch (Exception e) {
   assertTrue("Couldn't create MiniKDC", false);
 }
@@ -118,9 +120,6 @@ public class TestRMWebServicesDelegationTokenAuthentication 
{
 if (testMiniKDC != null) {
   testMiniKDC.stop();
 }
-if (rm != null) {
-  rm.stop();
-}
 if (sunSecurityKrb5RcacheValue == null) {
   System.clearProperty(SUN_SECURITY_KRB5_RCACHE_KEY);
 } else {
@@ -129,6 +128,18 @@ public class 
TestRMWebServicesDelegationTokenAuthentication {
 }
   }
 
+  @Before
+  public void before() throws Exception {
+setupAndStartRM();
+  }
+
+  @After
+  public void after() {
+if (rm != null) {
+  rm.stop();
+}
+  }
+
   @Parameterized.Parameters
   public static Collection headers() {
 return Arrays.asList(new Object[][] { {OldDelegationTokenHeader}, 
{NewDelegationTokenHeader}});
@@ -139,7 +150,7 @@ public class TestRMWebServicesDelegationTokenAuthentication 
{
 this.delegationTokenHeader = header;
   }
 
-  private static void setupAndStartRM() throws Exception {
+  private void setupAndStartRM() throws Exception {
 Configuration rmconf = new Configuration();
 rmconf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
   YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new a83718f  YARN-9100. Add tests for GpuResourceAllocator and do minor 
code cleanup. Contributed by Peter Bacsko
a83718f is described below

commit a83718f130acc6e4cf098cec20474562d43ae71a
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 15:24:44 2019 +0200

YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. 
Contributed by Peter Bacsko
---
 .../linux/resources/gpu/GpuResourceAllocator.java  | 105 ++---
 .../resources/gpu/GpuResourceHandlerImpl.java  |   2 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java  |   4 +-
 .../resources/gpu/TestGpuResourceAllocator.java| 449 +
 .../resources/gpu/TestGpuResourceHandler.java  |   8 +-
 5 files changed, 515 insertions(+), 53 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 2496ac8..274e0f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -19,6 +19,8 @@
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
@@ -38,18 +40,17 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 
 /**
- * Allocate GPU resources according to requirements
+ * Allocate GPU resources according to requirements.
  */
 public class GpuResourceAllocator {
   final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
@@ -58,13 +59,23 @@ public class GpuResourceAllocator {
   private Set allowedGpuDevices = new TreeSet<>();
   private Map usedDevices = new TreeMap<>();
   private Context nmContext;
+  private final int waitPeriodForResource;
 
   public GpuResourceAllocator(Context ctx) {
 this.nmContext = ctx;
+// Wait for a maximum of 120 seconds if there are no available GPUs
+// which are yet to be released.
+this.waitPeriodForResource = 120 * WAIT_MS_PER_LOOP;
+  }
+
+  @VisibleForTesting
+  GpuResourceAllocator(Context ctx, int waitPeriodForResource) {
+this.nmContext = ctx;
+this.waitPeriodForResource = waitPeriodForResource;
   }
 
   /**
-   * Contains allowed and denied devices
+   * Contains allowed and denied devices.
* Denied devices will be useful for cgroups devices module to do 
blacklisting
*/
   static class GpuAllocation {
@@ -90,20 +101,13 @@ public class GpuResourceAllocator {
   }
 
   /**
-   * Add GPU to allowed list
+   * Add GPU to the allowed list of GPUs.
* @param gpuDevice gpu device
*/
   public synchronized void addGpu(GpuDevice gpuDevice) {
 allowedGpuDevices.add(gpuDevice);
   }
 
-  private String getResourceHandlerExceptionMessage(int numRequestedGpuDevices,
-  ContainerId containerId) {
-return "Failed to find enough GPUs, requestor=" + containerId
-+ ", #RequestedGPUs=" + numRequestedGpuDevices + ", #availableGpus="
-+ getAvailableGpus();
-  }
-
   @VisibleForTesting
   public synchronized int getAvailableGpus() {
 return allowedGpuDevices.size() - usedDevices.size();
@@ -112,10 +116,10 @@ public class GpuResourceAllocator {
   public synchronized void recoverAssignedGpus(ContainerId containerId)
   throws ResourceHandlerException {
 Container c = nmContext.getContainers().get(containerId);
-if (null == c) {
+if (c == null) {
   throw new ResourceHandlerException(
-  "This shouldn't happen, cannot find container with id="
-  + containerId);

[hadoop] branch branch-3.1 updated: YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. Contributed by Peter Bacsko

2019-08-16 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new fd2e353  YARN-9100. Add tests for GpuResourceAllocator and do minor 
code cleanup. Contributed by Peter Bacsko
fd2e353 is described below

commit fd2e3532362cef61c27c916d52b116bfcd82f384
Author: Szilard Nemeth 
AuthorDate: Fri Aug 16 15:27:10 2019 +0200

YARN-9100. Add tests for GpuResourceAllocator and do minor code cleanup. 
Contributed by Peter Bacsko
---
 .../linux/resources/gpu/GpuResourceAllocator.java  | 105 ++---
 .../resources/gpu/GpuResourceHandlerImpl.java  |   2 +-
 .../resourceplugin/gpu/GpuResourcePlugin.java  |   4 +-
 .../resources/gpu/TestGpuResourceAllocator.java| 448 +
 .../resources/gpu/TestGpuResourceHandler.java  |   6 +-
 5 files changed, 513 insertions(+), 52 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
index 2496ac8..274e0f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/GpuResourceAllocator.java
@@ -19,6 +19,8 @@
 package 
org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.gpu;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
@@ -38,18 +40,17 @@ import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.stream.Collectors;
 
 import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 
 /**
- * Allocate GPU resources according to requirements
+ * Allocate GPU resources according to requirements.
  */
 public class GpuResourceAllocator {
   final static Log LOG = LogFactory.getLog(GpuResourceAllocator.class);
@@ -58,13 +59,23 @@ public class GpuResourceAllocator {
   private Set allowedGpuDevices = new TreeSet<>();
   private Map usedDevices = new TreeMap<>();
   private Context nmContext;
+  private final int waitPeriodForResource;
 
   public GpuResourceAllocator(Context ctx) {
 this.nmContext = ctx;
+// Wait for a maximum of 120 seconds if there are no available GPUs
+// which are yet to be released.
+this.waitPeriodForResource = 120 * WAIT_MS_PER_LOOP;
+  }
+
+  @VisibleForTesting
+  GpuResourceAllocator(Context ctx, int waitPeriodForResource) {
+this.nmContext = ctx;
+this.waitPeriodForResource = waitPeriodForResource;
   }
 
   /**
-   * Contains allowed and denied devices
+   * Contains allowed and denied devices.
* Denied devices will be useful for cgroups devices module to do 
blacklisting
*/
   static class GpuAllocation {
@@ -90,20 +101,13 @@ public class GpuResourceAllocator {
   }
 
   /**
-   * Add GPU to allowed list
+   * Add GPU to the allowed list of GPUs.
* @param gpuDevice gpu device
*/
   public synchronized void addGpu(GpuDevice gpuDevice) {
 allowedGpuDevices.add(gpuDevice);
   }
 
-  private String getResourceHandlerExceptionMessage(int numRequestedGpuDevices,
-  ContainerId containerId) {
-return "Failed to find enough GPUs, requestor=" + containerId
-+ ", #RequestedGPUs=" + numRequestedGpuDevices + ", #availableGpus="
-+ getAvailableGpus();
-  }
-
   @VisibleForTesting
   public synchronized int getAvailableGpus() {
 return allowedGpuDevices.size() - usedDevices.size();
@@ -112,10 +116,10 @@ public class GpuResourceAllocator {
   public synchronized void recoverAssignedGpus(ContainerId containerId)
   throws ResourceHandlerException {
 Container c = nmContext.getContainers().get(containerId);
-if (null == c) {
+if (c == null) {
   throw new ResourceHandlerException(
-  "This shouldn't happen, cannot find container with id="
-  + containerId);

[hadoop] branch trunk updated: YARN-9217. Nodemanager will fail to start if GPU is misconfigured on the node or GPU drivers missing. Contributed by Peter Bacsko

2019-08-21 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e8fa192  YARN-9217. Nodemanager will fail to start if GPU is 
misconfigured on the node or GPU drivers missing. Contributed by Peter Bacsko
e8fa192 is described below

commit e8fa192f07b6f2e7a0b03813edca03c505a8ac1b
Author: Szilard Nemeth 
AuthorDate: Wed Aug 21 16:44:22 2019 +0200

YARN-9217. Nodemanager will fail to start if GPU is misconfigured on the 
node or GPU drivers missing. Contributed by Peter Bacsko
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java | 14 
 .../src/main/resources/yarn-default.xml| 11 +++
 .../linux/resources/ResourcesExceptionUtil.java| 42 +++
 .../resources/gpu/GpuResourceHandlerImpl.java  |  5 +-
 .../resourceplugin/ResourcePluginManager.java  |  6 +-
 .../resourceplugin/gpu/GpuDiscoverer.java  | 83 --
 .../gpu/GpuNodeResourceUpdateHandler.java  | 13 +++-
 .../resourceplugin/gpu/GpuResourcePlugin.java  | 35 +++--
 .../resourceplugin/gpu/NvidiaBinaryHelper.java | 63 
 .../resources/gpu/TestGpuResourceHandlerImpl.java  | 16 +++--
 .../resourceplugin/gpu/TestGpuDiscoverer.java  | 45 ++--
 11 files changed, 256 insertions(+), 77 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 134b698..1e55fe3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1647,6 +1647,20 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_RESOURCE_PLUGINS =
   NM_PREFIX + "resource-plugins";
 
+
+  /**
+   * Specifies whether the Node Manager should fail fast during
+   * initialization if a certain device (GPU, FPGA, etc) was not found in the
+   * system. If set to "true", an exception will be thrown if a device is
+   * missing or an error occurred during discovery.
+   */
+  @Private
+  public static final String NM_RESOURCE_PLUGINS_FAIL_FAST =
+  NM_RESOURCE_PLUGINS + ".fail-fast";
+
+  @Private
+  public static final boolean DEFAULT_NM_RESOURCE_PLUGINS_FAIL_FAST = true;
+
   /**
* This setting controls if pluggable device plugin framework is enabled.
* */
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4b93d1e..7a672de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3920,6 +3920,17 @@
 
   
 
+  Specifies whether the Node Manager should fail fast during initialization
+  if a certain device (GPU, FPGA, etc) was not found in the system. If set
+  to "true", an exception will be thrown if a device is missing or
+  an error occurred during discovery.
+
+yarn.nodemanager.resource-plugins.fail-fast
+
+  
+
+  
+
   Specify GPU devices which can be managed by YARN NodeManager, split by 
comma
   Number of GPU devices will be reported to RM to make scheduling 
decisions.
   Set to auto (default) let YARN automatically discover GPU resource from
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
new file mode 100644
index 000..f270f42
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.o

[hadoop] branch branch-3.2 updated: YARN-9217. Nodemanager will fail to start if GPU is misconfigured on the node or GPU drivers missing. Contributed by Peter Bacsko

2019-08-21 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6980f17  YARN-9217. Nodemanager will fail to start if GPU is 
misconfigured on the node or GPU drivers missing. Contributed by Peter Bacsko
6980f17 is described below

commit 6980f1740fe4037653a4095ed42dfe5b84d24850
Author: Szilard Nemeth 
AuthorDate: Wed Aug 21 16:49:34 2019 +0200

YARN-9217. Nodemanager will fail to start if GPU is misconfigured on the 
node or GPU drivers missing. Contributed by Peter Bacsko
---
 .../apache/hadoop/yarn/conf/YarnConfiguration.java |  14 +++
 .../src/main/resources/yarn-default.xml|  11 +++
 .../linux/resources/ResourcesExceptionUtil.java|  42 
 .../resources/gpu/GpuResourceHandlerImpl.java  |  13 ++-
 .../resourceplugin/ResourcePluginManager.java  |   7 +-
 .../resourceplugin/gpu/GpuDiscoverer.java  | 108 -
 .../gpu/GpuNodeResourceUpdateHandler.java  |  13 ++-
 .../resourceplugin/gpu/GpuResourcePlugin.java  |  35 ++-
 .../resourceplugin/gpu/NvidiaBinaryHelper.java |  63 
 .../resources/gpu/TestGpuResourceHandler.java  |  16 +--
 .../resourceplugin/gpu/TestGpuDiscoverer.java  |  47 -
 11 files changed, 275 insertions(+), 94 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 79593ea..04a7003 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1612,6 +1612,20 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_RESOURCE_PLUGINS =
   NM_PREFIX + "resource-plugins";
 
+
+  /**
+   * Specifies whether the Node Manager should fail fast during
+   * initialization if a certain device (GPU, FPGA, etc) was not found in the
+   * system. If set to "true", an exception will be thrown if a device is
+   * missing or an error occurred during discovery.
+   */
+  @Private
+  public static final String NM_RESOURCE_PLUGINS_FAIL_FAST =
+  NM_RESOURCE_PLUGINS + ".fail-fast";
+
+  @Private
+  public static final boolean DEFAULT_NM_RESOURCE_PLUGINS_FAIL_FAST = true;
+
   /**
* Prefix for gpu configurations. Work in progress: This configuration
* parameter may be changed/removed in the future.
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 887a7c3..f99977e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3800,6 +3800,17 @@
 
   
 
+  Specifies whether the Node Manager should fail fast during initialization
+  if a certain device (GPU, FPGA, etc) was not found in the system. If set
+  to "true", an exception will be thrown if a device is missing or
+  an error occurred during discovery.
+
+yarn.nodemanager.resource-plugins.fail-fast
+
+  
+
+  
+
   Specify GPU devices which can be managed by YARN NodeManager, split by 
comma
   Number of GPU devices will be reported to RM to make scheduling 
decisions.
   Set to auto (default) let YARN automatically discover GPU resource from
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
new file mode 100644
index 000..f270f42
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/ResourcesExceptionUtil.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy o

[hadoop] branch trunk updated: YARN-7291. Better input parsing for resource in allocation file. Contributed by Zoltan Siegl

2019-08-21 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7ab88db  YARN-7291. Better input parsing for resource in allocation 
file. Contributed by Zoltan Siegl
7ab88db is described below

commit 7ab88dbfa6fceaf8fea80eff1b23ed1ac486b393
Author: Szilard Nemeth 
AuthorDate: Wed Aug 21 17:01:18 2019 +0200

YARN-7291. Better input parsing for resource in allocation file. 
Contributed by Zoltan Siegl
---
 .../scheduler/fair/FairSchedulerConfiguration.java | 126 +++--
 .../fair/TestFairSchedulerConfiguration.java   | 105 +++--
 2 files changed, 189 insertions(+), 42 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
index e6b1de4..cfe07c9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java
@@ -20,10 +20,12 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 import static 
org.apache.hadoop.yarn.util.resource.ResourceUtils.RESOURCE_REQUEST_VALUE_PATTERN;
 
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import com.google.common.collect.Lists;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -216,6 +218,15 @@ public class FairSchedulerConfiguration extends 
Configuration {
 
   private static final String INVALID_RESOURCE_DEFINITION_PREFIX =
   "Error reading resource config--invalid resource definition: ";
+  private static final String RESOURCE_PERCENTAGE_PATTERN =
+  "^(-?(\\d+)(\\.\\d*)?)\\s*%\\s*";
+  private static final String RESOURCE_VALUE_PATTERN =
+  "^(-?\\d+)(\\.\\d*)?\\s*";
+  /**
+   * For resources separated by spaces instead of a comma.
+   */
+  private static final String RESOURCES_WITH_SPACES_PATTERN =
+  "-?\\d+(?:\\.\\d*)?\\s*[a-z]+\\s*";
 
   public FairSchedulerConfiguration() {
 super();
@@ -507,7 +518,7 @@ public class FairSchedulerConfiguration extends 
Configuration {
   try {
 if (asPercent) {
   double percentage = parseNewStyleResourceAsPercentage(value,
-  resourceValue);
+  resourceName, resourceValue);
   configurableResource.setPercentage(resourceName, percentage);
 } else {
   long parsedValue = parseNewStyleResourceAsAbsoluteValue(value,
@@ -526,10 +537,10 @@ public class FairSchedulerConfiguration extends 
Configuration {
   }
 
   private static double parseNewStyleResourceAsPercentage(
-  String value, String resourceValue)
+  String value, String resource, String resourceValue)
   throws AllocationConfigurationException {
 try {
-  return findPercentage(resourceValue, "");
+  return findPercentage(resourceValue, resource);
 } catch (AllocationConfigurationException ex) {
   throw createConfigException(value,
   "The resource values must all be percentages. \""
@@ -563,18 +574,39 @@ public class FairSchedulerConfiguration extends 
Configuration {
 getResourcePercentage(StringUtils.toLowerCase(value)));
   }
 
-  private static ConfigurableResource parseOldStyleResource(String value)
+  private static ConfigurableResource parseOldStyleResource(String input)
   throws AllocationConfigurationException {
-final String lCaseValue = StringUtils.toLowerCase(value);
-final int memory = parseOldStyleResourceMemory(lCaseValue);
-final int vcores = parseOldStyleResourceVcores(lCaseValue);
+final String lowerCaseInput = StringUtils.toLowerCase(input);
+String[] resources = lowerCaseInput.split(",");
+
+if (resources.length != 2) {
+  resources = findOldStyleResourcesInSpaceSeparatedInput(lowerCaseInput);
+  if (resources.length != 2) {
+throw new AllocationConfigurationException(
+"Cannot parse resource values from input: " + input);
+  }
+}
+final int memory = parseOldStyleResourceMemory(resources);
+final int vcores = parseOldStyleResourceVcores(resources

[hadoop] branch trunk updated: YARN-10035. Add ability to filter the Cluster Applications API request by name. Contributed by Adam Antal

2020-01-06 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 768ee22  YARN-10035. Add ability to filter the Cluster Applications 
API request by name. Contributed by Adam Antal
768ee22 is described below

commit 768ee22e9e73543d2fb193d9b6ec34a247cb0411
Author: Szilard Nemeth 
AuthorDate: Mon Jan 6 16:26:33 2020 +0100

YARN-10035. Add ability to filter the Cluster Applications API request by 
name. Contributed by Adam Antal
---
 .../protocolrecords/GetApplicationsRequest.java| 18 
 .../src/main/proto/yarn_service_protos.proto   |  1 +
 .../impl/pb/GetApplicationsRequestPBImpl.java  | 25 ++
 .../webapp/AHSWebServices.java |  6 --
 .../hadoop/yarn/server/webapp/WebServices.java |  7 +-
 .../server/resourcemanager/ClientRMService.java|  5 +
 .../webapp/ApplicationsRequestBuilder.java |  9 
 .../server/resourcemanager/webapp/RMWSConsts.java  |  1 +
 .../webapp/RMWebServiceProtocol.java   |  3 ++-
 .../resourcemanager/webapp/RMWebServices.java  |  2 ++
 .../resourcemanager/webapp/TestRMWebServices.java  |  9 +---
 .../webapp/DefaultRequestInterceptorREST.java  |  2 +-
 .../router/webapp/FederationInterceptorREST.java   |  4 ++--
 .../server/router/webapp/RouterWebServices.java|  3 ++-
 .../router/webapp/BaseRouterWebServicesTest.java   |  3 ++-
 .../webapp/MockDefaultRequestInterceptorREST.java  |  2 +-
 .../router/webapp/MockRESTRequestInterceptor.java  |  2 +-
 .../webapp/PassThroughRESTRequestInterceptor.java  |  4 ++--
 .../webapp/TestFederationInterceptorREST.java  |  2 +-
 .../webapp/TestFederationInterceptorRESTRetry.java |  6 +++---
 .../src/site/markdown/ResourceManagerRest.md   |  1 +
 21 files changed, 95 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
index a52b405..81d98b5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
@@ -393,4 +393,22 @@ public abstract class GetApplicationsRequest {
   @Private
   @Unstable
   public abstract void setScope(ApplicationsRequestScope scope);
+
+  /**
+   * Get the name used to filter applications.
+   *
+   * @return the name
+   */
+  @Private
+  @Unstable
+  public abstract String getName();
+
+  /**
+   * Set the name used to filter applications.
+   *
+   * @param name of the application
+   */
+  @Private
+  @Unstable
+  public abstract void setName(String name);
 }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
index d562cdb..8a0273d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
@@ -206,6 +206,7 @@ message GetApplicationsRequestProto {
   optional int64 finish_end = 9;
   repeated string applicationTags = 10;
   optional ApplicationsRequestScopeProto scope = 11 [default = ALL];
+  optional string name = 12;
 }
 
 message GetApplicationsResponseProto {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
index 4c5fee0..9c3045e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
@@ -55,6 +55,7 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   Range finish = null;
   private Set applicationTags;
   private ApplicationsRequestScope scope;
+  private String name;
 
   public GetApplicationsRequestPBImpl() {
 builder = GetApplicationsRequestProto.newBuilder();
@@ -121,6 +122,9 @@ public class GetApplicationsRequestPBImpl extends 
GetApplicationsRequest {
   builder.clearQueues();
   builder.addAllQueues(queues);
 }
+if (name != null) {
+  builder.setName(name

[hadoop] branch trunk updated: YARN-10026. Pull out common code pieces from ATS v1.5 and v2. Contributed by Adam Antal

2020-01-06 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new dd2607e  YARN-10026. Pull out common code pieces from ATS v1.5 and v2. 
Contributed by Adam Antal
dd2607e is described below

commit dd2607e3ec3c349130e4143b0f67b23e11da420a
Author: Szilard Nemeth 
AuthorDate: Mon Jan 6 17:16:11 2020 +0100

YARN-10026. Pull out common code pieces from ATS v1.5 and v2. Contributed 
by Adam Antal
---
 .../webapp/AHSWebServices.java | 204 ++--
 .../webapp/TestAHSWebServices.java |  25 +-
 .../hadoop/yarn/server/webapp/AppInfoProvider.java |  54 +
 .../hadoop/yarn/server/webapp/BasicAppInfo.java|  47 
 .../hadoop/yarn/server/webapp/LogServlet.java  | 260 +
 .../hadoop/yarn/server/webapp/LogWebService.java   | 243 +++
 .../hadoop/yarn/server/webapp/WebServices.java |  33 ++-
 .../hadoop/yarn/server/webapp/package-info.java|  18 ++
 .../yarn/server/webapp/TestLogWebService.java  |  23 +-
 9 files changed, 481 insertions(+), 426 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d9918d3..5e77718 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -32,25 +32,18 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.Response.Status;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.sun.jersey.api.client.ClientHandlerException;
-import com.sun.jersey.api.client.UniformInterfaceException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
-import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
-import org.apache.hadoop.yarn.server.webapp.LogWebServiceUtils;
+import org.apache.hadoop.yarn.server.webapp.LogServlet;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.YarnWebServiceParams;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
@@ -61,33 +54,20 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
-import org.codehaus.jettison.json.JSONException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Singleton
 @Path("/ws/v1/applicationhistory")
 public class AHSWebServices extends WebServices {
 
-  private static final Logger LOG = LoggerFactory
-  .getLogger(AHSWebServices.class);
-  private static final String NM_DOWNLOAD_URI_STR =
-  "/ws/v1/node/containers";
-  private static final Joiner JOINER = Joiner.on("");
-  private static final Joiner DOT_JOINER = Joiner.on(". ");
-  private final Configuration conf;
-  private final LogAggregationFileControllerFactory factory;
+  private LogServlet logServlet;
 
   @Inject
   public AHSWebServices(ApplicationBaseProtocol appBaseProt,
   Configuration conf) {
 super(appBaseProt);
-this.conf = conf;
-this.factory = new LogAggregationFileControllerFactory(conf);
+this.logServlet = new LogServlet(conf, this);
   }
 
   @GET
@@ -244,87 +224,9 @@ public 

[hadoop] branch trunk updated: YARN-10067. Add dry-run feature to FS-CS converter tool. Contributed by Peter Bacsko

2020-01-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 24e6a9e  YARN-10067. Add dry-run feature to FS-CS converter tool. 
Contributed by Peter Bacsko
24e6a9e is described below

commit 24e6a9e43a210cdecaa8e87926eef09c869988f9
Author: Szilard Nemeth 
AuthorDate: Fri Jan 10 21:14:07 2020 +0100

YARN-10067. Add dry-run feature to FS-CS converter tool. Contributed by 
Peter Bacsko
---
 .../fair/converter/ConversionOptions.java  |  80 +
 .../fair/converter/DryRunResultHolder.java |  80 +
 .../FSConfigToCSConfigArgumentHandler.java |  82 +++---
 .../converter/FSConfigToCSConfigConverter.java |  26 +++--
 .../converter/FSConfigToCSConfigConverterMain.java |   6 +-
 .../converter/FSConfigToCSConfigRuleHandler.java   |  29 ++---
 .../scheduler/fair/converter/FSQueueConverter.java |  60 +-
 .../fair/converter/FSQueueConverterBuilder.java| 100 +
 .../TestFSConfigToCSConfigArgumentHandler.java |  89 +--
 .../converter/TestFSConfigToCSConfigConverter.java |  13 ++-
 .../TestFSConfigToCSConfigRuleHandler.java |  63 ++-
 .../fair/converter/TestFSQueueConverter.java   | 125 +++--
 12 files changed, 623 insertions(+), 130 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java
new file mode 100644
index 000..c116232
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/ConversionOptions.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.converter;
+
+import org.slf4j.Logger;
+
+public class ConversionOptions {
+  private DryRunResultHolder dryRunResultHolder;
+  private boolean dryRun;
+
+  public ConversionOptions(DryRunResultHolder dryRunResultHolder,
+  boolean dryRun) {
+this.dryRunResultHolder = dryRunResultHolder;
+this.dryRun = dryRun;
+  }
+
+  public void setDryRun(boolean dryRun) {
+this.dryRun = dryRun;
+  }
+
+  public void handleWarning(String msg, Logger log) {
+if (dryRun) {
+  dryRunResultHolder.addDryRunWarning(msg);
+} else {
+  log.warn(msg);
+}
+  }
+
+  public void handleError(String msg) {
+if (dryRun) {
+  dryRunResultHolder.addDryRunError(msg);
+} else {
+  throw new UnsupportedPropertyException(msg);
+}
+  }
+
+  public void handleConversionError(String msg) {
+if (dryRun) {
+  dryRunResultHolder.addDryRunError(msg);
+} else {
+  throw new ConversionException(msg);
+}
+  }
+
+  public void handlePreconditionError(String msg) {
+if (dryRun) {
+  dryRunResultHolder.addDryRunError(msg);
+} else {
+  throw new PreconditionException(msg);
+}
+  }
+
+  public void handleParsingFinished() {
+if (dryRun) {
+  dryRunResultHolder.printDryRunResults();
+}
+  }
+
+  public void handleGenericException(Exception e, String msg) {
+if (dryRun) {
+  dryRunResultHolder.addDryRunError(msg);
+} else {
+  FSConfigToCSConfigArgumentHandler.logAndStdErr(e, msg);
+}
+  }
+}
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/converter/DryRunResultHolder.java
new file mode 100644
index 0

[hadoop] branch branch-3.2 updated: YARN-10026. Pull out common code pieces from ATS v1.5 and v2. Contributed by Adam Antal

2020-01-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 6a7dfb3  YARN-10026. Pull out common code pieces from ATS v1.5 and v2. 
Contributed by Adam Antal
6a7dfb3 is described below

commit 6a7dfb3bf321b897e52ec14e618c3f5b7b855780
Author: Szilard Nemeth 
AuthorDate: Sun Jan 12 13:54:08 2020 +0100

YARN-10026. Pull out common code pieces from ATS v1.5 and v2. Contributed 
by Adam Antal
---
 .../webapp/AHSWebServices.java | 208 ++---
 .../webapp/TestAHSWebServices.java |  25 +-
 .../hadoop/yarn/server/webapp/AppInfoProvider.java |  54 +
 .../hadoop/yarn/server/webapp/BasicAppInfo.java|  47 
 .../hadoop/yarn/server/webapp/LogServlet.java  | 260 +
 .../hadoop/yarn/server/webapp/LogWebService.java   | 247 +++-
 .../hadoop/yarn/server/webapp/WebServices.java |  33 ++-
 .../hadoop/yarn/server/webapp/package-info.java|  18 ++
 .../yarn/server/webapp/TestLogWebService.java  |  23 +-
 9 files changed, 481 insertions(+), 434 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
index d94605f..607b88b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebServices.java
@@ -32,25 +32,18 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.ResponseBuilder;
-import javax.ws.rs.core.Response.Status;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.sun.jersey.api.client.ClientHandlerException;
-import com.sun.jersey.api.client.UniformInterfaceException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineAbout;
-import 
org.apache.hadoop.yarn.logaggregation.filecontroller.LogAggregationFileControllerFactory;
-import org.apache.hadoop.yarn.server.webapp.LogWebServiceUtils;
+import org.apache.hadoop.yarn.server.webapp.LogServlet;
 import org.apache.hadoop.yarn.server.webapp.WebServices;
 import org.apache.hadoop.yarn.server.webapp.YarnWebServiceParams;
 import org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo;
@@ -61,33 +54,20 @@ import 
org.apache.hadoop.yarn.server.webapp.dao.ContainerInfo;
 import org.apache.hadoop.yarn.server.webapp.dao.ContainersInfo;
 import org.apache.hadoop.yarn.util.timeline.TimelineUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
-import org.apache.hadoop.yarn.webapp.NotFoundException;
-import com.google.common.base.Joiner;
 import com.google.inject.Inject;
 import com.google.inject.Singleton;
-import org.codehaus.jettison.json.JSONException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 @Singleton
 @Path("/ws/v1/applicationhistory")
 public class AHSWebServices extends WebServices {
 
-  private static final Logger LOG = LoggerFactory
-  .getLogger(AHSWebServices.class);
-  private static final String NM_DOWNLOAD_URI_STR =
-  "/ws/v1/node/containers";
-  private static final Joiner JOINER = Joiner.on("");
-  private static final Joiner DOT_JOINER = Joiner.on(". ");
-  private final Configuration conf;
-  private final LogAggregationFileControllerFactory factory;
+  private LogServlet logServlet;
 
   @Inject
   public AHSWebServices(ApplicationBaseProtocol appBaseProt,
   Configuration conf) {
 super(appBaseProt);
-this.conf = conf;
-this.factory = new LogAggregationFileControllerFactory(conf);
+this.logServlet = new LogServlet(conf, this);
   }
 
   @GET
@@ -242,89 +222,9 @@ public 

[hadoop] branch trunk updated: YARN-9866. u:user2:%primary_group is not working as expected. Contributed by Manikandan R

2020-01-12 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d842dff  YARN-9866. u:user2:%primary_group is not working as expected. 
Contributed by Manikandan R
d842dff is described below

commit d842dfffa53c8b565f3d65af44ccd7e1cc706733
Author: Szilard Nemeth 
AuthorDate: Sun Jan 12 14:04:12 2020 +0100

YARN-9866. u:user2:%primary_group is not working as expected. Contributed 
by Manikandan R
---
 .../placement/UserGroupMappingPlacementRule.java   |   6 +-
 .../TestCapacitySchedulerQueueMappingFactory.java  | 206 +++--
 2 files changed, 152 insertions(+), 60 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
index d69272d..0caa602 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
@@ -220,7 +220,11 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   }
 }
 if (user.equals(mapping.source)) {
-  return getPlacementContext(mapping);
+  if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
+return getPlacementContext(mapping, groups.getGroups(user).get(0));
+  } else {
+return getPlacementContext(mapping);
+  }
 }
   }
   if (mapping.type == MappingType.GROUP) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java
index 6ee9a7b..4cec544 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerQueueMappingFactory.java
@@ -51,8 +51,6 @@ public class TestCapacitySchedulerQueueMappingFactory {
   public static final String USER = "user_";
   public static final String PARENT_QUEUE = "c";
 
-  private MockRM mockRM = null;
-
   public static CapacitySchedulerConfiguration setupQueueMappingsForRules(
   CapacitySchedulerConfiguration conf, String parentQueue,
   boolean overrideWithQueueMappings, int[] sourceIds) {
@@ -114,23 +112,30 @@ public class TestCapacitySchedulerQueueMappingFactory {
 // init queue mapping for UserGroupMappingRule and AppNameMappingRule
 setupQueueMappingsForRules(conf, PARENT_QUEUE, true, new int[] {1, 2, 3});
 
-mockRM = new MockRM(conf);
-CapacityScheduler cs = (CapacityScheduler) mockRM.getResourceScheduler();
-cs.updatePlacementRules();
-mockRM.start();
-cs.start();
-
-List rules = cs.getRMContext()
-.getQueuePlacementManager().getPlacementRules();
-
-List placementRuleNames = new ArrayList<>();
-for (PlacementRule pr : rules) {
-  placementRuleNames.add(pr.getName());
+MockRM mockRM = null;
+try {
+  mockRM = new MockRM(conf);
+  CapacityScheduler cs = (CapacityScheduler) mockRM.getResourceScheduler();
+  cs.updatePlacementRules();
+  mockRM.start();
+  cs.start();
+
+  List rules = cs.getRMContext()
+  .getQueuePlacementManager().getPlacementRules();
+
+  List placementRuleNames = new ArrayList<>();
+  for (PlacementRule pr : rules) {
+placementRuleNames.add(pr.getName());
+  }
+
+  // verify both placement rules were added successfully
+  assertThat(placementRuleNames, hasItems(QUEUE_MAPPING_RULE_USER_GROUP));
+  assertThat(placementRuleNames, hasItems(QUEUE_MAPPING_RULE_APP_NAME));
+} finally {
+  if(mockRM != null) {
+mockRM.close();
+  }
 }
-
-// verify both placement rules were added successfully
-assertThat(placementRuleNames, hasIte

[hadoop] branch trunk updated: YARN-9989. Typo in CapacityScheduler documentation: Runtime Configuration. Contributed by Kevin Su

2020-01-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2576c31  YARN-9989. Typo in CapacityScheduler documentation: Runtime 
Configuration. Contributed by Kevin Su
2576c31 is described below

commit 2576c31644badd5b49a21d4aecc738d0dcc71269
Author: Szilard Nemeth 
AuthorDate: Mon Jan 13 16:50:07 2020 +0100

YARN-9989. Typo in CapacityScheduler documentation: Runtime Configuration. 
Contributed by Kevin Su
---
 .../hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
index 3e3db91..81781d3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/CapacityScheduler.md
@@ -54,7 +54,7 @@ The `CapacityScheduler` supports the following features:
 
 * **Operability**
 
-* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime unless the queue is STOPPED and nhas no 
pending/running apps.
+* Runtime Configuration - The queue definitions and properties such as 
capacity, ACLs can be changed, at runtime, by administrators in a secure manner 
to minimize disruption to users. Also, a console is provided for users and 
administrators to view current allocation of resources to various queues in the 
system. Administrators can *add additional queues* at runtime, but queues 
cannot be *deleted* at runtime unless the queue is STOPPED and has no 
pending/running apps.
 
 * Drain applications - Administrators can *stop* queues at runtime to 
ensure that while existing applications run to completion, no new applications 
can be submitted. If a queue is in `STOPPED` state, new applications cannot be 
submitted to *itself* or *any of its child queues*. Existing applications 
continue to completion, thus the queue can be *drained* gracefully. 
Administrators can also *start* the stopped queues.
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-9868. Validate %primary_group queue in CS queue manager. Contributed by Manikandan R

2020-01-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new b7ef8a3  YARN-9868. Validate %primary_group queue in CS queue manager. 
Contributed by Manikandan R
b7ef8a3 is described below

commit b7ef8a333f6717735caeee09ae4ce67bff7285e3
Author: Szilard Nemeth 
AuthorDate: Mon Jan 13 17:15:09 2020 +0100

YARN-9868. Validate %primary_group queue in CS queue manager. Contributed 
by Manikandan R
---
 .../placement/UserGroupMappingPlacementRule.java   | 22 --
 .../TestUserGroupMappingPlacementRule.java | 21 +
 .../TestCapacitySchedulerAutoCreatedQueueBase.java |  4 ++--
 .../TestCapacitySchedulerQueueMappingFactory.java  |  4 ++--
 4 files changed, 41 insertions(+), 10 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
index 0caa602..de80410 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
@@ -179,11 +179,15 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   if (mapping.getParentQueue() != null
   && mapping.getParentQueue().equals(PRIMARY_GROUP_MAPPING)
   && mapping.getQueue().equals(CURRENT_USER_MAPPING)) {
-QueueMapping queueMapping =
-new QueueMapping(mapping.getType(), mapping.getSource(),
-user, groups.getGroups(user).get(0));
-validateQueueMapping(queueMapping);
-return getPlacementContext(queueMapping, user);
+if (this.queueManager
+.getQueue(groups.getGroups(user).get(0)) != null) {
+  QueueMapping queueMapping = new QueueMapping(mapping.getType(),
+  mapping.getSource(), user, groups.getGroups(user).get(0));
+  validateQueueMapping(queueMapping);
+  return getPlacementContext(queueMapping, user);
+} else {
+  return null;
+}
   } else if (mapping.getParentQueue() != null
   && mapping.getParentQueue().equals(SECONDARY_GROUP_MAPPING)
   && mapping.getQueue().equals(CURRENT_USER_MAPPING)) {
@@ -203,7 +207,13 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   } else if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
 return getPlacementContext(mapping, user);
   } else if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
-return getPlacementContext(mapping, groups.getGroups(user).get(0));
+if (this.queueManager
+.getQueue(groups.getGroups(user).get(0)) != null) {
+  return getPlacementContext(mapping,
+  groups.getGroups(user).get(0));
+} else {
+  return null;
+}
   } else if (mapping.queue.equals(SECONDARY_GROUP_MAPPING)) {
 String secondaryGroup = getSecondaryGroup(user);
 if (secondaryGroup != null) {
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
index 7590756..5cd6ea1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.placement;
 
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -26,6 +27,7 @@ import java.util.Arrays;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.Grou

[hadoop] branch trunk updated: YARN-9912. Capacity scheduler: support u:user2:%secondary_group queue mapping. Contributed by Manikandan R

2020-01-13 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 621c5ea  YARN-9912. Capacity scheduler: support 
u:user2:%secondary_group queue mapping. Contributed by Manikandan R
621c5ea is described below

commit 621c5eac38697755cf29aa8618869c09092b6a80
Author: Szilard Nemeth 
AuthorDate: Mon Jan 13 17:23:00 2020 +0100

YARN-9912. Capacity scheduler: support u:user2:%secondary_group queue 
mapping. Contributed by Manikandan R
---
 .../placement/UserGroupMappingPlacementRule.java   | 20 +++--
 .../TestCapacitySchedulerAutoCreatedQueueBase.java |  9 ++-
 .../TestCapacitySchedulerQueueMappingFactory.java  | 85 ++
 3 files changed, 103 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
index de80410..5221ace 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
@@ -168,6 +168,11 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
 break;
   }
 }
+
+if (secondaryGroup == null && LOG.isDebugEnabled()) {
+  LOG.debug("User {} is not associated with any Secondary "
+  + "Group. Hence it may use the 'default' queue", user);
+}
 return secondaryGroup;
   }
 
@@ -198,10 +203,6 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
   validateQueueMapping(queueMapping);
   return getPlacementContext(queueMapping, user);
 } else {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("User {} is not associated with any Secondary Group. 
"
-+ "Hence it may use the 'default' queue", user);
-  }
   return null;
 }
   } else if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
@@ -219,10 +220,6 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
 if (secondaryGroup != null) {
   return getPlacementContext(mapping, secondaryGroup);
 } else {
-  if (LOG.isDebugEnabled()) {
-LOG.debug("User {} is not associated with any Secondary "
-+ "Group. Hence it may use the 'default' queue", user);
-  }
   return null;
 }
   } else {
@@ -232,6 +229,13 @@ public class UserGroupMappingPlacementRule extends 
PlacementRule {
 if (user.equals(mapping.source)) {
   if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
 return getPlacementContext(mapping, groups.getGroups(user).get(0));
+  } else if (mapping.queue.equals(SECONDARY_GROUP_MAPPING)) {
+String secondaryGroup = getSecondaryGroup(user);
+if (secondaryGroup != null) {
+  return getPlacementContext(mapping, secondaryGroup);
+} else {
+  return null;
+}
   } else {
 return getPlacementContext(mapping);
   }
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
index 59fbb84..9527e80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerAutoCreatedQueueBase.java
@@ -122,6 +122,7 @@ public class TestCapacitySchedulerAutoCreatedQueueBase {
   public static final String B1 = B + ".b1";
   public static final String B2 = B + ".b2";
   public st

[hadoop] branch trunk updated: YARN-10028. Integrate the new abstract log servlet to the JobHistory server. Contributed by Adam Antal

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 13cea04  YARN-10028. Integrate the new abstract log servlet to the 
JobHistory server. Contributed by Adam Antal
13cea04 is described below

commit 13cea0412c11ce9ef7e475198a24e71788cf0b2f
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 11:00:08 2020 +0100

YARN-10028. Integrate the new abstract log servlet to the JobHistory 
server. Contributed by Adam Antal
---
 .../mapreduce/v2/hs/HistoryClientService.java  |  7 ++-
 .../mapreduce/v2/hs/webapp/HsWebServices.java  | 58 --
 .../mapreduce/v2/hs/webapp/TestHsWebServices.java  |  3 ++
 .../v2/hs/webapp/TestHsWebServicesAcls.java|  2 +-
 .../v2/hs/webapp/TestHsWebServicesAttempts.java|  3 ++
 .../v2/hs/webapp/TestHsWebServicesJobConf.java |  3 ++
 .../v2/hs/webapp/TestHsWebServicesJobs.java|  4 ++
 .../v2/hs/webapp/TestHsWebServicesJobsQuery.java   |  3 ++
 .../v2/hs/webapp/TestHsWebServicesTasks.java   |  3 ++
 .../org/apache/hadoop/yarn/webapp/WebApps.java | 12 -
 .../hadoop/yarn/webapp/WebServicesTestUtils.java   |  1 -
 .../hadoop/yarn/server/webapp/AppInfoProvider.java |  2 +
 .../hadoop/yarn/server/webapp/BasicAppInfo.java|  4 ++
 13 files changed, 96 insertions(+), 9 deletions(-)

diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index b0bf41b..b63aef4 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -79,6 +79,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
@@ -150,9 +152,11 @@ public class HistoryClientService extends AbstractService {
   }
 
   @VisibleForTesting
-  protected void initializeWebApp(Configuration conf) {
+  protected void initializeWebApp(Configuration conf) throws IOException {
 webApp = new HsWebApp(history);
 InetSocketAddress bindAddress = MRWebAppUtil.getJHSWebBindAddress(conf);
+ApplicationClientProtocol appClientProtocol =
+ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
 // NOTE: there should be a .at(InetSocketAddress)
 WebApps
 .$for("jobhistory", HistoryClientService.class, this, "ws")
@@ -163,6 +167,7 @@ public class HistoryClientService extends AbstractService {
 JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
 .withCSRFProtection(JHAdminConfig.MR_HISTORY_CSRF_PREFIX)
 .withXFSProtection(JHAdminConfig.MR_HISTORY_XFS_PREFIX)
+.withAppClientProtocol(appClientProtocol)
 .at(NetUtils.getHostPortString(bindAddress)).start(webApp);
 
 String connectHost = 
MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
index dabb760..e3804e9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 
 import java.io.IOException;
 
+import javax.annotation.Nullable;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -30,9 +32,12 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
+im

[hadoop] branch branch-3.2 updated: HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1d2c5df  HADOOP-16683. Disable retry of 
FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. 
Contributed by Adam Antal
1d2c5df is described below

commit 1d2c5dffa8489f389b9f52138feb67b61c492306
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 11:18:54 2020 +0100

HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of 
wrapped AccessControlException. Contributed by Adam Antal
---
 .../org/apache/hadoop/io/retry/RetryPolicies.java | 12 +++-
 .../org/apache/hadoop/io/retry/TestRetryProxy.java| 19 +++
 .../hadoop/io/retry/UnreliableImplementation.java |  7 +++
 .../apache/hadoop/io/retry/UnreliableInterface.java   |  4 
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a89c3a7..fcbcc86 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -690,7 +690,8 @@ public class RetryPolicies {
   } else if (e instanceof InvalidToken) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Invalid or Cancelled Token");
-  } else if (e instanceof AccessControlException) {
+  } else if (e instanceof AccessControlException ||
+  hasWrappedAccessControlException(e)) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Access denied");
   } else if (e instanceof SocketException
@@ -761,4 +762,13 @@ public class RetryPolicies {
 return unwrapped instanceof RetriableException ? 
 (RetriableException) unwrapped : null;
   }
+
+  /**
+   * Reports whether {@code e} is, or transitively wraps (via the
+   * getCause() chain), an AccessControlException. Used by the failover
+   * retry policy above to return a FAIL action ("Access denied") instead
+   * of retrying when the ACE arrives wrapped in another exception.
+   *
+   * @param e the exception whose cause chain is inspected
+   * @return true if any link in the cause chain is an AccessControlException
+   */
+  private static boolean hasWrappedAccessControlException(Exception e) {
+Throwable throwable = e;
+// Walk causes until an ACE is found or the chain ends (cause == null).
+while (!(throwable instanceof AccessControlException) &&
+throwable.getCause() != null) {
+  throwable = throwable.getCause();
+}
+return throwable instanceof AccessControlException;
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 2116fb2..a1135a0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -377,4 +377,23 @@ public class TestRetryProxy {
   assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
 }
   }
+
+  /**
+   * Verifies that an AccessControlException wrapped inside nested
+   * IOExceptions is not retried: the failover policy must be consulted
+   * exactly once and must decide FAIL rather than FAILOVER_AND_RETRY.
+   */
+  @Test
+  public void testWrappedAccessControlException() throws Exception {
+RetryPolicy policy = mock(RetryPolicy.class);
+RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+setupMockPolicy(policy, realPolicy);
+
+UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+UnreliableInterface.class, unreliableImpl, policy);
+
+try {
+  unreliable.failsWithWrappedAccessControlException();
+  fail("Should fail");
+} catch (IOException expected) {
+  // Exactly one shouldRetry consultation: no retry after the wrapped ACE.
+  verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+  anyInt(), anyBoolean());
+  assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+}
+  }
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index a20d898..15a84bb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -139,6 +139,13 @@ class UnreliableImplementation implements 
UnreliableInterface {
 }
   }
 
+  /**
+   * Always throws an IOException that wraps another IOException, which in
+   * turn wraps an AccessControlException — i.e. the ACE is two cause-links
+   * deep. Test fixture for the wrapped-ACE fail-fast path in RetryPolicies.
+   *
+   * @throws IOException always, with a nested AccessControlException cause
+   */
+  public void failsWithWrappedAccessControlException()
+  throws IOException {
+AccessControlException ace = new AccessControlException();
+IOException ioe = new IOException(ace);
+throw new IOException(ioe);
+  }
+
   @Override
   public String succeedsOnceThenFailsReturningString()
   throws UnreliableException, IOException, StandbyException {
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.jav

  1   2   3   4   5   >