hadoop git commit: MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)

2017-04-14 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8515d35bd -> d4f553d42


MAPREDUCE-6673. Add a test example job that grows in memory usage over time 
(Karthik Kambatla via Haibo Chen)

Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea
(cherry picked from commit 25ac44709b4bbed78b607ea48021237b64e01b9f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4f553d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4f553d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4f553d4

Branch: refs/heads/branch-2
Commit: d4f553d42f428eb9f05d7b8a2a3b2f9e7903d138
Parents: 8515d35
Author: Haibo Chen 
Authored: Fri Apr 14 17:33:04 2017 -0700
Committer: Haibo Chen 
Committed: Fri Apr 14 17:37:37 2017 -0700

--
 .../hadoop/mapreduce/GrowingSleepJob.java   | 68 
 .../apache/hadoop/test/MapredTestDriver.java|  3 +
 2 files changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f553d4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
new file mode 100644
index 000..55740f7
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A sleep job whose mappers create a 1 MB buffer for every record.
+ */
+public class GrowingSleepJob extends SleepJob {
+  private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
+
+  public static class GrowingSleepMapper extends SleepMapper {
+private final int MB = 1024 * 1024;
+private ArrayList<byte[]> bytes = new ArrayList<>();
+
+@Override
+public void map(IntWritable key, IntWritable value, Context context)
+throws IOException, InterruptedException {
+  super.map(key, value, context);
+  long free = Runtime.getRuntime().freeMemory();
+  if (free > 32 * MB) {
+LOG.info("Free memory = " + free +
+" bytes. Creating 1 MB on the heap.");
+bytes.add(new byte[MB]);
+  }
+}
+  }
+
+  public static void main(String[] args) throws Exception {
+int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
+System.exit(res);
+  }
+
+  @Override
+  public Job createJob(int numMapper, int numReducer,
+   long mapSleepTime, int mapSleepCount,
+   long reduceSleepTime, int reduceSleepCount)
+  throws IOException {
+Job job = super.createJob(numMapper, numReducer, mapSleepTime,
+mapSleepCount, reduceSleepTime, reduceSleepCount);
+job.setMapperClass(GrowingSleepMapper.class);
+job.setJobName("Growing sleep job");
+return job;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f553d4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
 

hadoop git commit: MAPREDUCE-6673. Add a test example job that grows in memory usage over time (Karthik Kambatla via Haibo Chen)

2017-04-14 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0ac17dc64 -> 25ac44709


MAPREDUCE-6673. Add a test example job that grows in memory usage over time 
(Karthik Kambatla via Haibo Chen)

Change-Id: Iccfc8c67c38c526cc61726d87bfcbcf69ac36fea


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25ac4470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25ac4470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25ac4470

Branch: refs/heads/trunk
Commit: 25ac44709b4bbed78b607ea48021237b64e01b9f
Parents: 0ac17dc
Author: Haibo Chen 
Authored: Fri Apr 14 17:33:04 2017 -0700
Committer: Haibo Chen 
Committed: Fri Apr 14 17:36:03 2017 -0700

--
 .../hadoop/mapreduce/GrowingSleepJob.java   | 68 
 .../apache/hadoop/test/MapredTestDriver.java|  3 +
 2 files changed, 71 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25ac4470/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
new file mode 100644
index 000..55740f7
--- /dev/null
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/GrowingSleepJob.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+/**
+ * A sleep job whose mappers create a 1 MB buffer for every record.
+ */
+public class GrowingSleepJob extends SleepJob {
+  private static final Log LOG = LogFactory.getLog(GrowingSleepJob.class);
+
+  public static class GrowingSleepMapper extends SleepMapper {
+private final int MB = 1024 * 1024;
+private ArrayList<byte[]> bytes = new ArrayList<>();
+
+@Override
+public void map(IntWritable key, IntWritable value, Context context)
+throws IOException, InterruptedException {
+  super.map(key, value, context);
+  long free = Runtime.getRuntime().freeMemory();
+  if (free > 32 * MB) {
+LOG.info("Free memory = " + free +
+" bytes. Creating 1 MB on the heap.");
+bytes.add(new byte[MB]);
+  }
+}
+  }
+
+  public static void main(String[] args) throws Exception {
+int res = ToolRunner.run(new Configuration(), new GrowingSleepJob(), args);
+System.exit(res);
+  }
+
+  @Override
+  public Job createJob(int numMapper, int numReducer,
+   long mapSleepTime, int mapSleepCount,
+   long reduceSleepTime, int reduceSleepCount)
+  throws IOException {
+Job job = super.createJob(numMapper, numReducer, mapSleepTime,
+mapSleepCount, reduceSleepTime, reduceSleepCount);
+job.setMapperClass(GrowingSleepMapper.class);
+job.setJobName("Growing sleep job");
+return job;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/25ac4470/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/test/MapredTestDriver.java
 

hadoop git commit: MAPREDUCE-6875. Rename mapred-site.xml.template to mapred-site.xml. (Yuanbo Liu via Haibo Chen)

2017-04-17 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk ac3cfdf3e -> f1de2c856


MAPREDUCE-6875. Rename mapred-site.xml.template to mapred-site.xml. (Yuanbo Liu 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1de2c85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1de2c85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1de2c85

Branch: refs/heads/trunk
Commit: f1de2c8560a4ce1a346727c2c265570c5b6b872e
Parents: ac3cfdf
Author: Haibo Chen 
Authored: Mon Apr 17 12:00:15 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 17 12:25:30 2017 -0700

--
 hadoop-mapreduce-project/.gitignore |  1 -
 hadoop-mapreduce-project/conf/mapred-site.xml   | 21 
 .../conf/mapred-site.xml.template   | 21 
 3 files changed, 21 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/.gitignore
--
diff --git a/hadoop-mapreduce-project/.gitignore 
b/hadoop-mapreduce-project/.gitignore
index d230896..0a86cfe 100644
--- a/hadoop-mapreduce-project/.gitignore
+++ b/hadoop-mapreduce-project/.gitignore
@@ -29,7 +29,6 @@ conf/core-site.xml
 conf/hdfs-site.xml
 conf/hadoop-env.sh
 conf/hadoop-site.xml
-conf/mapred-site.xml
 conf/hadoop-policy.xml
 conf/capacity-scheduler.xml
 conf/fair-scheduler.xml

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/conf/mapred-site.xml
--
diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml 
b/hadoop-mapreduce-project/conf/mapred-site.xml
new file mode 100644
index 000..761c352
--- /dev/null
+++ b/hadoop-mapreduce-project/conf/mapred-site.xml
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1de2c85/hadoop-mapreduce-project/conf/mapred-site.xml.template
--
diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml.template 
b/hadoop-mapreduce-project/conf/mapred-site.xml.template
deleted file mode 100644
index 761c352..000
--- a/hadoop-mapreduce-project/conf/mapred-site.xml.template
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-
-
-
-
-
-
-


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are complete. (Peter Bacsko via Haibo Chen)

2017-08-10 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 312e57b95 -> a32e0138f


MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are 
complete. (Peter Bacsko via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a32e0138
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a32e0138
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a32e0138

Branch: refs/heads/trunk
Commit: a32e0138fb63c92902e6613001f38a87c8a41321
Parents: 312e57b
Author: Haibo Chen 
Authored: Thu Aug 10 15:17:36 2017 -0700
Committer: Haibo Chen 
Committed: Thu Aug 10 15:17:36 2017 -0700

--
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  35 -
 .../mapreduce/v2/app/job/impl/TestJobImpl.java  | 139 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   6 +-
 .../src/main/resources/mapred-default.xml   |   8 ++
 4 files changed, 160 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 4d155d0..6880b6c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -644,6 +644,8 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   private float reduceProgress;
   private float cleanupProgress;
   private boolean isUber = false;
+  private boolean finishJobWhenReducersDone;
+  private boolean completingJob = false;
 
   private Credentials jobCredentials;
   private Token jobToken;
@@ -717,6 +719,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 this.maxFetchFailuresNotifications = conf.getInt(
 MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
 MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
+this.finishJobWhenReducersDone = conf.getBoolean(
+MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE,
+MRJobConfig.DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE);
   }
 
   protected StateMachine 
getStateMachine() {
@@ -2021,7 +2026,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 TimeUnit.MILLISECONDS);
 return JobStateInternal.FAIL_WAIT;
   }
-  
+
+  checkReadyForCompletionWhenAllReducersDone(job);
+
   return job.checkReadyForCommit();
 }
 
@@ -2052,6 +2059,32 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   }
   job.metrics.killedTask(task);
 }
+
+   /** Improvement: if all reducers have finished, we check if we have
+   restarted mappers that are still running. This can happen in a
+   situation when a node becomes UNHEALTHY and mappers are rescheduled.
+   See MAPREDUCE-6870 for details */
+private void checkReadyForCompletionWhenAllReducersDone(JobImpl job) {
+  if (job.finishJobWhenReducersDone) {
+int totalReduces = job.getTotalReduces();
+int completedReduces = job.getCompletedReduces();
+
+if (totalReduces > 0 && totalReduces == completedReduces
+&& !job.completingJob) {
+
+  for (TaskId mapTaskId : job.mapTasks) {
+MapTaskImpl task = (MapTaskImpl) job.tasks.get(mapTaskId);
+if (!task.isFinished()) {
+  LOG.info("Killing map task " + task.getID());
+  job.eventHandler.handle(
+  new TaskEvent(task.getID(), TaskEventType.T_KILL));
+}
+  }
+
+  job.completingJob = true;
+}
+  }
+}
   }
 
   // Transition class for handling jobs with no tasks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
--
diff --git 

hadoop git commit: YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo Chen)

2017-07-10 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 153498bc3 -> 3764b8a06


YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo 
Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3764b8a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3764b8a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3764b8a0

Branch: refs/heads/YARN-1011
Commit: 3764b8a06f02eb92f8f21dc0fc4de62957989955
Parents: 153498b
Author: Haibo Chen 
Authored: Mon Jul 10 09:55:42 2017 -0700
Committer: Haibo Chen 
Committed: Mon Jul 10 09:56:20 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 36 +--
 .../src/main/resources/yarn-default.xml | 42 ++--
 .../server/api/records/ResourceThresholds.java  | 11 +++-
 .../monitor/ContainersMonitorImpl.java  | 67 +++-
 4 files changed, 124 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3764b8a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index bb34626..03fa1ce 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1582,17 +1582,39 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
-  /** Overallocation (= allocation based on utilization) configs. */
-  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
-  NM_PREFIX + "overallocation.allocation-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
-  = 0f;
+  /**
+   * General overallocation threshold if no resource-type-specific
+   * threshold is provided.
+   */
+  public static final String NM_OVERALLOCATION_GENERAL_THRESHOLD =
+  NM_PREFIX + "overallocation.general-utilization-threshold";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_GENERAL_THRESHOLD = -1.0f;
+  /**
+   * The maximum value of utilization threshold for all resource types
+   * up to which the scheduler allocates OPPORTUNISTIC containers.
+   */
   @Private
-  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final float MAX_NM_OVERALLOCATION_THRESHOLD = 0.95f;
+
+  /**
+   * NM CPU utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_CPU_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.cpu-utilization-threshold";
+
+  /**
+   * NM memory utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.memory-utilization-threshold";
+
   public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
   NM_PREFIX + "overallocation.preemption-threshold";
   public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0f;
+  = 0.96f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3764b8a0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c131eec..42166a9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1564,14 +1564,44 @@
 
   
 The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node 
that
+  applies to all resource types (expressed as a float between 0 and 0.95).
+  By default, over-allocation is turned off (value = -1). When turned on,
+  the node allows running OPPORTUNISTIC containers when the 

hadoop git commit: YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. (Haibo Chen via asuresh)

2017-07-17 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 8e458246c -> 4c501b46d


YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. 
(Haibo Chen via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c501b46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c501b46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c501b46

Branch: refs/heads/YARN-1011
Commit: 4c501b46dd3674bdd2471a431014bc6d12ffd703
Parents: 8e45824
Author: Arun Suresh 
Authored: Mon Jul 17 14:07:23 2017 -0700
Committer: Haibo Chen 
Committed: Mon Jul 17 14:25:51 2017 -0700

--
 .../scheduler/ContainerScheduler.java   | 135 +--
 .../TestContainerManagerRecovery.java   |   2 +-
 .../TestContainerSchedulerQueuing.java  |  85 
 3 files changed, 177 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c501b46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 24530b3..19243ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -192,7 +192,9 @@ public class ContainerScheduler extends AbstractService 
implements
 // decrement only if it was a running container
 Container completedContainer = runningContainers.remove(container
 .getContainerId());
-if (completedContainer != null) {
+// only a running container releases resources upon completion
+boolean resourceReleased = completedContainer != null;
+if (resourceReleased) {
   this.utilizationTracker.subtractContainerResource(container);
   if (container.getContainerTokenIdentifier().getExecutionType() ==
   ExecutionType.OPPORTUNISTIC) {
@@ -218,8 +220,7 @@ public class ContainerScheduler extends AbstractService 
implements
 boolean resourcesAvailable = true;
 while (cIter.hasNext() && resourcesAvailable) {
   Container container = cIter.next();
-  if (this.utilizationTracker.hasResourcesAvailable(container)) {
-startAllocatedContainer(container);
+  if (tryStartContainer(container)) {
 cIter.remove();
   } else {
 resourcesAvailable = false;
@@ -228,50 +229,95 @@ public class ContainerScheduler extends AbstractService 
implements
 return resourcesAvailable;
   }
 
-  @VisibleForTesting
-  protected void scheduleContainer(Container container) {
-if (maxOppQueueLength <= 0) {
-  startAllocatedContainer(container);
-  return;
+  private boolean tryStartContainer(Container container) {
+boolean containerStarted = false;
+if (resourceAvailableToStartContainer(container)) {
+  startContainer(container);
+  containerStarted = true;
 }
-if (queuedGuaranteedContainers.isEmpty() &&
-queuedOpportunisticContainers.isEmpty() &&
-this.utilizationTracker.hasResourcesAvailable(container)) {
-  startAllocatedContainer(container);
+return containerStarted;
+  }
+
+  /**
+   * Check if there is resource available to start a given container
+   * immediately. (This can be extended to include overallocated resources)
+   * @param container the container to start
+   * @return true if container can be launched directly
+   */
+  private boolean resourceAvailableToStartContainer(Container container) {
+return this.utilizationTracker.hasResourcesAvailable(container);
+  }
+
+  private boolean enqueueContainer(Container container) {
+boolean isGuaranteedContainer = container.getContainerTokenIdentifier().
+getExecutionType() == ExecutionType.GUARANTEED;
+
+boolean isQueued;
+if (isGuaranteedContainer) {
+  queuedGuaranteedContainers.put(container.getContainerId(), container);
+  isQueued = true;
 } else {
-  LOG.info("No available resources for container {} to start its execution 
"
- 

hadoop git commit: YARN-6685. Add job count in to SLS JSON input format. (Yufei Gu via Haibo Chen)

2017-07-20 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk c21c26039 -> 0ba8cda13


YARN-6685. Add job count in to SLS JSON input format. (Yufei Gu via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ba8cda1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ba8cda1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ba8cda1

Branch: refs/heads/trunk
Commit: 0ba8cda13549cc4a3946c440016f9d2a9e78740d
Parents: c21c260
Author: Haibo Chen 
Authored: Thu Jul 20 08:15:46 2017 -0700
Committer: Haibo Chen 
Committed: Thu Jul 20 08:15:46 2017 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 28 +++-
 .../src/site/markdown/SchedulerLoadSimulator.md |  3 ++-
 2 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ba8cda1/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index a534f03..477cc4a 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -395,18 +395,28 @@ public class SLSRunner extends Configured implements Tool 
{
 String queue = jsonJob.get("job.queue.name").toString();
 increaseQueueAppNum(queue);
 
-String oldAppId = (String)jsonJob.get("job.id");
-if (oldAppId == null) {
-  oldAppId = Integer.toString(AM_ID);
-}
-
 String amType = (String)jsonJob.get("am.type");
 if (amType == null) {
   amType = SLSUtils.DEFAULT_JOB_TYPE;
 }
 
-runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
+int jobCount = 1;
+if (jsonJob.containsKey("job.count")) {
+  jobCount = Integer.parseInt(jsonJob.get("job.count").toString());
+}
+jobCount = Math.max(jobCount, 1);
+
+String oldAppId = (String)jsonJob.get("job.id");
+// Job id is generated automatically if this job configuration allows
+// multiple job instances
+if(jobCount > 1) {
+  oldAppId = null;
+}
+
+for (int i = 0; i < jobCount; i++) {
+  runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
+  getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
+}
   }
 
   private List getTaskContainers(Map jsonJob)
@@ -732,6 +742,10 @@ public class SLSRunner extends Configured implements Tool {
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
   boolean isTracked = trackedApps.contains(oldJobId);
+
+  if (oldJobId == null) {
+oldJobId = Integer.toString(AM_ID);
+  }
   AM_ID++;
 
   amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ba8cda1/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
--
diff --git 
a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md 
b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
index 6e00e9a..d1848e8 100644
--- a/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
+++ b/hadoop-tools/hadoop-sls/src/site/markdown/SchedulerLoadSimulator.md
@@ -336,8 +336,9 @@ Here we provide an example format of the sls json file, 
which contains 2 jobs. T
   "job.start.ms" : 0,  // job start time
   "job.end.ms" : 95375,// job finish time, optional, the default value 
is 0
   "job.queue.name" : "sls_queue_1", // the queue job will be submitted to
-  "job.id" : "job_1",  // the job id used to track the job, optional, 
the default value is a zero-based integer increasing with number of jobs
+  "job.id" : "job_1",  // the job id used to track the job, optional. 
The default value, a zero-based integer increasing with number of jobs, is 
used if this is not specified or job.count > 1
   "job.user" : "default",  // user, optional, the default value is 
"default"
+  "job.count" : 1, // number of jobs, optional, the default value 
is 1
   "job.tasks" : [ {
 "count": 1,// number of tasks, optional, the default value is 1
 "container.host" : "/default-rack/node1",  // host the container asks 
for


-
To unsubscribe, e-mail: 

hadoop git commit: YARN-6705 Add separate NM preemption thresholds for cpu and memory (Haibo Chen)

2017-07-12 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 3764b8a06 -> 8e458246c


YARN-6705 Add separate NM preemption thresholds for cpu and memory  (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e458246
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e458246
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e458246

Branch: refs/heads/YARN-1011
Commit: 8e458246c59e9385724d48b42272fe2d81fe1192
Parents: 3764b8a
Author: Haibo Chen 
Authored: Wed Jul 12 12:32:13 2017 -0700
Committer: Haibo Chen 
Committed: Wed Jul 12 12:32:49 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 31 +--
 .../src/main/resources/yarn-default.xml | 34 ++--
 .../monitor/ContainersMonitorImpl.java  | 42 +---
 3 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e458246/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 03fa1ce..2be9523 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1611,10 +1611,33 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
   NM_PREFIX + "overallocation.memory-utilization-threshold";
 
-  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
-  NM_PREFIX + "overallocation.preemption-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0.96f;
+  /**
+   * The CPU utilization threshold which, if exceeded a few times in a row,
+   * causes OPPORTUNISTIC containers started due to overallocation to start
+   * getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.cpu";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD = 0.99f;
+
+  /**
+   * The number of times that CPU utilization must go over the CPU preemption
+   * threshold consecutively before preemption starts to kick in.
+   */
+  public static final String NM_OVERALLOCATION_PREEMPTION_CPU_COUNT =
+  NM_PREFIX + "overallocation.preemption-threshold-count.cpu";
+  public static final int DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT = 4;
+
+
+  /**
+   * The memory utilization threshold beyond which OPPORTUNISTIC containers
+   * started due to overallocation should start getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.memory";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD = 0.95f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e458246/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 42166a9..837e9f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1606,11 +1606,37 @@
 
   
 When a node is over-allocated to improve utilization by
-  running OPPORTUNISTIC containers, this config captures the utilization
-  beyond which OPPORTUNISTIC containers should start getting preempted.
+  running OPPORTUNISTIC containers, this config captures the CPU
+  utilization beyond which OPPORTUNISTIC containers should start getting
+  preempted. This is used in combination with
+  yarn.nodemanager.overallocation.preemption-threshold-count.cpu, that is,
+  only when the CPU utilization goes over this threshold consecutively for
+  a few times will preemption kick in.
 
-yarn.nodemanager.overallocation.preemption-threshold
-0.96
+

hadoop git commit: YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via Haibo Chen)

2017-06-30 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 38996fdcf -> 147df300b


YARN-5067 Support specifying resources for AM containers in SLS. (Yufei Gu via 
Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/147df300
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/147df300
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/147df300

Branch: refs/heads/trunk
Commit: 147df300bf00b5f4ed250426b6ccdd69085466da
Parents: 38996fd
Author: Haibo Chen 
Authored: Fri Jun 30 16:50:06 2017 -0700
Committer: Haibo Chen 
Committed: Fri Jun 30 17:03:44 2017 -0700

--
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   | 38 +++
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  | 39 +++-
 .../yarn/sls/appmaster/MRAMSimulator.java   | 11 +++---
 .../hadoop/yarn/sls/conf/SLSConfiguration.java  | 15 
 .../yarn/sls/appmaster/TestAMSimulator.java |  4 +-
 5 files changed, 68 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/147df300/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 02da056..a534f03 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -406,7 +406,7 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
-getTaskContainers(jsonJob), null);
+getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
   }
 
   private List getTaskContainers(Map jsonJob)
@@ -558,7 +558,8 @@ public class SLSRunner extends Configured implements Tool {
 
 // Only supports the default job type currently
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, null);
+jobStartTimeMS, jobFinishTimeMS, containerList, null,
+getAMContainerResource(null));
   }
 
   private Resource getDefaultContainerResource() {
@@ -676,7 +677,8 @@ public class SLSRunner extends Configured implements Tool {
 }
 
 runNewAM(SLSUtils.DEFAULT_JOB_TYPE, user, jobQueue, oldJobId,
-jobStartTimeMS, jobFinishTimeMS, containerList, rr);
+jobStartTimeMS, jobFinishTimeMS, containerList, rr,
+getAMContainerResource(null));
   }
 } finally {
   stjp.close();
@@ -684,6 +686,26 @@ public class SLSRunner extends Configured implements Tool {
 
   }
 
+  private Resource getAMContainerResource(Map jsonJob) {
+Resource amContainerResource =
+SLSConfiguration.getAMContainerResource(getConf());
+
+if (jsonJob == null) {
+  return amContainerResource;
+}
+
+if (jsonJob.containsKey("am.memory")) {
+  amContainerResource.setMemorySize(
+  Long.parseLong(jsonJob.get("am.memory").toString()));
+}
+
+if (jsonJob.containsKey("am.vcores")) {
+  amContainerResource.setVirtualCores(
+  Integer.parseInt(jsonJob.get("am.vcores").toString()));
+}
+return amContainerResource;
+  }
+
   private void increaseQueueAppNum(String queue) throws YarnException {
 SchedulerWrapper wrapper = (SchedulerWrapper)rm.getResourceScheduler();
 String queueName = wrapper.getRealQueueName(queue);
@@ -700,7 +722,7 @@ public class SLSRunner extends Configured implements Tool {
   private void runNewAM(String jobType, String user,
   String jobQueue, String oldJobId, long jobStartTimeMS,
   long jobFinishTimeMS, List containerList,
-  ReservationSubmissionRequest rr) {
+  ReservationSubmissionRequest rr, Resource amContainerResource) {
 
 AMSimulator amSim = (AMSimulator) ReflectionUtils.newInstance(
 amClassMap.get(jobType), new Configuration());
@@ -710,9 +732,11 @@ public class SLSRunner extends Configured implements Tool {
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
   SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
   boolean isTracked = trackedApps.contains(oldJobId);
-  amSim.init(AM_ID++, heartbeatInterval, containerList,
-  rm, this, jobStartTimeMS, jobFinishTimeMS, user, jobQueue,
-  isTracked, oldJobId, rr, runner.getStartTimeMS());
+  AM_ID++;
+
+  amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
+  jobFinishTimeMS, user, jobQueue, isTracked, oldJobId, rr,
+  runner.getStartTimeMS(), 

hadoop git commit: YARN-6510. Fix profs stat file warning caused by process names that includes parenthesis. (Wilfred Spiegelenburg via Haibo Chen)

2017-04-26 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk edd693833 -> 4f3ca0396


YARN-6510. Fix profs stat file warning caused by process names that includes 
parenthesis. (Wilfred Spiegelenburg via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f3ca039
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f3ca039
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f3ca039

Branch: refs/heads/trunk
Commit: 4f3ca0396a810f54f7fd0489a224c1bb13143aa4
Parents: edd6938
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Haibo Chen 
Committed: Wed Apr 26 11:46:55 2017 -0700

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3ca039/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index a08b90e..d54611e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3ca039/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index c5fd40c..aad513a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -419,7 +419,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -555,7 +555,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -748,7 +748,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -771,7 +771,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 arg4";
-  cmdLines[2] = "proc3 arg5 arg6";
+  cmdLines[2] = "proc(3) 

hadoop git commit: YARN-6510. Fix profs stat file warning caused by process names that includes parenthesis. (Wilfred Spiegelenburg via Haibo Chen)

2017-04-26 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 cc66153e1 -> 894521673


YARN-6510. Fix profs stat file warning caused by process names that includes 
parenthesis. (Wilfred Spiegelenburg via Haibo Chen)

(cherry picked from commit 4f3ca0396a810f54f7fd0489a224c1bb13143aa4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89452167
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89452167
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89452167

Branch: refs/heads/branch-2
Commit: 894521673bf6a242cc102fe0c5f22290640a1d29
Parents: cc66153
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Haibo Chen 
Committed: Wed Apr 26 11:47:23 2017 -0700

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89452167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index bb2a77f..e581af5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89452167/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 841d333..a0a008d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -423,7 +423,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -566,7 +566,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -812,7 +812,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -835,7 +835,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 

hadoop git commit: YARN-6500. Do not mount inaccessible cgroups directories in CgroupsLCEResourcesHandler. (Miklos Szegedi via Haibo Chen)

2017-04-24 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 52adf7191 -> 8ac50e132


YARN-6500. Do not mount inaccessible cgroups directories in 
CgroupsLCEResourcesHandler. (Miklos Szegedi via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ac50e13
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ac50e13
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ac50e13

Branch: refs/heads/trunk
Commit: 8ac50e1322cb3f84bd998635924d85846aa47c94
Parents: 52adf71
Author: Haibo Chen 
Authored: Mon Apr 24 11:37:52 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 24 11:37:52 2017 -0700

--
 .../linux/resources/CGroupsHandlerImpl.java |  3 +-
 .../util/CgroupsLCEResourcesHandler.java| 13 ++--
 .../linux/resources/TestCGroupsHandlerImpl.java | 32 
 .../util/TestCgroupsLCEResourcesHandler.java| 29 ++
 4 files changed, 73 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ac50e13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index d5295c5..0f4c17e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -232,7 +232,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
* @param entries map of paths to mount options
* @return the first mount path that has the requested subsystem
*/
-  private static String findControllerInMtab(String controller,
+  @VisibleForTesting
+  static String findControllerInMtab(String controller,
   Map entries) {
 for (Map.Entry e : entries.entrySet()) {
   if (e.getValue().contains(controller)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ac50e13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index f04fcd2..cb4dcf6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -428,11 +428,18 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 return ret;
   }
 
-  private String findControllerInMtab(String controller,
+  @VisibleForTesting
+  String findControllerInMtab(String controller,
   Map entries) {
 for (Entry e : entries.entrySet()) {
-  if (e.getValue().contains(controller))
-return e.getKey();
+  if (e.getValue().contains(controller)) {
+if (new File(e.getKey()).canRead()) {
+  return e.getKey();
+} else {
+  LOG.warn(String.format(
+  "Skipping inaccessible cgroup mount point %s", e.getKey()));
+}
+  }
 }
 
 return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ac50e13/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java

hadoop git commit: YARN-6500. Do not mount inaccessible cgroups directories in CgroupsLCEResourcesHandler. (Miklos Szegedi via Haibo Chen)

2017-04-24 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 421e51cb9 -> 49a2f286f


YARN-6500. Do not mount inaccessible cgroups directories in 
CgroupsLCEResourcesHandler. (Miklos Szegedi via Haibo Chen)

(cherry picked from commit 8ac50e1322cb3f84bd998635924d85846aa47c94)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49a2f286
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49a2f286
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49a2f286

Branch: refs/heads/branch-2
Commit: 49a2f286f72bab644fd6aa55b5bc51ae47120e1f
Parents: 421e51c
Author: Haibo Chen 
Authored: Mon Apr 24 11:37:52 2017 -0700
Committer: Haibo Chen 
Committed: Mon Apr 24 11:39:55 2017 -0700

--
 .../linux/resources/CGroupsHandlerImpl.java |  3 +-
 .../util/CgroupsLCEResourcesHandler.java| 13 ++--
 .../linux/resources/TestCGroupsHandlerImpl.java | 32 
 .../util/TestCgroupsLCEResourcesHandler.java| 29 ++
 4 files changed, 73 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a2f286/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index d5295c5..0f4c17e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -232,7 +232,8 @@ class CGroupsHandlerImpl implements CGroupsHandler {
* @param entries map of paths to mount options
* @return the first mount path that has the requested subsystem
*/
-  private static String findControllerInMtab(String controller,
+  @VisibleForTesting
+  static String findControllerInMtab(String controller,
   Map entries) {
 for (Map.Entry e : entries.entrySet()) {
   if (e.getValue().contains(controller)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49a2f286/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
index f04fcd2..cb4dcf6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
@@ -428,11 +428,18 @@ public class CgroupsLCEResourcesHandler implements 
LCEResourcesHandler {
 return ret;
   }
 
-  private String findControllerInMtab(String controller,
+  @VisibleForTesting
+  String findControllerInMtab(String controller,
   Map entries) {
 for (Entry e : entries.entrySet()) {
-  if (e.getValue().contains(controller))
-return e.getKey();
+  if (e.getValue().contains(controller)) {
+if (new File(e.getKey()).canRead()) {
+  return e.getKey();
+} else {
+  LOG.warn(String.format(
+  "Skipping inaccessible cgroup mount point %s", e.getKey()));
+}
+  }
 }
 
 return null;


[14/50] [abbrv] hadoop git commit: HADOOP-14260. Configuration.dumpConfiguration should redact sensitive information. Contributed by John Zhuge.

2017-08-16 Thread haibochen
HADOOP-14260. Configuration.dumpConfiguration should redact sensitive 
information. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/582648be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/582648be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/582648be

Branch: refs/heads/YARN-1011
Commit: 582648befaf9908159f937d2cc8f549583a3483e
Parents: 4222c97
Author: John Zhuge 
Authored: Thu Aug 10 16:28:22 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 10:16:08 2017 -0700

--
 .../org/apache/hadoop/conf/Configuration.java   | 15 +++---
 .../apache/hadoop/conf/TestConfiguration.java   | 48 ++--
 2 files changed, 53 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/582648be/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 65e8569..edaee68 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -3146,7 +3146,8 @@ public class Configuration implements 
Iterable>,
   JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
   dumpGenerator.writeStartObject();
   dumpGenerator.writeFieldName("property");
-  appendJSONProperty(dumpGenerator, config, propertyName);
+  appendJSONProperty(dumpGenerator, config, propertyName,
+  new ConfigRedactor(config));
   dumpGenerator.writeEndObject();
   dumpGenerator.flush();
 }
@@ -3186,11 +3187,11 @@ public class Configuration implements 
Iterable>,
 dumpGenerator.writeFieldName("properties");
 dumpGenerator.writeStartArray();
 dumpGenerator.flush();
+ConfigRedactor redactor = new ConfigRedactor(config);
 synchronized (config) {
   for (Map.Entry item: config.getProps().entrySet()) {
-appendJSONProperty(dumpGenerator,
-config,
-item.getKey().toString());
+appendJSONProperty(dumpGenerator, config, item.getKey().toString(),
+redactor);
   }
 }
 dumpGenerator.writeEndArray();
@@ -3208,12 +3209,14 @@ public class Configuration implements 
Iterable>,
* @throws IOException
*/
   private static void appendJSONProperty(JsonGenerator jsonGen,
-  Configuration config, String name) throws IOException {
+  Configuration config, String name, ConfigRedactor redactor)
+  throws IOException {
 // skip writing if given property name is empty or null
 if(!Strings.isNullOrEmpty(name) && jsonGen != null) {
   jsonGen.writeStartObject();
   jsonGen.writeStringField("key", name);
-  jsonGen.writeStringField("value", config.get(name));
+  jsonGen.writeStringField("value",
+  redactor.redact(name, config.get(name)));
   jsonGen.writeBooleanField("isFinal",
   config.finalParameters.contains(name));
   String[] resources = config.updatingResource.get(name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/582648be/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 92d3290..91f25fa 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -49,6 +49,7 @@ import static org.junit.Assert.assertArrayEquals;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -82,6 +83,11 @@ public class TestConfiguration extends TestCase {
   /** Four apostrophes. */
   public static final String ESCAPED = "";
 
+  private static final String SENSITIVE_CONFIG_KEYS =
+  CommonConfigurationKeysPublic.HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
+
+  private BufferedWriter out;
+
   @Override
   protected void setUp() 

[06/50] [abbrv] hadoop git commit: YARN-6631. Refactor loader.js in new Yarn UI. Contributed by Akhil P B.

2017-08-16 Thread haibochen
YARN-6631. Refactor loader.js in new Yarn UI. Contributed by Akhil P B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d953c23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d953c23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d953c23

Branch: refs/heads/YARN-1011
Commit: 8d953c2359c5b12cf5b1f3c14be3ff5bb74242d0
Parents: ac7d060
Author: Sunil G 
Authored: Thu Aug 10 11:53:26 2017 +0530
Committer: Sunil G 
Committed: Thu Aug 10 11:53:26 2017 +0530

--
 .../src/main/webapp/app/initializers/loader.js  | 42 +---
 1 file changed, 19 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d953c23/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index aa8fb07..55f6e1b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -20,25 +20,27 @@
 
 import Ember from 'ember';
 
-function getTimeLineURL() {
-  return '/conf?name=yarn.timeline-service.webapp.address';
+function getTimeLineURL(rmhost) {
+  var url = window.location.protocol + '//' +
+(ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + 
rmhost;
+
+  url += '/conf?name=yarn.timeline-service.webapp.address';
+  Ember.Logger.log("Get Timeline Address URL: " + url);
+  return url;
 }
 
 function updateConfigs(application) {
   var hostname = window.location.hostname;
-  var rmhost = hostname +
-(window.location.port ? ':' + window.location.port: '');
-
-  Ember.Logger.log("RM Address:" + rmhost);
+  var rmhost = hostname + (window.location.port ? ':' + window.location.port: 
'');
 
   if(!ENV.hosts.rmWebAddress) {
-ENV = {
-   hosts: {
-  rmWebAddress: rmhost,
-},
-};
+ENV.hosts.rmWebAddress = rmhost;
+  } else {
+rmhost = ENV.hosts.rmWebAddress;
   }
 
+  Ember.Logger.log("RM Address: " + rmhost);
+
   if(!ENV.hosts.timelineWebAddress) {
 var timelinehost = "";
 $.ajax({
@@ -46,7 +48,7 @@ function updateConfigs(application) {
   dataType: 'json',
   async: true,
   context: this,
-  url: getTimeLineURL(),
+  url: getTimeLineURL(rmhost),
   success: function(data) {
 timelinehost = data.property.value;
 ENV.hosts.timelineWebAddress = timelinehost;
@@ -54,24 +56,18 @@ function updateConfigs(application) {
 var address = timelinehost.split(":")[0];
 var port = timelinehost.split(":")[1];
 
-Ember.Logger.log("Timeline Address from RM:" + address + ":" + port);
+Ember.Logger.log("Timeline Address from RM: " + timelinehost);
 
 if(address === "0.0.0.0" || address === "localhost") {
   var updatedAddress =  hostname + ":" + port;
-
-  /* Timeline v2 is not supporting CORS, so make as default*/
-  ENV = {
- hosts: {
-rmWebAddress: rmhost,
-timelineWebAddress: updatedAddress,
-  },
-  };
-  Ember.Logger.log("Timeline Updated Address:" + updatedAddress);
+  ENV.hosts.timelineWebAddress = updatedAddress;
+  Ember.Logger.log("Timeline Updated Address: " + updatedAddress);
 }
 application.advanceReadiness();
-  },
+  }
 });
   } else {
+Ember.Logger.log("Timeline Address: " + ENV.hosts.timelineWebAddress);
 application.advanceReadiness();
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[12/50] [abbrv] hadoop git commit: HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb properties missing in classes. Contributed by John Zhuge.

2017-08-16 Thread haibochen
HADOOP-14754. TestCommonConfigurationFields failed: core-default.xml has 2 wasb 
properties missing in classes.
Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d964062f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d964062f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d964062f

Branch: refs/heads/YARN-1011
Commit: d964062f66c0772f4b1a029bfcdff921fbaaf91c
Parents: f13ca94
Author: Steve Loughran 
Authored: Fri Aug 11 10:18:17 2017 +0100
Committer: Steve Loughran 
Committed: Fri Aug 11 10:18:17 2017 +0100

--
 .../org/apache/hadoop/conf/TestCommonConfigurationFields.java  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d964062f/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index da37e68..d0e0a35 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -103,6 +103,12 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPrefixToSkipCompare.add("fs.s3n.");
 xmlPrefixToSkipCompare.add("s3native.");
 
+// WASB properties are in a different subtree.
+// - org.apache.hadoop.fs.azure.NativeAzureFileSystem
+xmlPrefixToSkipCompare.add("fs.wasb.impl");
+xmlPrefixToSkipCompare.add("fs.wasbs.impl");
+xmlPrefixToSkipCompare.add("fs.azure.");
+
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys
 xmlPrefixToSkipCompare.add("adl.");


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HADOOP-14741. Refactor curator based ZooKeeper communication into common library. (Íñigo Goiri via Subru).

2017-08-16 Thread haibochen
HADOOP-14741. Refactor curator based ZooKeeper communication into common 
library. (Íñigo Goiri via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbbf0e2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbbf0e2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbbf0e2a

Branch: refs/heads/YARN-1011
Commit: bbbf0e2a4136b30cad9dfd36ef138650a1adea60
Parents: 8c4b6d1
Author: Subru Krishnan 
Authored: Fri Aug 11 13:58:45 2017 -0700
Committer: Subru Krishnan 
Committed: Fri Aug 11 13:58:45 2017 -0700

--
 .../hadoop/fs/CommonConfigurationKeys.java  |  21 ++
 .../hadoop/util/curator/ZKCuratorManager.java   | 294 +++
 .../hadoop/util/curator/package-info.java   |  27 ++
 .../src/main/resources/core-default.xml |  46 +++
 .../util/curator/TestZKCuratorManager.java  |  95 ++
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 +-
 .../yarn/conf/TestYarnConfigurationFields.java  |   9 +
 .../src/main/resources/yarn-default.xml |  53 
 ...ActiveStandbyElectorBasedElectorService.java |   5 +-
 .../yarn/server/resourcemanager/RMZKUtils.java  |  81 -
 .../server/resourcemanager/ResourceManager.java |  83 +++---
 .../recovery/ZKRMStateStore.java|  38 ++-
 .../server/resourcemanager/RMHATestBase.java|   5 +-
 13 files changed, 567 insertions(+), 203 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbbf0e2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index e53f71e..0da4bbd 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -377,4 +377,25 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
 
   // HDFS client HTrace configuration.
   public static final String  FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";
+
+  // Global ZooKeeper configuration keys
+  public static final String ZK_PREFIX = "hadoop.zk.";
+  /** ACL for the ZooKeeper ensemble. */
+  public static final String ZK_ACL = ZK_PREFIX + "acl";
+  public static final String ZK_ACL_DEFAULT = "world:anyone:rwcda";
+  /** Authentication for the ZooKeeper ensemble. */
+  public static final String ZK_AUTH = ZK_PREFIX + "auth";
+
+  /** Address of the ZooKeeper ensemble. */
+  public static final String ZK_ADDRESS = ZK_PREFIX + "address";
+  /** Maximum number of retries for a ZooKeeper operation. */
+  public static final String ZK_NUM_RETRIES = ZK_PREFIX + "num-retries";
+  public static final int ZK_NUM_RETRIES_DEFAULT = 1000;
+  /** Timeout for a ZooKeeper operation in ZooKeeper in milliseconds. */
+  public static final String ZK_TIMEOUT_MS = ZK_PREFIX + "timeout-ms";
+  public static final int ZK_TIMEOUT_MS_DEFAULT = 10000;
+  /** How often to retry a ZooKeeper operation  in milliseconds. */
+  public static final String ZK_RETRY_INTERVAL_MS =
+  ZK_PREFIX + "retry-interval-ms";
+  public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbbf0e2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
new file mode 100644
index 000..3adf028
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -0,0 +1,294 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either 

[11/50] [abbrv] hadoop git commit: HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. Contributed by Chen Liang.

2017-08-16 Thread haibochen
HDFS-12287. Remove a no-longer applicable TODO comment in DatanodeManager. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f13ca949
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f13ca949
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f13ca949

Branch: refs/heads/YARN-1011
Commit: f13ca94954072c9b898b142a5ff86f2c1f3ee55a
Parents: a32e013
Author: Yiqun Lin 
Authored: Fri Aug 11 14:13:45 2017 +0800
Committer: Yiqun Lin 
Committed: Fri Aug 11 14:13:45 2017 +0800

--
 .../apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java | 2 --
 1 file changed, 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f13ca949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index d705fec..78783ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -212,8 +212,6 @@ public class DatanodeManager {
 this.namesystem = namesystem;
 this.blockManager = blockManager;
 
-// TODO: Enables DFSNetworkTopology by default after more stress
-// testings/validations.
 this.useDfsNetworkTopology = conf.getBoolean(
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
 DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[16/50] [abbrv] hadoop git commit: YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting is not accurate. (Kai Sasaki via kasha)

2017-08-16 Thread haibochen
YARN-5927. BaseContainerManagerTest::waitForNMContainerState timeout accounting 
is not accurate. (Kai Sasaki via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c4b6d16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c4b6d16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c4b6d16

Branch: refs/heads/YARN-1011
Commit: 8c4b6d16a526610a03ccc85665744ad071e37400
Parents: 07fff43
Author: Karthik Kambatla 
Authored: Fri Aug 11 12:14:06 2017 -0700
Committer: Karthik Kambatla 
Committed: Fri Aug 11 12:15:43 2017 -0700

--
 .../containermanager/BaseContainerManagerTest.java| 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c4b6d16/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 7980a80..d266ac1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -310,13 +310,13 @@ public abstract class BaseContainerManagerTest {
 new HashSet<>(finalStates);
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   containerStatus =
   containerManager.getContainerStatuses(request)
   .getContainerStatuses().get(0);
   LOG.info("Waiting for container to get into one of states " + fStates
   + ". Current state is " + containerStatus.getState());
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!fStates.contains(containerStatus.getState())
 && timeoutSecs < timeOutMax);
 LOG.info("Container state is " + containerStatus.getState());
@@ -371,7 +371,7 @@ public abstract class BaseContainerManagerTest {
 .containermanager.container.ContainerState currentState = null;
 int timeoutSecs = 0;
 do {
-  Thread.sleep(2000);
+  Thread.sleep(1000);
   container =
   containerManager.getContext().getContainers().get(containerID);
   if (container != null) {
@@ -381,9 +381,9 @@ public abstract class BaseContainerManagerTest {
 LOG.info("Waiting for NM container to get into one of the following " +
 "states: " + finalStates + ". Current state is " + currentState);
   }
-  timeoutSecs += 2;
+  timeoutSecs += 1;
 } while (!finalStates.contains(currentState)
-&& timeoutSecs++ < timeOutMax);
+&& timeoutSecs < timeOutMax);
 LOG.info("Container state is " + currentState);
 Assert.assertTrue("ContainerState is not correct (timedout)",
 finalStates.contains(currentState));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[13/50] [abbrv] hadoop git commit: HADOOP-10392. Use FileSystem#makeQualified(Path) instead of Path#makeQualified(FileSystem) (ajisakaa via aw)

2017-08-16 Thread haibochen
HADOOP-10392. Use FileSystem#makeQualified(Path) instead of 
Path#makeQualified(FileSystem) (ajisakaa via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4222c971
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4222c971
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4222c971

Branch: refs/heads/YARN-1011
Commit: 4222c971080f2b150713727092c7197df58c88e5
Parents: d964062
Author: Allen Wittenauer 
Authored: Fri Aug 11 09:25:56 2017 -0700
Committer: Allen Wittenauer 
Committed: Fri Aug 11 09:25:56 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileUtil.java |  4 +--
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  4 +--
 .../java/org/apache/hadoop/io/SequenceFile.java |  2 +-
 .../apache/hadoop/fs/TestLocalFileSystem.java   |  6 ++---
 .../java/org/apache/hadoop/io/FileBench.java|  2 +-
 .../mapred/MiniMRClientClusterFactory.java  |  4 +--
 .../mapred/TestCombineFileInputFormat.java  |  6 ++---
 .../TestCombineSequenceFileInputFormat.java |  7 +++--
 .../mapred/TestCombineTextInputFormat.java  |  7 +++--
 .../mapred/TestConcatenatedCompressedInput.java |  6 ++---
 .../org/apache/hadoop/mapred/TestMapRed.java|  4 +--
 .../hadoop/mapred/TestMiniMRChildTask.java  |  4 +--
 .../hadoop/mapred/TestTextInputFormat.java  |  8 +++---
 .../TestWrappedRecordReaderClassloader.java |  4 +--
 .../lib/join/TestWrappedRRClassloader.java  |  4 +--
 .../mapreduce/util/MRAsyncDiskService.java  |  2 +-
 .../apache/hadoop/mapreduce/v2/TestMRJobs.java  |  4 +--
 .../v2/TestMRJobsWithHistoryService.java|  4 +--
 .../org/apache/hadoop/tools/HadoopArchives.java |  2 +-
 .../apache/hadoop/mapred/gridmix/Gridmix.java   |  2 +-
 .../hadoop/mapred/gridmix/PseudoLocalFs.java|  8 +-
 .../hadoop/mapred/gridmix/TestFilePool.java |  4 +--
 .../hadoop/mapred/gridmix/TestFileQueue.java|  8 +++---
 .../mapred/gridmix/TestPseudoLocalFs.java   |  2 +-
 .../hadoop/mapred/gridmix/TestUserResolve.java  |  4 +--
 .../hadoop/fs/swift/util/SwiftTestUtils.java|  2 +-
 .../fs/swift/SwiftFileSystemBaseTest.java   |  2 +-
 .../TestSwiftFileSystemPartitionedUploads.java  |  4 +--
 .../hadoop/tools/rumen/TestHistograms.java  |  6 ++---
 .../org/apache/hadoop/streaming/StreamJob.java  | 27 ++--
 30 files changed, 78 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index eb8a5c3..72b9615 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -295,8 +295,8 @@ public class FileUtil {
 Path dst)
 throws IOException {
 if (srcFS == dstFS) {
-  String srcq = src.makeQualified(srcFS).toString() + Path.SEPARATOR;
-  String dstq = dst.makeQualified(dstFS).toString() + Path.SEPARATOR;
+  String srcq = srcFS.makeQualified(src).toString() + Path.SEPARATOR;
+  String dstq = dstFS.makeQualified(dst).toString() + Path.SEPARATOR;
   if (dstq.startsWith(srcq)) {
 if (srcq.length() == dstq.length()) {
   throw new IOException("Cannot copy " + src + " to itself.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4222c971/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 4c1236b..644cf4e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -505,7 +505,7 @@ public class FTPFileSystem extends FileSystem {
   long modTime = -1; // Modification time of root dir not known.
   Path root = new Path("/");
   return new FileStatus(length, isDir, blockReplication, blockSize,
-  modTime, root.makeQualified(this));
+  modTime, this.makeQualified(root));
 }
 String pathName = parentPath.toUri().getPath();
 FTPFile[] ftpFiles = client.listFiles(pathName);
@@ -546,7 

[09/50] [abbrv] hadoop git commit: HDFS-11957. Enable POSIX ACL inheritance by default. Contributed by John Zhuge.

2017-08-16 Thread haibochen
HDFS-11957. Enable POSIX ACL inheritance by default. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312e57b9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312e57b9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312e57b9

Branch: refs/heads/YARN-1011
Commit: 312e57b95477ec95e6735f5721c646ad1df019f8
Parents: a8b7546
Author: John Zhuge 
Authored: Fri Jun 9 08:42:16 2017 -0700
Committer: John Zhuge 
Committed: Thu Aug 10 10:30:47 2017 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java|  2 +-
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   |  2 +-
 .../src/site/markdown/HdfsPermissionsGuide.md |  2 +-
 .../test/java/org/apache/hadoop/cli/TestAclCLI.java   |  2 ++
 .../hadoop/hdfs/server/namenode/FSAclBaseTest.java|  8 
 .../hdfs/server/namenode/TestFSImageWithAcl.java  | 14 --
 6 files changed, 17 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index dc9bf76..f4c383e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -269,7 +269,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY =
   "dfs.namenode.posix.acl.inheritance.enabled";
   public static final boolean
-  DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = false;
+  DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_DEFAULT = true;
   public static final String  DFS_NAMENODE_XATTRS_ENABLED_KEY = 
"dfs.namenode.xattrs.enabled";
   public static final boolean DFS_NAMENODE_XATTRS_ENABLED_DEFAULT = true;
   public static final String  DFS_ADMIN = "dfs.cluster.administrators";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4942967..03becc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -459,7 +459,7 @@
 
   
 dfs.namenode.posix.acl.inheritance.enabled
-false
+true
 
   Set to true to enable POSIX style ACL inheritance. When it is enabled
   and the create request comes from a compatible client, the NameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
index c502534..82b5cec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsPermissionsGuide.md
@@ -322,7 +322,7 @@ Configuration Parameters
 
 *   `dfs.namenode.posix.acl.inheritance.enabled`
 
-Set to true to enable POSIX style ACL inheritance. Disabled by default.
+Set to true to enable POSIX style ACL inheritance. Enabled by default.
 When it is enabled and the create request comes from a compatible client,
 the NameNode will apply default ACLs from the parent directory to
 the create mode and ignore the client umask. If no default ACL is found,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/312e57b9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
index 75111bb..9cf2180 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
@@ -34,6 +34,8 @@ public class TestAclCLI extends CLITestHelperDFS {
 
   protected void initConf() {
 

[22/50] [abbrv] hadoop git commit: YARN-6896. Federation: routing REST invocations transparently to multiple RMs (part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)

2017-08-16 Thread haibochen
YARN-6896. Federation: routing REST invocations transparently to multiple RMs 
(part 1 - basic execution). (Contributed by Giovanni Matteo Fumarola via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc59b5fb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc59b5fb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc59b5fb

Branch: refs/heads/YARN-1011
Commit: cc59b5fb26ccf58dffcd8850fa12ec65250f127d
Parents: 0996acd
Author: Carlo Curino 
Authored: Fri Aug 11 15:58:01 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 15:58:01 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  10 +
 .../yarn/conf/TestYarnConfigurationFields.java  |   2 +
 .../webapp/DefaultRequestInterceptorREST.java   |  16 +-
 .../webapp/FederationInterceptorREST.java   | 750 +++
 .../webapp/BaseRouterWebServicesTest.java   |  37 +-
 .../MockDefaultRequestInterceptorREST.java  | 136 
 .../webapp/TestFederationInterceptorREST.java   | 379 ++
 .../TestFederationInterceptorRESTRetry.java | 274 +++
 .../TestableFederationInterceptorREST.java  |  54 ++
 .../src/site/markdown/Federation.md |   2 +-
 10 files changed, 1646 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cd4d569..8acaef8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2721,6 +2721,16 @@ public class YarnConfiguration extends Configuration {
   "org.apache.hadoop.yarn.server.router.webapp."
   + "DefaultRequestInterceptorREST";
 
+  /**
+   * The interceptor class used in FederationInterceptorREST to communicate 
with
+   * each SubCluster.
+   */
+  public static final String ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  ROUTER_WEBAPP_PREFIX + "default-interceptor-class";
+  public static final String DEFAULT_ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS =
+  "org.apache.hadoop.yarn.server.router.webapp."
+  + "DefaultRequestInterceptorREST";
+
   
   // Other Configs
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
index b9ad31a..91a8b0a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java
@@ -81,6 +81,8 @@ public class TestYarnConfigurationFields extends 
TestConfigurationFieldsBase {
 .add(YarnConfiguration.ROUTER_CLIENTRM_ADDRESS);
 configurationPropsToSkipCompare
 .add(YarnConfiguration.ROUTER_RMADMIN_ADDRESS);
+configurationPropsToSkipCompare
+.add(YarnConfiguration.ROUTER_WEBAPP_DEFAULT_INTERCEPTOR_CLASS);
 
 // Federation policies configs to be ignored
 configurationPropsToSkipCompare

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc59b5fb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
index aa8e3eb..abd8ca6 100644
--- 

[04/50] [abbrv] hadoop git commit: HDFS-12278. LeaseManager operations are inefficient in 2.8. Contributed by Rushabh S Shah.

2017-08-16 Thread haibochen
HDFS-12278. LeaseManager operations are inefficient in 2.8. Contributed by 
Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b5c02f95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b5c02f95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b5c02f95

Branch: refs/heads/YARN-1011
Commit: b5c02f95b5a2fcb8931d4a86f8192caa18009ea9
Parents: ec69414
Author: Kihwal Lee 
Authored: Wed Aug 9 16:46:05 2017 -0500
Committer: Kihwal Lee 
Committed: Wed Aug 9 16:46:05 2017 -0500

--
 .../hadoop/hdfs/server/namenode/LeaseManager.java | 18 --
 1 file changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b5c02f95/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index 6578ba9..35ec063 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -26,10 +26,11 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashSet;
 import java.util.List;
-import java.util.PriorityQueue;
+import java.util.NavigableSet;
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -87,11 +88,15 @@ public class LeaseManager {
   // Mapping: leaseHolder -> Lease
   private final SortedMap leases = new TreeMap<>();
   // Set of: Lease
-  private final PriorityQueue sortedLeases = new PriorityQueue<>(512,
+  private final NavigableSet sortedLeases = new TreeSet<>(
   new Comparator() {
 @Override
 public int compare(Lease o1, Lease o2) {
-  return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+  if (o1.getLastUpdate() != o2.getLastUpdate()) {
+return Long.signum(o1.getLastUpdate() - o2.getLastUpdate());
+  } else {
+return o1.holder.compareTo(o2.holder);
+  }
 }
   });
   // INodeID -> Lease
@@ -528,9 +533,10 @@ public class LeaseManager {
 
 long start = monotonicNow();
 
-while(!sortedLeases.isEmpty() && sortedLeases.peek().expiredHardLimit()
-  && !isMaxLockHoldToReleaseLease(start)) {
-  Lease leaseToCheck = sortedLeases.peek();
+while(!sortedLeases.isEmpty() &&
+sortedLeases.first().expiredHardLimit()
+&& !isMaxLockHoldToReleaseLease(start)) {
+  Lease leaseToCheck = sortedLeases.first();
   LOG.info(leaseToCheck + " has expired hard limit");
 
   final List removing = new ArrayList<>();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] [abbrv] hadoop git commit: YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. Contributed by Naganarasimha G R.

2017-08-16 Thread haibochen
YARN-6741. Deleting all children of a Parent Queue on refresh throws exception. 
Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8f74c39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8f74c39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8f74c39

Branch: refs/heads/YARN-1011
Commit: d8f74c3964fa429a4a53c3651d175792cf00ac81
Parents: 7769e96
Author: bibinchundatt 
Authored: Mon Aug 14 09:39:00 2017 +0530
Committer: bibinchundatt 
Committed: Mon Aug 14 09:39:00 2017 +0530

--
 .../capacity/CapacitySchedulerQueueManager.java |   4 +
 .../scheduler/capacity/ParentQueue.java |  39 +++
 .../capacity/TestCapacityScheduler.java | 114 ++-
 3 files changed, 137 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index e33fbb3..1ceb6fb 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -327,6 +327,10 @@ public class CapacitySchedulerQueueManager implements 
SchedulerQueueManager<
 + "it is not yet in stopped state. Current State : "
 + oldQueue.getState());
   }
+} else if (oldQueue instanceof ParentQueue
+&& newQueue instanceof LeafQueue) {
+  LOG.info("Converting the parent queue: " + oldQueue.getQueuePath()
+  + " to leaf queue.");
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8f74c39/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index f6ada4f..e0baa07 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -18,6 +18,14 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -34,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -45,7 +52,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerStat
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
 import 

[29/50] [abbrv] hadoop git commit: YARN-6996. Change javax.cache library implementation from JSR107 to Apache Geronimo. (Ray Chiang via Subru).

2017-08-16 Thread haibochen
YARN-6996. Change javax.cache library implementation from JSR107 to Apache 
Geronimo. (Ray Chiang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f3603b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f3603b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f3603b

Branch: refs/heads/YARN-1011
Commit: 18f3603bce37e0e07c9075811b1179afc2c227eb
Parents: e2f6299
Author: Subru Krishnan 
Authored: Mon Aug 14 11:10:00 2017 -0700
Committer: Subru Krishnan 
Committed: Mon Aug 14 11:10:00 2017 -0700

--
 hadoop-project/pom.xml | 6 +++---
 .../hadoop-yarn-server/hadoop-yarn-server-common/pom.xml   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 6311cd9..8c1d374 100755
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -96,7 +96,7 @@
 2.0.0-M21
 1.0.0-M33
 
-1.0.0
+1.0-alpha-1
 3.3.1
 2.4.12
 6.2.1.jre7
@@ -1276,8 +1276,8 @@
   1.0.0
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
   ${jcache.version}
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f3603b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index 5f85097..441a574 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -103,8 +103,8 @@
   leveldbjni-all
 
 
-  javax.cache
-  cache-api
+  org.apache.geronimo.specs
+  geronimo-jcache_1.0_spec
 
 
   org.ehcache


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[36/50] [abbrv] hadoop git commit: HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by Ajay Kumar.

2017-08-16 Thread haibochen
HADOOP-14673. Remove leftover hadoop_xml_escape from functions. Contributed by 
Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04465113
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04465113
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04465113

Branch: refs/heads/YARN-1011
Commit: 044651139800b9e2e5b8f224772e6dbd6ded58c6
Parents: 8bef4ec
Author: Arpit Agarwal 
Authored: Mon Aug 14 16:22:10 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 16:22:10 2017 -0700

--
 .../src/main/bin/hadoop-functions.sh| 23 --
 .../src/test/scripts/hadoop_escape_chars.bats   | 32 
 2 files changed, 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh 
b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 3cf21cf..9ea4587 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -2578,29 +2578,6 @@ function hadoop_parse_args
   hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
 }
 
-## @description  XML-escapes the characters (&'"<>) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   XML-escaped string
-function hadoop_xml_escape
-{
-  sed -e 's/&/\/g' -e 's/"/\\\/g' \
--e "s/'/\/g" -e 's//\\\/g' <<< "$1"
-}
-
-## @description  sed-escapes the characters (\/&) in the given parameter.
-## @audience private
-## @stabilityevolving
-## @replaceable  yes
-## @paramstring
-## @return   sed-escaped string
-function hadoop_sed_escape
-{
-  sed -e 's/[\/&]/\\&/g' <<< "$1"
-}
-
 ## @description Handle subcommands from main program entries
 ## @audience private
 ## @stability evolving

http://git-wip-us.apache.org/repos/asf/hadoop/blob/04465113/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats 
b/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
deleted file mode 100755
index 9b031f2..000
--- 
a/hadoop-common-project/hadoop-common/src/test/scripts/hadoop_escape_chars.bats
+++ /dev/null
@@ -1,32 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-load hadoop-functions_test_helper
-
-@test "hadoop_escape_sed (positive 1)" {
-  ret="$(hadoop_sed_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="pass\&0#\$asdf\/g  ><'\"~\`!@#$%^\&*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
-
-@test "hadoop_escape_xml (positive 1)" {
-  ret="$(hadoop_xml_escape "\pass&\0#\$asdf/g  ><'\"~\`!@#$%^&*()_+-=")"
-  expected="\\password\0#\$asdf/g  
~\`!@#\$%^*()_+-="
-  echo "actual >${ret}<"
-  echo "expected >${expected}<"
-  [ "${ret}" = "${expected}" ]
-}
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-16 Thread haibochen
YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType 
update. (Kartheek Muthyala via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d7be1d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d7be1d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d7be1d8

Branch: refs/heads/YARN-1011
Commit: 4d7be1d8575e9254c59d41460960708e3718503a
Parents: 0446511
Author: Arun Suresh 
Authored: Mon Aug 14 19:46:17 2017 -0700
Committer: Arun Suresh 
Committed: Mon Aug 14 19:46:17 2017 -0700

--
 .../yarn/client/api/impl/TestAMRMClient.java| 395 +--
 .../yarn/client/api/impl/TestNMClient.java  |   7 +-
 .../containermanager/ContainerManagerImpl.java  | 132 ---
 .../containermanager/container/Container.java   |   4 +-
 .../container/ContainerImpl.java|  37 +-
 .../monitor/ContainersMonitorImpl.java  |  15 -
 .../scheduler/ContainerScheduler.java   |  73 
 .../scheduler/ContainerSchedulerEventType.java  |   1 +
 .../UpdateContainerSchedulerEvent.java  |  85 
 .../nodemanager/TestNodeManagerResync.java  |  11 +-
 .../BaseContainerManagerTest.java   |  33 +-
 .../containermanager/TestContainerManager.java  | 267 -
 .../TestContainerManagerRecovery.java   |   2 +-
 .../TestContainerSchedulerQueuing.java  |  96 +
 .../nodemanager/webapp/MockContainer.java   |   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |   2 +-
 .../security/RMContainerTokenSecretManager.java |  30 +-
 17 files changed, 964 insertions(+), 228 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
index 1b2bca3..09b12f2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.client.api.impl;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
@@ -36,6 +37,7 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -142,6 +144,10 @@ public class TestAMRMClient {
 // set the minimum allocation so that resource decrease can go under 1024
 conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
 conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS, 1);
+conf.setBoolean(
+YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
+conf.setInt(
+YarnConfiguration.NM_OPPORTUNISTIC_CONTAINERS_MAX_QUEUE_LENGTH, 10);
 yarnCluster = new MiniYARNCluster(TestAMRMClient.class.getName(), 
nodeCount, 1, 1);
 yarnCluster.init(conf);
 yarnCluster.start();
@@ -924,8 +930,8 @@ public class TestAMRMClient {
 // add exp=x to ANY
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "x"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("x", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("x", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x then add exp=a to ANY in same priority, only exp=a should kept
@@ -933,8 +939,8 @@ public class TestAMRMClient {
 1), null, null, Priority.UNDEFINED, true, "x"));
 client.addContainerRequest(new ContainerRequest(Resource.newInstance(1024,
 1), null, null, Priority.UNDEFINED, true, "a"));
-Assert.assertEquals(1, client.ask.size());
-Assert.assertEquals("a", client.ask.iterator().next()
+assertEquals(1, client.ask.size());
+assertEquals("a", client.ask.iterator().next()
 .getNodeLabelExpression());
 
 // add exp=x to ANY, rack and node, only resource request has ANY resource
@@ -943,10 +949,10 @@ public class TestAMRMClient {
 client.addContainerRequest(new 

[40/50] [abbrv] hadoop git commit: HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.

2017-08-16 Thread haibochen
HDFS-11696. Fix warnings from Spotbugs in hadoop-hdfs. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e43c28e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e43c28e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e43c28e

Branch: refs/heads/YARN-1011
Commit: 2e43c28e01fe006210e71aab179527669f6412ed
Parents: 645a8f2
Author: Yiqun Lin 
Authored: Tue Aug 15 16:48:49 2017 +0800
Committer: Yiqun Lin 
Committed: Tue Aug 15 16:48:49 2017 +0800

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  7 +++--
 .../hdfs/server/protocol/SlowDiskReports.java   |  5 ++--
 .../dev-support/findbugsExcludeFile.xml | 26 +++
 .../hdfs/qjournal/server/JournalNode.java   | 16 +++-
 .../hdfs/server/datanode/DataStorage.java   | 12 ++---
 .../namenode/NNStorageRetentionManager.java | 27 +++-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  |  6 ++---
 .../offlineImageViewer/ImageLoaderCurrent.java  | 10 +---
 .../namenode/TestNameNodeOptionParsing.java | 27 +++-
 9 files changed, 103 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 677ea35..88b273a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2901,9 +2901,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
 synchronized (DFSClient.class) {
   if (STRIPED_READ_THREAD_POOL == null) {
-STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
+// Only after thread pool is fully constructed then save it to
+// volatile field.
+ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
 numThreads, 60, "StripedRead-", true);
-STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
+threadPool.allowCoreThreadTimeOut(true);
+STRIPED_READ_THREAD_POOL = threadPool;
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
index 8095c2a..496389a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
@@ -101,8 +101,9 @@ public final class SlowDiskReports {
 }
 
 boolean areEqual;
-for (String disk : this.slowDisks.keySet()) {
-  if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
+for (Map.Entry> entry : this.slowDisks
+.entrySet()) {
+  if (!entry.getValue().equals(that.slowDisks.get(entry.getKey( {
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index 2a7824a..9582fcb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -264,4 +264,30 @@
 
 
 
+
+   
+   
+   
+ 
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+   
+   
+   
+ 
+ 
+  
+  
+  
+
  

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e43c28e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
--
diff --git 

[24/50] [abbrv] hadoop git commit: HDFS-11303. Hedged read might hang infinitely if read data from all DN failed . Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.

2017-08-16 Thread haibochen
HDFS-11303. Hedged read might hang infinitely if read data from all DN failed.
Contributed by Chen Zhang, Wei-chiu Chuang, and John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b242f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b242f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b242f09

Branch: refs/heads/YARN-1011
Commit: 8b242f09a61a7536d2422546bfa6c2aaf1d57ed6
Parents: 28d97b7
Author: John Zhuge 
Authored: Thu Aug 10 14:04:36 2017 -0700
Committer: John Zhuge 
Committed: Fri Aug 11 19:42:07 2017 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 11 ++--
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 63 
 2 files changed, 70 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index dcc997c..6bff172 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1131,8 +1131,9 @@ public class DFSInputStream extends FSInputStream
 Future firstRequest = hedgedService
 .submit(getFromDataNodeCallable);
 futures.add(firstRequest);
+Future future = null;
 try {
-  Future future = hedgedService.poll(
+  future = hedgedService.poll(
   conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
   if (future != null) {
 ByteBuffer result = future.get();
@@ -1142,16 +1143,18 @@ public class DFSInputStream extends FSInputStream
   }
   DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
   + "read", conf.getHedgedReadThresholdMillis(), chosenNode.info);
-  // Ignore this node on next go around.
-  ignored.add(chosenNode.info);
   dfsClient.getHedgedReadMetrics().incHedgedReadOps();
   // continue; no need to refresh block locations
 } catch (ExecutionException e) {
-  // Ignore
+  futures.remove(future);
 } catch (InterruptedException e) {
   throw new InterruptedIOException(
   "Interrupted while waiting for reading task");
 }
+// Ignore this node on next go around.
+// If poll timeout and the request still ongoing, don't consider it
+// again. If read data failed, don't consider it either.
+ignored.add(chosenNode.info);
   } else {
 // We are starting up a 'hedged' read. We have a read already
 // ongoing. Call getBestNodeDNAddrPair instead of chooseDataNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b242f09/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 85fc97b..bcb02b3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -59,6 +59,8 @@ import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Supplier;
+import org.slf4j.LoggerFactory;
+import org.slf4j.Logger;
 
 /**
  * This class tests the DFS positional read functionality in a single node
@@ -72,6 +74,9 @@ public class TestPread {
   boolean simulatedStorage;
   boolean isHedgedRead;
 
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestPread.class.getName());
+
   @Before
   public void setup() {
 simulatedStorage = false;
@@ -551,6 +556,64 @@ public class TestPread {
 }
   }
 
+  @Test(timeout=3)
+  public void testHedgedReadFromAllDNFailed() throws IOException {
+Configuration conf = new Configuration();
+int numHedgedReadPoolThreads = 5;
+final int hedgedReadTimeoutMillis = 50;
+
+conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
+numHedgedReadPoolThreads);
+conf.setLong(HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
+hedgedReadTimeoutMillis);
+conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 

[25/50] [abbrv] hadoop git commit: HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by Atul Sikaria.

2017-08-16 Thread haibochen
HADOOP-14627. Support MSI and DeviceCode token provider in ADLS. Contributed by 
Atul Sikaria.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7769e961
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7769e961
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7769e961

Branch: refs/heads/YARN-1011
Commit: 7769e9614956283a86eda9e4e69aaa592c0ca960
Parents: 8b242f0
Author: John Zhuge 
Authored: Thu Aug 10 00:43:40 2017 -0700
Committer: John Zhuge 
Committed: Sun Aug 13 00:22:34 2017 -0700

--
 .../src/main/resources/core-default.xml | 37 +++-
 hadoop-tools/hadoop-azure-datalake/pom.xml  |  2 +-
 .../org/apache/hadoop/fs/adl/AdlConfKeys.java   |  8 ++
 .../org/apache/hadoop/fs/adl/AdlFileSystem.java | 21 +
 .../apache/hadoop/fs/adl/TokenProviderType.java |  2 +
 .../src/site/markdown/index.md  | 98 ++--
 .../hadoop/fs/adl/TestAzureADTokenProvider.java | 40 
 7 files changed, 198 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index ffcab2c..7c4b0f1 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2586,11 +2586,16 @@
 ClientCredential
 
   Defines Azure Active Directory OAuth2 access token provider type.
-  Supported types are ClientCredential, RefreshToken, and Custom.
+  Supported types are ClientCredential, RefreshToken, MSI, DeviceCode,
+  and Custom.
   The ClientCredential type requires property fs.adl.oauth2.client.id,
   fs.adl.oauth2.credential, and fs.adl.oauth2.refresh.url.
   The RefreshToken type requires property fs.adl.oauth2.client.id and
   fs.adl.oauth2.refresh.token.
+  The MSI type requires properties fs.adl.oauth2.msi.port and
+  fs.adl.oauth2.msi.tenantguid.
+  The DeviceCode type requires property
+  fs.adl.oauth2.devicecode.clientapp.id.
   The Custom type requires property fs.adl.oauth2.access.token.provider.
 
   
@@ -2627,6 +2632,36 @@
 
   
 
+  
+fs.adl.oauth2.msi.port
+
+
+  The localhost port for the MSI token service. This is the port specified
+  when creating the Azure VM.
+  Used by MSI token provider.
+
+  
+
+  
+fs.adl.oauth2.msi.tenantguid
+
+
+  The tenant guid for the Azure AAD tenant under which the azure data lake
+  store account is created.
+  Used by MSI token provider.
+
+  
+
+  
+fs.adl.oauth2.devicecode.clientapp.id
+
+
+  The app id of the AAD native app in whose context the auth request
+  should be made.
+  Used by DeviceCode token provider.
+
+  
+
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 3aed5e1..47f12df 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -110,7 +110,7 @@
 
   com.microsoft.azure
   azure-data-lake-store-sdk
-  2.1.4
+  2.2.1
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7769e961/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
--
diff --git 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
index 31df222..f77d981 100644
--- 
a/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
+++ 
b/hadoop-tools/hadoop-azure-datalake/src/main/java/org/apache/hadoop/fs/adl/AdlConfKeys.java
@@ -54,6 +54,14 @@ public final class AdlConfKeys {
   public static final String TOKEN_PROVIDER_TYPE_CLIENT_CRED =
   "ClientCredential";
 
+  // MSI Auth Configuration
+  public static final String MSI_PORT = "fs.adl.oauth2.msi.port";
+  public static final String MSI_TENANT_GUID = "fs.adl.oauth2.msi.tenantguid";
+
+  // DeviceCode Auth configuration
+  public static final String DEVICE_CODE_CLIENT_APP_ID =
+  "fs.adl.oauth2.devicecode.clientapp.id";
+
   public static final String 

[41/50] [abbrv] hadoop git commit: HDFS-12054. FSNamesystem#addErasureCodingPolicies should call checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by lufei.

2017-08-16 Thread haibochen
HDFS-12054. FSNamesystem#addErasureCodingPolicies should call 
checkNameNodeSafeMode() to ensure Namenode is not in safemode. Contributed by 
lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1040bae6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1040bae6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1040bae6

Branch: refs/heads/YARN-1011
Commit: 1040bae6fcbae7079d8126368cdeac60831a4d0c
Parents: 2e43c28
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:38:43 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:38:43 2017 -0700

--
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  2 ++
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java   | 16 
 2 files changed, 18 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index b1639b2..caf73f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7081,6 +7081,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   checkOperation(OperationCategory.WRITE);
   for (ErasureCodingPolicy policy : policies) {
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot add erasure coding policy");
   ErasureCodingPolicy newPolicy =
   FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
   addECPolicyName = newPolicy.getName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1040bae6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index f03b440..bc95ec7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -466,6 +468,20 @@ public class TestSafeMode {
   // expected
 }
 
+ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
+ErasureCodingPolicy newPolicy =
+new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+ErasureCodingPolicy[] policyArray =
+new ErasureCodingPolicy[]{newPolicy};
+try {
+  dfs.addErasureCodingPolicies(policyArray);
+  fail("AddErasureCodingPolicies should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot add erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[44/50] [abbrv] hadoop git commit: YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via nroberts)

2017-08-16 Thread haibochen
YARN-7014. Fix off-by-one error causing heap corruption (Jason Lowe via 
nroberts)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2654590
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2654590
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2654590

Branch: refs/heads/YARN-1011
Commit: d265459024b8e5f5eccf421627f684ca8f162112
Parents: dadb0c2
Author: Nathan Roberts 
Authored: Tue Aug 15 15:52:48 2017 -0500
Committer: Nathan Roberts 
Committed: Tue Aug 15 15:52:48 2017 -0500

--
 .../src/main/native/container-executor/impl/utils/string-utils.c  | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2654590/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
index 703d484..063df7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/string-utils.c
@@ -44,8 +44,7 @@ int validate_container_id(const char* input) {
* container_e17_1410901177871_0001_01_05
* container_1410901177871_0001_01_05
*/
-  char* input_cpy = malloc(strlen(input));
-  strcpy(input_cpy, input);
+  char* input_cpy = strdup(input);
   char* p = strtok(input_cpy, "_");
   int idx = 0;
   while (p != NULL) {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[05/50] [abbrv] hadoop git commit: MAPREDUCE-6923. Optimize MapReduce Shuffle I/O for small partitions. Contributed by Robert Schmidtke.

2017-08-16 Thread haibochen
MAPREDUCE-6923. Optimize MapReduce Shuffle I/O for small partitions. 
Contributed by Robert Schmidtke.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac7d0604
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac7d0604
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac7d0604

Branch: refs/heads/YARN-1011
Commit: ac7d0604bc73c0925eff240ad9837e14719d57b7
Parents: b5c02f9
Author: Ravi Prakash 
Authored: Wed Aug 9 15:39:52 2017 -0700
Committer: Ravi Prakash 
Committed: Wed Aug 9 15:39:52 2017 -0700

--
 .../main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java  | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac7d0604/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
index cb9b5e0..79045f9 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/FadvisedFileRegion.java
@@ -111,7 +111,10 @@ public class FadvisedFileRegion extends DefaultFileRegion {
 
 long trans = actualCount;
 int readSize;
-ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);
+ByteBuffer byteBuffer = ByteBuffer.allocate(
+Math.min(
+this.shuffleBufferSize,
+trans > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) trans));
 
 while(trans > 0L &&
 (readSize = fileChannel.read(byteBuffer, this.position+position)) > 0) 
{


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[18/50] [abbrv] hadoop git commit: YARN-6952. Enable scheduling monitor in FS (Contributed by Yufei Gu via Daniel Templeton)

2017-08-16 Thread haibochen
YARN-6952. Enable scheduling monitor in FS (Contributed by Yufei Gu via Daniel 
Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/218588be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/218588be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/218588be

Branch: refs/heads/YARN-1011
Commit: 218588be773123404af4fd26eed5c9e3625feaa7
Parents: bbbf0e2
Author: Daniel Templeton 
Authored: Fri Aug 11 14:02:38 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:04:19 2017 -0700

--
 .../yarn/server/resourcemanager/ResourceManager.java  |  9 +++--
 .../resourcemanager/monitor/SchedulingEditPolicy.java |  4 ++--
 .../server/resourcemanager/monitor/SchedulingMonitor.java |  4 +---
 .../capacity/ProportionalCapacityPreemptionPolicy.java|  4 ++--
 .../monitor/invariants/InvariantsChecker.java | 10 +-
 .../monitor/invariants/MetricsInvariantChecker.java   |  7 +++
 6 files changed, 16 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/218588be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index cb7daf9..5333f25 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -90,7 +90,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
-import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
@@ -698,8 +697,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 }
   }
 
-  // creating monitors that handle preemption
-  createPolicyMonitors();
+  createSchedulerMonitors();
 
   masterService = createApplicationMasterService();
   addService(masterService) ;
@@ -800,9 +798,8 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
 
 }
 
-protected void createPolicyMonitors() {
-  if (scheduler instanceof PreemptableResourceScheduler
-  && conf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
+protected void createSchedulerMonitors() {
+  if (conf.getBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS,
   YarnConfiguration.DEFAULT_RM_SCHEDULER_ENABLE_MONITORS)) {
 LOG.info("Loading policy monitors");
 List policies = conf.getInstances(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/218588be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
index 47458a3..d2550e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/SchedulingEditPolicy.java
@@ -19,12 +19,12 @@ package 

[19/50] [abbrv] hadoop git commit: YARN-6884. AllocationFileLoaderService.loadQueue() has an if without braces (Contributed by weiyuan via Daniel Templeton)

2017-08-16 Thread haibochen
YARN-6884. AllocationFileLoaderService.loadQueue() has an if without braces
(Contributed by weiyuan via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7680d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7680d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7680d4c

Branch: refs/heads/YARN-1011
Commit: c7680d4cc4d9302a5b5efcf2467bd32ecea99585
Parents: 218588b
Author: Daniel Templeton 
Authored: Fri Aug 11 14:22:02 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:22:02 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java| 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7680d4c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index bc204cb..bf5b4c5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -294,8 +294,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
   NodeList fields = element.getChildNodes();
   for (int j = 0; j < fields.getLength(); j++) {
 Node fieldNode = fields.item(j);
-if (!(fieldNode instanceof Element))
+if (!(fieldNode instanceof Element)) {
   continue;
+}
 Element field = (Element) fieldNode;
 if ("maxRunningApps".equals(field.getTagName())) {
   String text = ((Text)field.getFirstChild()).getData().trim();
@@ -490,8 +491,9 @@ public class AllocationFileLoaderService extends 
AbstractService {
 
 for (int j = 0; j < fields.getLength(); j++) {
   Node fieldNode = fields.item(j);
-  if (!(fieldNode instanceof Element))
+  if (!(fieldNode instanceof Element)) {
 continue;
+  }
   Element field = (Element) fieldNode;
   if ("minResources".equals(field.getTagName())) {
 String text = ((Text)field.getFirstChild()).getData().trim();


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[01/50] [abbrv] hadoop git commit: YARN-6958. Moving logging APIs over to slf4j in hadoop-yarn-server-timelineservice. Contributed by Yeliang Cang. [Forced Update!]

2017-08-16 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 4c501b46d -> f6e03a59b (forced update)


YARN-6958. Moving logging APIs over to slf4j in 
hadoop-yarn-server-timelineservice. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63cfcb90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63cfcb90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63cfcb90

Branch: refs/heads/YARN-1011
Commit: 63cfcb90ac6fbb79ba9ed6b3044cd999fc74e58c
Parents: 69afa26
Author: Akira Ajisaka 
Authored: Wed Aug 9 23:58:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 9 23:58:22 2017 +0900

--
 .../server/timeline/LevelDBCacheTimelineStore.java| 14 +++---
 .../reader/filter/TimelineFilterUtils.java|  7 ---
 .../storage/HBaseTimelineReaderImpl.java  |  8 
 .../storage/HBaseTimelineWriterImpl.java  |  8 
 .../storage/TimelineSchemaCreator.java|  7 ---
 .../storage/application/ApplicationTable.java |  7 ---
 .../storage/apptoflow/AppToFlowTable.java |  7 ---
 .../timelineservice/storage/common/ColumnHelper.java  |  8 +---
 .../storage/common/HBaseTimelineStorageUtils.java |  8 
 .../timelineservice/storage/entity/EntityTable.java   |  7 ---
 .../storage/flow/FlowActivityTable.java   |  7 ---
 .../storage/flow/FlowRunCoprocessor.java  |  7 ---
 .../timelineservice/storage/flow/FlowRunTable.java|  7 ---
 .../timelineservice/storage/flow/FlowScanner.java |  7 ---
 .../storage/reader/TimelineEntityReader.java  |  7 ---
 .../collector/AppLevelTimelineCollector.java  |  7 ---
 .../collector/NodeTimelineCollectorManager.java   |  8 
 .../PerNodeTimelineCollectorsAuxService.java  | 10 +-
 .../timelineservice/collector/TimelineCollector.java  |  7 ---
 .../collector/TimelineCollectorManager.java   |  8 
 .../collector/TimelineCollectorWebService.java|  8 
 .../timelineservice/reader/TimelineReaderServer.java  |  9 +
 .../reader/TimelineReaderWebServices.java |  8 
 .../storage/FileSystemTimelineReaderImpl.java |  8 
 .../storage/common/TimelineStorageUtils.java  |  4 
 25 files changed, 102 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/63cfcb90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
index 7379dd6..f7a3d01 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/LevelDBCacheTimelineStore.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
@@ -34,6 +32,8 @@ import org.fusesource.leveldbjni.JniDBFactory;
 import org.iq80.leveldb.DB;
 import org.iq80.leveldb.DBIterator;
 import org.iq80.leveldb.Options;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
 import java.io.IOException;
@@ -58,8 +58,8 @@ import java.util.Map;
 @Private
 @Unstable
 public class LevelDBCacheTimelineStore extends KeyValueBasedTimelineStore {
-  private static final Log LOG
-  = LogFactory.getLog(LevelDBCacheTimelineStore.class);
+  private static final Logger LOG
+  = LoggerFactory.getLogger(LevelDBCacheTimelineStore.class);
   private static final String CACHED_LDB_FILE_PREFIX = "-timeline-cache.ldb";
   private String dbId;
   private DB entityDb;
@@ -102,7 +102,7 @@ public class LevelDBCacheTimelineStore extends 
KeyValueBasedTimelineStore {
 

[07/50] [abbrv] hadoop git commit: HADOOP-14183. Remove service loader config file for wasb fs. Contributed by Esfandiar Manii.

2017-08-16 Thread haibochen
HADOOP-14183. Remove service loader config file for wasb fs.
Contributed by Esfandiar Manii.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54356b1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54356b1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54356b1e

Branch: refs/heads/YARN-1011
Commit: 54356b1e8366a23fff1bb45601efffc743306efc
Parents: 8d953c2
Author: Steve Loughran 
Authored: Thu Aug 10 16:46:33 2017 +0100
Committer: Steve Loughran 
Committed: Thu Aug 10 16:46:33 2017 +0100

--
 .../src/main/resources/core-default.xml| 12 
 .../services/org.apache.hadoop.fs.FileSystem   | 17 -
 2 files changed, 12 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/54356b1e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 593fd85..e6b6919 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1322,6 +1322,18 @@
 
 
 
+  fs.wasb.impl
+  org.apache.hadoop.fs.azure.NativeAzureFileSystem
+  The implementation class of the Native Azure 
Filesystem
+
+
+
+  fs.wasbs.impl
+  org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure
+  The implementation class of the Secure Native Azure 
Filesystem
+
+
+
   fs.azure.secure.mode
   false
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/54356b1e/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
 
b/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
deleted file mode 100644
index 9f4922b..000
--- 
a/hadoop-tools/hadoop-azure/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.azure.NativeAzureFileSystem
-org.apache.hadoop.fs.azure.NativeAzureFileSystem$Secure
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[20/50] [abbrv] hadoop git commit: YARN-6967. Limit application attempt's diagnostic message size thoroughly (Contributed by Chengbing Liu via Daniel Templeton)

2017-08-16 Thread haibochen
YARN-6967. Limit application attempt's diagnostic message size thoroughly
(Contributed by Chengbing Liu via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65364def
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65364def
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65364def

Branch: refs/heads/YARN-1011
Commit: 65364defb4a633ca20b39ebc38cd9c0db63a5835
Parents: c7680d4
Author: Daniel Templeton 
Authored: Fri Aug 11 14:28:55 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:28:55 2017 -0700

--
 .../rmapp/attempt/RMAppAttemptImpl.java | 16 
 1 file changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65364def/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 4210c54..254768b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1315,7 +1315,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 // AFTER the initial saving on app-attempt-start
 // These fields can be visible from outside only after they are saved in
 // StateStore
-String diags = null;
+BoundedAppender diags = new BoundedAppender(diagnostics.limit);
 
 // don't leave the tracking URL pointing to a non-existent AM
 if (conf.getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED,
@@ -1329,15 +1329,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 int exitStatus = ContainerExitStatus.INVALID;
 switch (event.getType()) {
 case LAUNCH_FAILED:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case REGISTERED:
-  diags = getUnexpectedAMRegisteredDiagnostics();
+  diags.append(getUnexpectedAMRegisteredDiagnostics());
   break;
 case UNREGISTERED:
   RMAppAttemptUnregistrationEvent unregisterEvent =
   (RMAppAttemptUnregistrationEvent) event;
-  diags = unregisterEvent.getDiagnosticMsg();
+  diags.append(unregisterEvent.getDiagnosticMsg());
   // reset finalTrackingUrl to url sent by am
   finalTrackingUrl = 
sanitizeTrackingUrl(unregisterEvent.getFinalTrackingUrl());
   finalStatus = unregisterEvent.getFinalApplicationStatus();
@@ -1345,16 +1345,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 case CONTAINER_FINISHED:
   RMAppAttemptContainerFinishedEvent finishEvent =
   (RMAppAttemptContainerFinishedEvent) event;
-  diags = getAMContainerCrashedDiagnostics(finishEvent);
+  diags.append(getAMContainerCrashedDiagnostics(finishEvent));
   exitStatus = finishEvent.getContainerStatus().getExitStatus();
   break;
 case KILL:
   break;
 case FAIL:
-  diags = event.getDiagnosticMsg();
+  diags.append(event.getDiagnosticMsg());
   break;
 case EXPIRE:
-  diags = getAMExpiredDiagnostics(event);
+  diags.append(getAMExpiredDiagnostics(event));
   break;
 default:
   break;
@@ -1368,7 +1368,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 ApplicationAttemptStateData.newInstance(
 applicationAttemptId,  getMasterContainer(),
 rmStore.getCredentialsFromAppAttempt(this),
-startTime, stateToBeStored, finalTrackingUrl, diags,
+startTime, stateToBeStored, finalTrackingUrl, diags.toString(),
 finalStatus, exitStatus,
   getFinishTime(), resUsage.getMemorySeconds(),
   resUsage.getVcoreSeconds(),


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[21/50] [abbrv] hadoop git commit: YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the diamond operator (Contributed by Larry Lo via Daniel Templeton)

2017-08-16 Thread haibochen
YARN-6882. AllocationFileLoaderService.reloadAllocations() should use the 
diamond operator
(Contributed by Larry Lo via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0996acde
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0996acde
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0996acde

Branch: refs/heads/YARN-1011
Commit: 0996acde6c325667aa19ae0740eb6b40bf4a682a
Parents: 65364de
Author: Daniel Templeton 
Authored: Fri Aug 11 14:50:46 2017 -0700
Committer: Daniel Templeton 
Committed: Fri Aug 11 14:50:46 2017 -0700

--
 .../scheduler/fair/AllocationFileLoaderService.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0996acde/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index bf5b4c5..313a27a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -266,7 +266,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
 Map configuredQueues = new HashMap<>();
 
 for (FSQueueType queueType : FSQueueType.values()) {
-  configuredQueues.put(queueType, new HashSet());
+  configuredQueues.put(queueType, new HashSet<>());
 }
 
 // Read and parse the allocations file.
@@ -280,7 +280,7 @@ public class AllocationFileLoaderService extends 
AbstractService {
   throw new AllocationConfigurationException("Bad fair scheduler config " +
   "file: top-level element not ");
 NodeList elements = root.getChildNodes();
-List queueElements = new ArrayList();
+List queueElements = new ArrayList<>();
 Element placementPolicyElement = null;
 for (int i = 0; i < elements.getLength(); i++) {
   Node node = elements.item(i);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: YARN-6905. Multiple HBaseTimelineStorage test failures due to missing FastNumberFormat (Contributed by Haibo Chen)

2017-08-16 Thread haibochen
YARN-6905. Multiple HBaseTimelineStorage test failures due to missing 
FastNumberFormat (Contributed by Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/608a06cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/608a06cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/608a06cc

Branch: refs/heads/YARN-1011
Commit: 608a06cca5d68b3155bd70a94bf29ae0942b9ca0
Parents: d72124a
Author: Vrushali C 
Authored: Mon Aug 14 11:40:27 2017 -0700
Committer: Vrushali C 
Committed: Mon Aug 14 11:41:11 2017 -0700

--
 .../storage/TestHBaseTimelineStorageApps.java   |  4 +-
 .../TestHBaseTimelineStorageEntities.java   | 14 ---
 .../storage/common/AppIdKeyConverter.java   |  3 +-
 .../common/HBaseTimelineStorageUtils.java   | 33 +
 .../TestCustomApplicationIdConversion.java  | 39 
 5 files changed, 86 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
index b3e5197..3948d23 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.Applica
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.application.ApplicationTable;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnNameConverter;
+import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.HBaseTimelineStorageUtils;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter;
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.Separator;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.StringKeyConverter;
@@ -493,7 +494,8 @@ public class TestHBaseTimelineStorageApps {
 event.addInfo(expKey, expVal);
 
 final TimelineEntity entity = new ApplicationEntity();
-entity.setId(ApplicationId.newInstance(0, 1).toString());
+entity.setId(HBaseTimelineStorageUtils.convertApplicationIdToString(
+ApplicationId.newInstance(0, 1)));
 entity.addEvent(event);
 
 TimelineEntities entities = new TimelineEntities();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/608a06cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
index 4b4c3e1..e18d0d0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
@@ -62,6 +62,7 @@ import 
org.apache.hadoop.yarn.server.timelineservice.reader.filter.TimelinePrefi
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.TimelineReader.Field;
 import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.EventColumnName;

[15/50] [abbrv] hadoop git commit: HADOOP-14760. Add missing override to LoadBalancingKMSClientProvider.

2017-08-16 Thread haibochen
HADOOP-14760. Add missing override to LoadBalancingKMSClientProvider.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07fff43f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07fff43f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07fff43f

Branch: refs/heads/YARN-1011
Commit: 07fff43f4a1e724c83ff8fcc90fac64aa04a39eb
Parents: 582648b
Author: Xiao Chen 
Authored: Fri Aug 11 11:41:16 2017 -0700
Committer: Xiao Chen 
Committed: Fri Aug 11 11:41:41 2017 -0700

--
 .../hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07fff43f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
index 6b20c99..6e010b1 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
@@ -292,7 +292,9 @@ public class LoadBalancingKMSClientProvider extends 
KeyProvider implements
 }
   }
 
-  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+  @Override
+  public EncryptedKeyVersion reencryptEncryptedKey(
+  final EncryptedKeyVersion ekv)
   throws IOException, GeneralSecurityException {
 try {
   return doOp(new ProviderCallable<EncryptedKeyVersion>() {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)

2017-08-16 Thread haibochen
HDFS-12221. Replace xcerces in XmlEditsVisitor. (Ajay Kumar via lei)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce797a17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce797a17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce797a17

Branch: refs/heads/YARN-1011
Commit: ce797a170669524224cfeaaf70647047e7626816
Parents: d8f74c3
Author: Lei Xu 
Authored: Mon Aug 14 10:27:47 2017 -0700
Committer: Lei Xu 
Committed: Mon Aug 14 10:27:47 2017 -0700

--
 .../hadoop-client-minicluster/pom.xml   |   6 --
 .../hadoop-client-runtime/pom.xml   |   7 ---
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   5 --
 .../offlineEditsViewer/XmlEditsVisitor.java |  41 
 .../hadoop-hdfs/src/test/resources/editsStored  | Bin 5850 -> 5850 bytes
 .../src/test/resources/editsStored.xml  |  62 +--
 .../hadoop-mapreduce-client/pom.xml |  10 +--
 hadoop-project-dist/pom.xml |  10 +--
 hadoop-project/pom.xml  |   8 ---
 hadoop-yarn-project/hadoop-yarn/pom.xml |  10 +--
 10 files changed, 62 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 5255640..5cf1fad 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -629,12 +629,6 @@
   
 
 
-  xerces:xercesImpl
-  
-**/*
-  
-
-
   
org.apache.hadoop:hadoop-mapreduce-client-jobclient:*
   
 testjar/*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 2f64152..24c6b7a 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -174,13 +174,6 @@
 
org/apache/jasper/compiler/Localizer.class
   
 
-
-
-  xerces:xercesImpl
-  
-META-INF/services/*
-  
-
 
 
   com.sun.jersey:*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 1c50d31..fa1044d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -174,11 +174,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;>
   compile
 
 
-  xerces
-  xercesImpl
-  compile
-
-
   org.apache.htrace
   htrace-core4
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce797a17/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
index 7a39ba6..ddf7933 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
@@ -20,17 +20,21 @@ package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.TransformerConfigurationException;
+import javax.xml.transform.sax.SAXTransformerFactory;
+import javax.xml.transform.sax.TransformerHandler;
+import javax.xml.transform.stream.StreamResult;
+import org.xml.sax.ContentHandler;
+import org.xml.sax.SAXException;
+import 

[08/50] [abbrv] hadoop git commit: HADOOP-14743. CompositeGroupsMapping should not swallow exceptions. Contributed by Wei-Chiu Chuang.

2017-08-16 Thread haibochen
HADOOP-14743. CompositeGroupsMapping should not swallow exceptions. Contributed 
by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8b75466
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8b75466
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8b75466

Branch: refs/heads/YARN-1011
Commit: a8b75466b21edfe8b12beb4420492817f0e03147
Parents: 54356b1
Author: Wei-Chiu Chuang 
Authored: Thu Aug 10 09:35:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Thu Aug 10 09:35:27 2017 -0700

--
 .../java/org/apache/hadoop/security/CompositeGroupsMapping.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8b75466/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
index b8cfdf7..b762df2 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/CompositeGroupsMapping.java
@@ -74,7 +74,9 @@ public class CompositeGroupsMapping
   try {
 groups = provider.getGroups(user);
   } catch (Exception e) {
-//LOG.warn("Exception trying to get groups for user " + user, e);  
+LOG.warn("Unable to get groups for user {} via {} because: {}",
+user, provider.getClass().getSimpleName(), e.toString());
+LOG.debug("Stacktrace: ", e);
   }
   if (groups != null && ! groups.isEmpty()) {
 groupSet.addAll(groups);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[10/50] [abbrv] hadoop git commit: MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are complete. (Peter Bacsko via Haibo Chen)

2017-08-16 Thread haibochen
MAPREDUCE-6870. Add configuration for MR job to finish when all reducers are 
complete. (Peter Bacsko via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a32e0138
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a32e0138
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a32e0138

Branch: refs/heads/YARN-1011
Commit: a32e0138fb63c92902e6613001f38a87c8a41321
Parents: 312e57b
Author: Haibo Chen 
Authored: Thu Aug 10 15:17:36 2017 -0700
Committer: Haibo Chen 
Committed: Thu Aug 10 15:17:36 2017 -0700

--
 .../mapreduce/v2/app/job/impl/JobImpl.java  |  35 -
 .../mapreduce/v2/app/job/impl/TestJobImpl.java  | 139 +++
 .../apache/hadoop/mapreduce/MRJobConfig.java|   6 +-
 .../src/main/resources/mapred-default.xml   |   8 ++
 4 files changed, 160 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 4d155d0..6880b6c 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -644,6 +644,8 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   private float reduceProgress;
   private float cleanupProgress;
   private boolean isUber = false;
+  private boolean finishJobWhenReducersDone;
+  private boolean completingJob = false;
 
   private Credentials jobCredentials;
   private Token jobToken;
@@ -717,6 +719,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 this.maxFetchFailuresNotifications = conf.getInt(
 MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
 MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
+this.finishJobWhenReducersDone = conf.getBoolean(
+MRJobConfig.FINISH_JOB_WHEN_REDUCERS_DONE,
+MRJobConfig.DEFAULT_FINISH_JOB_WHEN_REDUCERS_DONE);
   }
 
   protected StateMachine 
getStateMachine() {
@@ -2021,7 +2026,9 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
 TimeUnit.MILLISECONDS);
 return JobStateInternal.FAIL_WAIT;
   }
-  
+
+  checkReadyForCompletionWhenAllReducersDone(job);
+
   return job.checkReadyForCommit();
 }
 
@@ -2052,6 +2059,32 @@ public class JobImpl implements 
org.apache.hadoop.mapreduce.v2.app.job.Job,
   }
   job.metrics.killedTask(task);
 }
+
+   /** Improvement: if all reducers have finished, we check if we have
+   restarted mappers that are still running. This can happen in a
+   situation when a node becomes UNHEALTHY and mappers are rescheduled.
+   See MAPREDUCE-6870 for details */
+private void checkReadyForCompletionWhenAllReducersDone(JobImpl job) {
+  if (job.finishJobWhenReducersDone) {
+int totalReduces = job.getTotalReduces();
+int completedReduces = job.getCompletedReduces();
+
+if (totalReduces > 0 && totalReduces == completedReduces
+&& !job.completingJob) {
+
+  for (TaskId mapTaskId : job.mapTasks) {
+MapTaskImpl task = (MapTaskImpl) job.tasks.get(mapTaskId);
+if (!task.isFinished()) {
+  LOG.info("Killing map task " + task.getID());
+  job.eventHandler.handle(
+  new TaskEvent(task.getID(), TaskEventType.T_KILL));
+}
+  }
+
+  job.completingJob = true;
+}
+  }
+}
   }
 
   // Transition class for handling jobs with no tasks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a32e0138/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java
 

[03/50] [abbrv] hadoop git commit: YARN-6033. Add support for sections in container-executor configuration file. (Varun Vasudev via wandga)

2017-08-16 Thread haibochen
YARN-6033. Add support for sections in container-executor configuration file. 
(Varun Vasudev via wandga)

Change-Id: Ibc6d2a959debe5d8ff2b51504149742449d1f1da


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec694145
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec694145
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec694145

Branch: refs/heads/YARN-1011
Commit: ec694145cf9c0ade7606813871ca2a4a371def8e
Parents: 63cfcb9
Author: Wangda Tan 
Authored: Wed Aug 9 10:51:29 2017 -0700
Committer: Wangda Tan 
Committed: Wed Aug 9 10:51:29 2017 -0700

--
 .../hadoop-yarn-server-nodemanager/pom.xml  |  38 ++
 .../src/CMakeLists.txt  |  22 +
 .../container-executor/impl/configuration.c | 672 +--
 .../container-executor/impl/configuration.h | 182 +++--
 .../impl/container-executor.c   |  39 +-
 .../impl/container-executor.h   |  52 +-
 .../container-executor/impl/get_executable.c|   1 +
 .../main/native/container-executor/impl/main.c  |  17 +-
 .../main/native/container-executor/impl/util.c  | 134 
 .../main/native/container-executor/impl/util.h  | 115 
 .../test-configurations/configuration-1.cfg |  31 +
 .../test-configurations/configuration-2.cfg |  28 +
 .../test/test-configurations/old-config.cfg |  25 +
 .../test/test-container-executor.c  |  15 +-
 .../test/test_configuration.cc  | 432 
 .../native/container-executor/test/test_main.cc |  29 +
 .../native/container-executor/test/test_util.cc | 138 
 17 files changed, 1649 insertions(+), 321 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
index 28ee0d9..a50a769 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
@@ -215,6 +215,44 @@
   ${project.build.directory}/native-results
 
   
+  
+cetest
+cmake-test
+test
+
+  
+  cetest
+  
${project.build.directory}/native/test
+  ${basedir}/src
+  
${project.build.directory}/native/test/cetest
+  
+--gtest_filter=-Perf.
+
--gtest_output=xml:${project.build.directory}/surefire-reports/TEST-cetest.xml
+  
+  
${project.build.directory}/surefire-reports
+
+  
+
+  
+  
+org.apache.maven.plugins
+maven-antrun-plugin
+
+  
+make
+compile
+
+  run
+
+
+  
+
+  
+
+  
+
+  
 
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
index 5b52536..100d7ca 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/CMakeLists.txt
@@ -19,6 +19,9 @@ cmake_minimum_required(VERSION 2.6 FATAL_ERROR)
 list(APPEND CMAKE_MODULE_PATH 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common)
 include(HadoopCommon)
 
+# Set gtest path
+set(GTEST_SRC_DIR 
${CMAKE_SOURCE_DIR}/../../../../../hadoop-common-project/hadoop-common/src/main/native/gtest)
+
 # determine if container-executor.conf.dir is an absolute
 # path in case the OS we're compiling on doesn't have
 # a hook in get_executable. We'll use this define
@@ -80,12 +83,20 @@ endfunction()
 include_directories(
 ${CMAKE_CURRENT_SOURCE_DIR}
 

[02/50] [abbrv] hadoop git commit: YARN-6033. Add support for sections in container-executor configuration file. (Varun Vasudev via wandga)

2017-08-16 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec694145/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
new file mode 100644
index 000..6ee0ab2
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test_configuration.cc
@@ -0,0 +1,432 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include 
+#include 
+
+extern "C" {
+#include "util.h"
+#include "configuration.h"
+#include "configuration.c"
+}
+
+
+namespace ContainerExecutor {
+  class TestConfiguration : public ::testing::Test {
+  protected:
+virtual void SetUp() {
+  new_config_format_file = "test-configurations/configuration-1.cfg";
+  old_config_format_file = "test-configurations/old-config.cfg";
+  mixed_config_format_file = "test-configurations/configuration-2.cfg";
+  loadConfigurations();
+  return;
+}
+
+void loadConfigurations() {
+  int ret = 0;
+  ret = read_config(new_config_format_file.c_str(), &new_config_format);
+  ASSERT_EQ(0, ret);
+  ret = read_config(old_config_format_file.c_str(), &old_config_format);
+  ASSERT_EQ(0, ret);
+  ret = read_config(mixed_config_format_file.c_str(),
+    &mixed_config_format);
+  ASSERT_EQ(0, ret);
+}
+
+virtual void TearDown() {
+  free_configuration(_config_format);
+  free_configuration(_config_format);
+  return;
+}
+
+std::string new_config_format_file;
+std::string old_config_format_file;
+std::string mixed_config_format_file;
+struct configuration new_config_format;
+struct configuration old_config_format;
+struct configuration mixed_config_format;
+  };
+
+
+  TEST_F(TestConfiguration, test_get_configuration_values_delimiter) {
+char **split_values;
+split_values = get_configuration_values_delimiter(NULL, "", 
_config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", NULL,
+  _config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  NULL, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  _config_format, NULL);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", 
"abcd",
+  _config_format, "%");
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values_delimiter("yarn.local.dirs", "",
+  _config_format, "%");
+ASSERT_STREQ("/var/run/yarn", split_values[0]);
+ASSERT_STREQ("/tmp/mydir", split_values[1]);
+ASSERT_EQ(NULL, split_values[2]);
+free(split_values);
+split_values = get_configuration_values_delimiter("allowed.system.users",
+  "", _config_format, "%");
+ASSERT_STREQ("nobody,daemon", split_values[0]);
+ASSERT_EQ(NULL, split_values[1]);
+free(split_values);
+  }
+
+  TEST_F(TestConfiguration, test_get_configuration_values) {
+char **split_values;
+split_values = get_configuration_values(NULL, "", _config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", NULL, 
_config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", "", NULL);
+ASSERT_EQ(NULL, split_values);
+split_values = get_configuration_values("yarn.local.dirs", "abcd", 
_config_format);
+ASSERT_EQ(NULL, split_values);
+split_values = 

[32/50] [abbrv] hadoop git commit: YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via Daniel Templeton)

2017-08-16 Thread haibochen
YARN-6881. LOG is unused in AllocationConfiguration (Contributed by weiyuan via 
Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b09c327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b09c327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b09c327

Branch: refs/heads/YARN-1011
Commit: 6b09c327057947049ef7984afbb5ed225f15fc2d
Parents: 608a06c
Author: Daniel Templeton 
Authored: Mon Aug 14 11:55:33 2017 -0700
Committer: Daniel Templeton 
Committed: Mon Aug 14 11:55:33 2017 -0700

--
 .../resourcemanager/scheduler/fair/AllocationConfiguration.java   | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b09c327/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
index f143aa6..71e6f7f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationConfiguration.java
@@ -23,8 +23,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -41,7 +39,6 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import com.google.common.annotations.VisibleForTesting;
 
 public class AllocationConfiguration extends ReservationSchedulerConfiguration 
{
-  private static final Log LOG = LogFactory.getLog(FSQueue.class.getName());
   private static final AccessControlList EVERYBODY_ACL = new 
AccessControlList("*");
   private static final AccessControlList NOBODY_ACL = new AccessControlList(" 
");
   private static final ResourceCalculator RESOURCE_CALCULATOR =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[33/50] [abbrv] hadoop git commit: YARN-6987. Log app attempt during InvalidStateTransition. Contributed by Jonathan Eagles

2017-08-16 Thread haibochen
YARN-6987. Log app attempt during InvalidStateTransition. Contributed by 
Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3325ef65
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3325ef65
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3325ef65

Branch: refs/heads/YARN-1011
Commit: 3325ef653d6f364a82dd32485d9ef6d987380ce3
Parents: 6b09c32
Author: Jason Lowe 
Authored: Mon Aug 14 14:40:08 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 14:40:08 2017 -0500

--
 .../hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java   | 3 ++-
 .../server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java| 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index fa2f20c..03be793 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -888,7 +888,8 @@ public class RMAppImpl implements RMApp, Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App: " + appID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3325ef65/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 254768b..7d453bd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -911,7 +911,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, 
Recoverable {
 /* keep the master in sync with the state machine */
 this.stateMachine.doTransition(event.getType(), event);
   } catch (InvalidStateTransitionException e) {
-LOG.error("Can't handle this event at current state", e);
+LOG.error("App attempt: " + appAttemptID
++ " can't handle this event at current state", e);
 /* TODO fail the application on the failed transition */
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[43/50] [abbrv] hadoop git commit: YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah Yousufi.

2017-08-16 Thread haibochen
YARN-5146. Support for Fair Scheduler in new YARN UI. Contributed by Abdullah 
Yousufi.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dadb0c22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dadb0c22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dadb0c22

Branch: refs/heads/YARN-1011
Commit: dadb0c2225adef5cb0126610733c285b51f4f43e
Parents: e3ae3e2
Author: Sunil G 
Authored: Tue Aug 15 21:58:44 2017 +0530
Committer: Sunil G 
Committed: Tue Aug 15 21:58:44 2017 +0530

--
 .../src/main/webapp/app/adapters/yarn-queue.js  |  30 -
 .../app/adapters/yarn-queue/capacity-queue.js   |  23 
 .../app/adapters/yarn-queue/fair-queue.js   |  23 
 .../app/adapters/yarn-queue/fifo-queue.js   |  23 
 .../app/adapters/yarn-queue/yarn-queue.js   |  30 +
 .../main/webapp/app/components/tree-selector.js |  19 ++-
 .../src/main/webapp/app/models/yarn-queue.js|  94 --
 .../app/models/yarn-queue/capacity-queue.js |  95 ++
 .../webapp/app/models/yarn-queue/fair-queue.js  |  79 
 .../webapp/app/models/yarn-queue/fifo-queue.js  |  52 
 .../webapp/app/models/yarn-queue/yarn-queue.js  |  23 
 .../main/webapp/app/routes/cluster-overview.js  |   4 +-
 .../src/main/webapp/app/routes/yarn-queue.js|  26 ++--
 .../src/main/webapp/app/routes/yarn-queues.js   |  12 +-
 .../main/webapp/app/routes/yarn-queues/index.js |  25 
 .../app/routes/yarn-queues/queues-selector.js   |  25 
 .../main/webapp/app/serializers/yarn-queue.js   | 129 ---
 .../serializers/yarn-queue/capacity-queue.js| 128 ++
 .../app/serializers/yarn-queue/fair-queue.js|  92 +
 .../app/serializers/yarn-queue/fifo-queue.js|  59 +
 .../app/serializers/yarn-queue/yarn-queue.js|  47 +++
 .../components/queue-configuration-table.hbs|  54 
 .../templates/components/queue-navigator.hbs|   7 +-
 .../yarn-queue/capacity-queue-conf-table.hbs|  54 
 .../yarn-queue/capacity-queue-info.hbs  |  84 
 .../components/yarn-queue/capacity-queue.hbs|  63 +
 .../yarn-queue/fair-queue-conf-table.hbs|  52 
 .../components/yarn-queue/fair-queue-info.hbs   |  66 ++
 .../components/yarn-queue/fair-queue.hbs|  63 +
 .../yarn-queue/fifo-queue-conf-table.hbs|  56 
 .../components/yarn-queue/fifo-queue-info.hbs   |  47 +++
 .../components/yarn-queue/fifo-queue.hbs|  48 +++
 .../webapp/app/templates/yarn-queue/info.hbs|  73 +--
 .../main/webapp/app/templates/yarn-queues.hbs   |  54 +---
 .../src/main/webapp/app/utils/color-utils.js|   1 -
 35 files changed, 1266 insertions(+), 494 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
deleted file mode 100644
index f2017df..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue.js
+++ /dev/null
@@ -1,30 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import AbstractAdapter from './abstract';
-
-export default AbstractAdapter.extend({
-  address: "rmWebAddress",
-  restNameSpace: "cluster",
-  serverName: "RM",
-
-  pathForType(/*modelName*/) {
-return 'scheduler'; // move to some common place, return path by modelname.
-  }
-
-});

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dadb0c22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-queue/capacity-queue.js
--
diff --git 

[49/50] [abbrv] hadoop git commit: YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo Chen)

2017-08-16 Thread haibochen
YARN-6670 Add separate NM overallocation thresholds for cpu and memory (Haibo 
Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5baae1be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5baae1be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5baae1be

Branch: refs/heads/YARN-1011
Commit: 5baae1bee02fa3f65fd1f8aff174820034d9ceab
Parents: 89dbd480
Author: Haibo Chen 
Authored: Mon Jul 10 09:55:42 2017 -0700
Committer: Haibo Chen 
Committed: Wed Aug 16 10:02:43 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 36 +--
 .../src/main/resources/yarn-default.xml | 42 ++--
 .../server/api/records/ResourceThresholds.java  | 11 +++-
 .../monitor/ContainersMonitorImpl.java  | 67 +++-
 4 files changed, 124 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5baae1be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9e0a2a4..a54bd11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1614,17 +1614,39 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
-  /** Overallocation (= allocation based on utilization) configs. */
-  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
-  NM_PREFIX + "overallocation.allocation-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
-  = 0f;
+  /**
+   * General overallocation threshold if no resource-type-specific
+   * threshold is provided.
+   */
+  public static final String NM_OVERALLOCATION_GENERAL_THRESHOLD =
+  NM_PREFIX + "overallocation.general-utilization-threshold";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_GENERAL_THRESHOLD = -1.0f;
+  /**
+   * The maximum value of utilization threshold for all resource types
+   * up to which the scheduler allocates OPPORTUNISTIC containers.
+   */
   @Private
-  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final float MAX_NM_OVERALLOCATION_THRESHOLD = 0.95f;
+
+  /**
+   * NM CPU utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_CPU_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.cpu-utilization-threshold";
+
+  /**
+   * NM memory utilization threshold up to which the scheduler allocates
+   * OPPORTUNISTIC containers after the node's capacity is fully allocated.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
+  NM_PREFIX + "overallocation.memory-utilization-threshold";
+
   public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
   NM_PREFIX + "overallocation.preemption-threshold";
   public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0f;
+  = 0.96f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5baae1be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index c87c200..d76e13e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1546,14 +1546,44 @@
 
   
 The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node 
that
+  applies to all resource types (expressed as a float between 0 and 0.95).
+  By default, over-allocation is turned off (value = -1). When turned on,
+  the node allows running OPPORTUNISTIC containers when the aggregate
+  utilization for each resource type is under the value specified here

[28/50] [abbrv] hadoop git commit: YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by Yuqi Wang

2017-08-16 Thread haibochen
YARN-6959. RM may allocate wrong AM Container for new attempt. Contributed by 
Yuqi Wang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2f6299f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2f6299f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2f6299f

Branch: refs/heads/YARN-1011
Commit: e2f6299f6f580d7a03f2377d19ac85f55fd4e73b
Parents: ce797a1
Author: Jian He 
Authored: Mon Aug 14 10:51:04 2017 -0700
Committer: Jian He 
Committed: Mon Aug 14 10:51:30 2017 -0700

--
 .../scheduler/AbstractYarnScheduler.java|  1 +
 .../scheduler/capacity/CapacityScheduler.java   | 13 ++
 .../scheduler/fair/FairScheduler.java   | 15 ++-
 .../scheduler/fifo/FifoScheduler.java   | 15 ++-
 .../scheduler/fair/TestFairScheduler.java   | 46 ++--
 5 files changed, 63 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index d506f4d..79caab0 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -323,6 +323,7 @@ public abstract class AbstractYarnScheduler
 
   }
 
+  // TODO: Rename it to getCurrentApplicationAttempt
   public T getApplicationAttempt(ApplicationAttemptId applicationAttemptId) {
 SchedulerApplication app = applications.get(
 applicationAttemptId.getApplicationId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 3286982..e4ca003 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -903,6 +903,19 @@ public class CapacityScheduler extends
   ContainerUpdates updateRequests) {
 FiCaSchedulerApp application = getApplicationAttempt(applicationAttemptId);
 if (application == null) {
+  LOG.error("Calling allocate on removed or non existent application " +
+  applicationAttemptId.getApplicationId());
+  return EMPTY_ALLOCATION;
+}
+
+// The allocate may be the leftover from previous attempt, and it will
+// impact current attempt, such as confuse the request and allocation for
+// current attempt's AM container.
+// Note outside precondition check for the attempt id may be
+// outdated here, so double check it here is necessary.
+if (!application.getApplicationAttemptId().equals(applicationAttemptId)) {
+  LOG.error("Calling allocate on previous or removed " +
+  "or non existent application attempt " + applicationAttemptId);
   return EMPTY_ALLOCATION;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2f6299f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 

[30/50] [abbrv] hadoop git commit: HDFS-12162. Update listStatus document to describe the behavior when the argument is a file. Contributed by Ajay Kumar.

2017-08-16 Thread haibochen
HDFS-12162. Update listStatus document to describe the behavior when the 
argument is a file. Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d72124a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d72124a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d72124a4

Branch: refs/heads/YARN-1011
Commit: d72124a44268e21ada036242bfbccafc23c52ed0
Parents: 18f3603
Author: Anu Engineer 
Authored: Mon Aug 14 11:32:49 2017 -0700
Committer: Anu Engineer 
Committed: Mon Aug 14 11:32:49 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java |  2 +-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 39 
 2 files changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index c008802..4b5918a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -669,7 +669,7 @@ public class FSOperations {
 /**
  * Creates a list-status executor.
  *
- * @param path the directory to retrieve the status of its contents.
+ * @param path the directory/file to retrieve the status of its contents.
  * @param filter glob filter to use.
  *
  * @throws IOException thrown if the filter expression is incorrect.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d72124a4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 7544c80..03834eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -495,6 +495,45 @@ See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt
 
 See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
 
+### List a File
+
+* Submit an HTTP GET request.
+
+curl -i  "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=LISTSTATUS"
+
+The client receives a response with a [`FileStatuses` JSON 
object](#FileStatuses_JSON_Schema):
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 427
+
+{
+  "FileStatuses":
+  {
+"FileStatus":
+[
+  {
+"accessTime"  : 1320171722771,
+"blockSize"   : 33554432,
+"childrenNum" : 0,
+"fileId"  : 16390,
+"group"   : "supergroup",
+"length"  : 1366,
+"modificationTime": 1501770633062,
+"owner"   : "webuser",
+"pathSuffix"  : "",
+"permission"  : "644",
+"replication" : 1,
+"storagePolicy"   : 0,
+"type": "FILE"
+  }
+]
+  }
+}
+
+See also: 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus
+
+
 ### Iteratively List a Directory
 
 * Submit a HTTP GET request.


-
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-6705 Add separate NM preemption thresholds for cpu and memory (Haibo Chen)

2017-08-16 Thread haibochen
YARN-6705 Add separate NM preemption thresholds for cpu and memory  (Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6e03a59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6e03a59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6e03a59

Branch: refs/heads/YARN-1011
Commit: f6e03a59b0ad4ab0ac5e6b520884b7c7e8019986
Parents: 5baae1b
Author: Haibo Chen 
Authored: Wed Jul 12 12:32:13 2017 -0700
Committer: Haibo Chen 
Committed: Wed Aug 16 10:02:43 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java | 31 +--
 .../src/main/resources/yarn-default.xml | 34 ++--
 .../monitor/ContainersMonitorImpl.java  | 42 +---
 3 files changed, 85 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e03a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index a54bd11..6fb75de 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1643,10 +1643,33 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_OVERALLOCATION_MEMORY_UTILIZATION_THRESHOLD =
   NM_PREFIX + "overallocation.memory-utilization-threshold";
 
-  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
-  NM_PREFIX + "overallocation.preemption-threshold";
-  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
-  = 0.96f;
+  /**
+   * The CPU utilization threshold, if went beyond for a few times in a row,
+   * OPPORTUNISTIC containers started due to overallocation should start
+   * getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.cpu";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_CPU_PREEMPTION_THRESHOLD = 0.99f;
+
+  /**
+   * The number of times that CPU utilization must go over the CPU preemption
+   * threshold consecutively before preemption starts to kick in.
+   */
+  public static final String NM_OVERALLOCATION_PREEMPTION_CPU_COUNT =
+  NM_PREFIX + "overallocation.preemption-threshold-count.cpu";
+  public static final int DEFAULT_NM_OVERALLOCATION_PREEMPTION_CPU_COUNT = 4;
+
+
+  /**
+   * The memory utilization threshold beyond which OPPORTUNISTIC containers
+   * started due to overallocation should start getting preempted.
+   */
+  public static final String NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold.memory";
+  public static final float
+  DEFAULT_NM_OVERALLOCATION_MEMORY_PREEMPTION_THRESHOLD = 0.95f;
 
   /**
* Interval of time the linux container executor should try cleaning up

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6e03a59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index d76e13e..9b9b816 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1588,11 +1588,37 @@
 
   
 When a node is over-allocated to improve utilization by
-  running OPPORTUNISTIC containers, this config captures the utilization
-  beyond which OPPORTUNISTIC containers should start getting preempted.
+  running OPPORTUNISTIC containers, this config captures the CPU
+  utilization beyond which OPPORTUNISTIC containers should start getting
+  preempted. This is used in combination with
+  yarn.nodemanager.overallocation.preemption-threshold-count.cpu, that is,
+  only when the CPU utilization goes over this threshold consecutively for
+  a few times will preemption kick in.
 
-    <name>yarn.nodemanager.overallocation.preemption-threshold</name>
-    <value>0.96</value>
+    <name>yarn.nodemanager.overallocation.preemption-threshold.cpu</name>
+    <value>0.99</value>
+  </property>
+
+  
+When a node is over-allocated to 

[34/50] [abbrv] hadoop git commit: YARN-6917. Queue path is recomputed from scratch on every allocation. Contributed by Eric Payne

2017-08-16 Thread haibochen
YARN-6917. Queue path is recomputed from scratch on every allocation. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55587928
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55587928
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55587928

Branch: refs/heads/YARN-1011
Commit: 5558792894169425bff054364a1ab4c48b347fb9
Parents: 3325ef6
Author: Jason Lowe 
Authored: Mon Aug 14 15:31:34 2017 -0500
Committer: Jason Lowe 
Committed: Mon Aug 14 15:31:34 2017 -0500

--
 .../resourcemanager/scheduler/capacity/AbstractCSQueue.java  | 8 
 .../server/resourcemanager/scheduler/capacity/LeafQueue.java | 5 -
 .../resourcemanager/scheduler/capacity/ParentQueue.java  | 6 --
 3 files changed, 8 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 5fbdead..d7c452a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -76,6 +76,7 @@ public abstract class AbstractCSQueue implements CSQueue {
   private static final Log LOG = LogFactory.getLog(AbstractCSQueue.class);  
   volatile CSQueue parent;
   final String queueName;
+  private final String queuePath;
   volatile int numContainers;
   
   final Resource minimumAllocation;
@@ -119,6 +120,8 @@ public abstract class AbstractCSQueue implements CSQueue {
 this.labelManager = cs.getRMContext().getNodeLabelManager();
 this.parent = parent;
 this.queueName = queueName;
+this.queuePath =
+  ((parent == null) ? "" : (parent.getQueuePath() + ".")) + this.queueName;
 this.resourceCalculator = cs.getResourceCalculator();
 this.activitiesManager = cs.getActivitiesManager();
 
@@ -150,6 +153,11 @@ public abstract class AbstractCSQueue implements CSQueue {
 queueCapacities,
 parent == null ? null : parent.getQueueCapacities());
   }
+
+  @Override
+  public String getQueuePath() {
+return queuePath;
+  }
   
   @Override
   public float getCapacity() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 2e502b7..d15431e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -299,11 +299,6 @@ public class LeafQueue extends AbstractCSQueue {
 }
   }
 
-  @Override
-  public String getQueuePath() {
-return getParent().getQueuePath() + "." + getQueueName();
-  }
-
   /**
* Used only by tests.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55587928/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
--
diff --git 

[23/50] [abbrv] hadoop git commit: YARN-6687. Validate that the duration of the periodic reservation is less than the periodicity. (subru via curino)

2017-08-16 Thread haibochen
YARN-6687. Validate that the duration of the periodic reservation is less than 
the periodicity. (subru via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d97b79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d97b79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d97b79

Branch: refs/heads/YARN-1011
Commit: 28d97b79b69bb2be02d9320105e155eeed6f9e78
Parents: cc59b5f
Author: Carlo Curino 
Authored: Fri Aug 11 16:58:04 2017 -0700
Committer: Carlo Curino 
Committed: Fri Aug 11 16:58:04 2017 -0700

--
 .../reservation/ReservationInputValidator.java  | 18 ++--
 .../TestReservationInputValidator.java  | 93 
 2 files changed, 106 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
index 0e9a825..027d066 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationInputValidator.java
@@ -129,11 +129,12 @@ public class ReservationInputValidator {
   Resources.multiply(rr.getCapability(), rr.getConcurrency()));
 }
 // verify the allocation is possible (skip for ANY)
-if (contract.getDeadline() - contract.getArrival() < minDuration
+long duration = contract.getDeadline() - contract.getArrival();
+if (duration < minDuration
 && type != ReservationRequestInterpreter.R_ANY) {
   message =
   "The time difference ("
-  + (contract.getDeadline() - contract.getArrival())
+  + (duration)
   + ") between arrival (" + contract.getArrival() + ") "
   + "and deadline (" + contract.getDeadline() + ") must "
   + " be greater or equal to the minimum resource duration ("
@@ -158,15 +159,22 @@ public class ReservationInputValidator {
 // check that the recurrence is a positive long value.
 String recurrenceExpression = contract.getRecurrenceExpression();
 try {
-  Long recurrence = Long.parseLong(recurrenceExpression);
+  long recurrence = Long.parseLong(recurrenceExpression);
   if (recurrence < 0) {
 message = "Negative Period : " + recurrenceExpression + ". Please try"
-+ " again with a non-negative long value as period";
++ " again with a non-negative long value as period.";
+throw RPCUtil.getRemoteException(message);
+  }
+  // verify duration is less than recurrence for periodic reservations
+  if (recurrence > 0 && duration > recurrence) {
+message = "Duration of the requested reservation: " + duration
++ " is greater than the recurrence: " + recurrence
++ ". Please try again with a smaller duration.";
 throw RPCUtil.getRemoteException(message);
   }
 } catch (NumberFormatException e) {
   message = "Invalid period " + recurrenceExpression + ". Please try"
-  + " again with a non-negative long value as period";
+  + " again with a non-negative long value as period.";
   throw RPCUtil.getRemoteException(message);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d97b79/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestReservationInputValidator.java
index 2917cd9..90a681d 100644

[37/50] [abbrv] hadoop git commit: YARN-5978. ContainerScheduler and ContainerManager changes to support ExecType update. (Kartheek Muthyala via asuresh)

2017-08-16 Thread haibochen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d7be1d8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
index aeba399..a1c247b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/TestContainerSchedulerQueuing.java
@@ -27,6 +27,8 @@ import java.util.List;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.ContainerUpdateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
@@ -37,6 +39,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
+import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.ConfigurationException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -951,4 +954,97 @@ public class TestContainerSchedulerQueuing extends 
BaseContainerManagerTest {
 map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
 .getContainerId());
   }
+
+  /**
+   * Starts one OPPORTUNISTIC container that takes up the whole node's
+   * resources, and submit one more that will be queued. Now promote the
+   * queued OPPORTUNISTIC container, which should kill the current running
+   * OPPORTUNISTIC container to make room for the promoted request.
+   * @throws Exception
+   */
+  @Test
+  public void testPromotionOfOpportunisticContainers() throws Exception {
+containerManager.start();
+
+ContainerLaunchContext containerLaunchContext =
+recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+List list = new ArrayList<>();
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(2048, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+list.add(StartContainerRequest.newInstance(
+containerLaunchContext,
+createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
+context.getNodeId(),
+user, BuilderUtils.newResource(1024, 1),
+context.getContainerTokenSecretManager(), null,
+ExecutionType.OPPORTUNISTIC)));
+
+StartContainersRequest allRequests =
+StartContainersRequest.newInstance(list);
+containerManager.startContainers(allRequests);
+
+Thread.sleep(5000);
+
+// Ensure first container is running and others are queued.
+List statList = new ArrayList();
+for (int i = 0; i < 3; i++) {
+  statList.add(createContainerId(i));
+}
+GetContainerStatusesRequest statRequest = GetContainerStatusesRequest
+.newInstance(Arrays.asList(createContainerId(0)));
+List containerStatuses = containerManager
+.getContainerStatuses(statRequest).getContainerStatuses();
+for (ContainerStatus status : containerStatuses) {
+  if (status.getContainerId().equals(createContainerId(0))) {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
+status.getState());
+  } else {
+Assert.assertEquals(
+org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
+status.getState());
+  }
+}
+
+ContainerScheduler containerScheduler =
+containerManager.getContainerScheduler();
+// Ensure two containers are properly queued.
+

[45/50] [abbrv] hadoop git commit: HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed

2017-08-16 Thread haibochen
HDFS-12301. NN File Browser UI: Navigate to a path when enter is pressed


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f34646d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f34646d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f34646d6

Branch: refs/heads/YARN-1011
Commit: f34646d652310442cb5339aa269f10dfa838
Parents: d265459
Author: Ravi Prakash 
Authored: Tue Aug 15 15:44:59 2017 -0700
Committer: Ravi Prakash 
Committed: Tue Aug 15 15:44:59 2017 -0700

--
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.js  | 6 ++
 1 file changed, 6 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f34646d6/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 3e276a9..dae3519 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -370,6 +370,12 @@
 
 var b = function() { browse_directory($('#directory').val()); };
 $('#btn-nav-directory').click(b);
+//Also navigate to the directory when a user presses enter.
+$('#directory').on('keyup', function (e) {
+  if (e.which == 13) {
+browse_directory($('#directory').val());
+  }
+});
 var dir = window.location.hash.slice(1);
 if(dir == "") {
   window.location.hash = "/";


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri via Subru).

2017-08-16 Thread haibochen
HADOOP-14773. Extend ZKCuratorManager API for more reusability. (Íñigo Goiri 
via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75dd866b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75dd866b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75dd866b

Branch: refs/heads/YARN-1011
Commit: 75dd866bfb8b63cb9f13179d4365b05c48e0907d
Parents: f34646d
Author: Subru Krishnan 
Authored: Tue Aug 15 16:53:59 2017 -0700
Committer: Subru Krishnan 
Committed: Tue Aug 15 16:53:59 2017 -0700

--
 .../hadoop/util/curator/ZKCuratorManager.java   | 54 ++--
 .../util/curator/TestZKCuratorManager.java  |  2 +-
 .../recovery/ZKRMStateStore.java| 19 +--
 3 files changed, 52 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75dd866b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
index 3adf028..9a031af 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java
@@ -33,9 +33,12 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.ZKUtil;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.data.ACL;
+import org.apache.zookeeper.data.Stat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper class that provides utility methods specific to ZK operations.
  */
@@ -179,7 +182,6 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
@@ -190,16 +192,38 @@ public final class ZKCuratorManager {
   /**
* Get the data in a ZNode.
* @param path Path of the ZNode.
-   * @param stat Output statistics of the ZNode.
+   * @param stat
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public byte[] getData(final String path, Stat stat) throws Exception {
+return curator.getData().storingStatIn(stat).forPath(path);
+  }
+
+  /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
* @return The data in the ZNode.
* @throws Exception If it cannot contact Zookeeper.
*/
-  public String getSringData(final String path) throws Exception {
+  public String getStringData(final String path) throws Exception {
 byte[] bytes = getData(path);
 return new String(bytes, Charset.forName("UTF-8"));
   }
 
   /**
+   * Get the data in a ZNode.
+   * @param path Path of the ZNode.
+   * @param stat Output statistics of the ZNode.
+   * @return The data in the ZNode.
+   * @throws Exception If it cannot contact Zookeeper.
+   */
+  public String getStringData(final String path, Stat stat) throws Exception {
+byte[] bytes = getData(path, stat);
+return new String(bytes, Charset.forName("UTF-8"));
+  }
+
+  /**
* Set data into a ZNode.
* @param path Path of the ZNode.
* @param data Data to set.
@@ -272,14 +296,36 @@ public final class ZKCuratorManager {
   }
 
   /**
+   * Utility function to ensure that the configured base znode exists.
+   * This recursively creates the znode as well as all of its parents.
+   * @param path Path of the znode to create.
+   * @throws Exception If it cannot create the file.
+   */
+  public void createRootDirRecursively(String path) throws Exception {
+String[] pathParts = path.split("/");
+Preconditions.checkArgument(
+pathParts.length >= 1 && pathParts[0].isEmpty(),
+"Invalid path: %s", path);
+StringBuilder sb = new StringBuilder();
+
+for (int i = 1; i < pathParts.length; i++) {
+  sb.append("/").append(pathParts[i]);
+  create(sb.toString());
+}
+  }
+
+  /**
* Delete a ZNode.
* @param path Path of the ZNode.
+   * @return If the znode was deleted.
* @throws Exception If it cannot contact ZooKeeper.
*/
-  public void delete(final String path) throws Exception {
+  public boolean delete(final String path) throws Exception {
 if (exists(path)) {
   curator.delete().deletingChildrenIfNeeded().forPath(path);
+  return true;
 }
+return false;
   }
 
   /**


[47/50] [abbrv] hadoop git commit: YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by Masahiro Tanaka.

2017-08-16 Thread haibochen
YARN-6965. Duplicate instantiation in FairSchedulerQueueInfo. Contributed by 
Masahiro Tanaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/588c190a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/588c190a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/588c190a

Branch: refs/heads/YARN-1011
Commit: 588c190afd49bdbd5708f7805bf6c68f09fee142
Parents: 75dd866
Author: Akira Ajisaka 
Authored: Wed Aug 16 14:06:22 2017 +0900
Committer: Akira Ajisaka 
Committed: Wed Aug 16 14:06:22 2017 +0900

--
 .../server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java   | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/588c190a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
index a4607c2..79339c7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java
@@ -99,7 +99,6 @@ public class FairSchedulerQueueInfo {
 steadyFairResources = new ResourceInfo(queue.getSteadyFairShare());
 fairResources = new ResourceInfo(queue.getFairShare());
 minResources = new ResourceInfo(queue.getMinShare());
-maxResources = new ResourceInfo(queue.getMaxShare());
 maxResources = new ResourceInfo(
 Resources.componentwiseMin(queue.getMaxShare(),
 scheduler.getClusterResource()));


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[48/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2017-08-16 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89dbd480
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89dbd480
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89dbd480

Branch: refs/heads/YARN-1011
Commit: 89dbd4800983ef61eb0f70f624caf9e7d32391b6
Parents: 588c190
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Wed Aug 16 10:02:43 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  45 +++-
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   6 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 455 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89dbd480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 8acaef8..9e0a2a4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1607,7 +1607,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for applicaiton life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -1615,6 +1614,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89dbd480/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index dbf115b..c87c200 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1545,6 +1545,27 @@
   
 
   
+The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node,
+  expressed as a float between 0 and 0.95. By default, over-allocation is
+  turned off (value = 0). When turned on, the node allows running
+  OPPORTUNISTIC containers when the aggregate utilization is under the
+  value specified here multiplied by the node's advertised capacity.
+
+yarn.nodemanager.overallocation.allocation-threshold
+0f
+  
+
+  
+When a node is over-allocated to improve utilization by
+  

[39/50] [abbrv] hadoop git commit: HADOOP-14726. Mark FileStatus::isDir as final

2017-08-16 Thread haibochen
HADOOP-14726. Mark FileStatus::isDir as final


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/645a8f2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/645a8f2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/645a8f2a

Branch: refs/heads/YARN-1011
Commit: 645a8f2a4d09acb5a21820f52ee78784d9e4cc8a
Parents: 4d7be1d
Author: Chris Douglas 
Authored: Mon Aug 14 21:57:20 2017 -0700
Committer: Chris Douglas 
Committed: Mon Aug 14 21:57:20 2017 -0700

--
 .../java/org/apache/hadoop/fs/FileStatus.java| 19 +--
 .../hadoop/fs/viewfs/ViewFsFileStatus.java   |  8 +---
 .../fs/viewfs/ViewFsLocatedFileStatus.java   |  6 --
 .../hadoop/hdfs/protocolPB/PBHelperClient.java   |  2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java   |  6 --
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java |  8 
 .../apache/hadoop/hdfs/server/mover/Mover.java   |  2 +-
 .../hdfs/server/namenode/NamenodeFsck.java   |  4 ++--
 .../hadoop/hdfs/TestDFSUpgradeFromImage.java |  3 +--
 .../hdfs/server/mover/TestStorageMover.java  |  2 +-
 .../hadoop/hdfs/server/namenode/TestStartup.java |  4 ++--
 .../server/namenode/ha/TestEditLogTailer.java|  4 ++--
 .../namenode/ha/TestFailureToReadEdits.java  |  6 +++---
 .../namenode/ha/TestInitializeSharedEdits.java   |  2 +-
 .../lib/input/TestCombineFileInputFormat.java|  2 +-
 .../azure/TestOutOfBandAzureBlobOperations.java  |  8 
 .../hadoop/fs/swift/snative/SwiftFileStatus.java | 16 
 .../snative/SwiftNativeFileSystemStore.java  |  4 ++--
 .../fs/swift/TestSwiftFileSystemDirectories.java |  4 ++--
 .../TestSwiftFileSystemPartitionedUploads.java   |  2 +-
 20 files changed, 46 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 2f22ea0..8575439 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -172,7 +172,7 @@ public class FileStatus implements Writable, 
Comparable,
* @return true if this is a file
*/
   public boolean isFile() {
-return !isdir && !isSymlink();
+return !isDirectory() && !isSymlink();
   }
 
   /**
@@ -182,20 +182,20 @@ public class FileStatus implements Writable, 
Comparable,
   public boolean isDirectory() {
 return isdir;
   }
-  
+
   /**
-   * Old interface, instead use the explicit {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * Old interface, instead use the explicit {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* @return true if this is a directory.
-   * @deprecated Use {@link FileStatus#isFile()},  
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * @deprecated Use {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
* instead.
*/
   @Deprecated
-  public boolean isDir() {
-return isdir;
+  public final boolean isDir() {
+return isDirectory();
   }
-  
+
   /**
* Is this a symbolic link?
* @return true if this is a symbolic link
@@ -448,7 +448,6 @@ public class FileStatus implements Writable, 
Comparable,
 FileStatus other = PBHelper.convert(proto);
 isdir = other.isDirectory();
 length = other.getLen();
-isdir = other.isDirectory();
 block_replication = other.getReplication();
 blocksize = other.getBlockSize();
 modification_time = other.getModificationTime();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/645a8f2a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
index e0f62e4..ce03ced 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFsFileStatus.java
@@ -61,13 +61,7 @@ class ViewFsFileStatus extends FileStatus {
 

[42/50] [abbrv] hadoop git commit: HDFS-12066. When Namenode is in safemode, it may not be allowed to remove a user's erasure coding policy. Contributed by lufei.

2017-08-16 Thread haibochen
HDFS-12066. When Namenode is in safemode, it may not be allowed to remove a 
user's erasure coding policy. Contributed by lufei.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3ae3e26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3ae3e26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3ae3e26

Branch: refs/heads/YARN-1011
Commit: e3ae3e26446c2e98b7aebc4ea66256cfdb4a397f
Parents: 1040bae
Author: Wei-Chiu Chuang 
Authored: Tue Aug 15 07:41:10 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Tue Aug 15 07:41:43 2017 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 3 +++
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java  | 9 +
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index caf73f7..1cfaa54 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -7113,6 +7113,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 boolean success = false;
 writeLock();
 try {
+  checkOperation(OperationCategory.WRITE);
+  checkNameNodeSafeMode("Cannot remove erasure coding policy "
+  + ecPolicyName);
   FSDirErasureCodingOp.removeErasureCodePolicy(this, ecPolicyName);
   success = true;
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3ae3e26/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index bc95ec7..f25d28f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -482,6 +482,15 @@ public class TestSafeMode {
   // expected
 }
 
+try {
+  dfs.removeErasureCodingPolicy("testECName");
+  fail("RemoveErasureCodingPolicy should have failed.");
+} catch (IOException ioe) {
+  GenericTestUtils.assertExceptionContains(
+  "Cannot remove erasure coding policy", ioe);
+  // expected
+}
+
 assertFalse("Could not leave SM",
 dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[35/50] [abbrv] hadoop git commit: HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure durations. Contributed by Hanisha Koneru.

2017-08-16 Thread haibochen
HADOOP-14732. ProtobufRpcEngine should use Time.monotonicNow to measure 
durations. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bef4eca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bef4eca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bef4eca

Branch: refs/heads/YARN-1011
Commit: 8bef4eca28a3466707cc4ea0de0330449319a5eb
Parents: 5558792
Author: Arpit Agarwal 
Authored: Mon Aug 14 15:53:35 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon Aug 14 15:53:35 2017 -0700

--
 .../java/org/apache/hadoop/ipc/ProtobufRpcEngine.java | 14 +++---
 1 file changed, 7 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bef4eca/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 639bbad..2c0cfe5 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -190,7 +190,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 throws ServiceException {
   long startTime = 0;
   if (LOG.isDebugEnabled()) {
-startTime = Time.now();
+startTime = Time.monotonicNow();
   }
   
   if (args.length != 2) { // RpcController + Message
@@ -245,7 +245,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   if (LOG.isDebugEnabled()) {
-long callTime = Time.now() - startTime;
+long callTime = Time.monotonicNow() - startTime;
 LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
   }
   
@@ -373,19 +373,19 @@ public class ProtobufRpcEngine implements RpcEngine {
 this.server = currentCallInfo.get().server;
 this.call = Server.getCurCall().get();
 this.methodName = currentCallInfo.get().methodName;
-this.setupTime = Time.now();
+this.setupTime = Time.monotonicNow();
   }
 
   @Override
   public void setResponse(Message message) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 call.setDeferredResponse(RpcWritable.wrap(message));
 server.updateDeferredMetrics(methodName, processingTime);
   }
 
   @Override
   public void error(Throwable t) {
-long processingTime = Time.now() - setupTime;
+long processingTime = Time.monotonicNow() - setupTime;
 String detailedMetricsName = t.getClass().getSimpleName();
 server.updateDeferredMetrics(detailedMetricsName, processingTime);
 call.setDeferredError(t);
@@ -513,7 +513,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 Message param = request.getValue(prototype);
 
 Message result;
-long startTime = Time.now();
+long startTime = Time.monotonicNow();
 int qTime = (int) (startTime - receiveTime);
 Exception exception = null;
 boolean isDeferred = false;
@@ -537,7 +537,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   throw e;
 } finally {
   currentCallInfo.set(null);
-  int processingTime = (int) (Time.now() - startTime);
+  int processingTime = (int) (Time.monotonicNow() - startTime);
   if (LOG.isDebugEnabled()) {
 String msg =
 "Served: " + methodName + (isDeferred ? ", deferred" : "") +


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6323. Rolling upgrade/config change is broken on timeline v2. (Vrushali C via Haibo Chen)

2017-08-22 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355_branch2 371565911 -> f8711ad13


YARN-6323. Rolling upgrade/config change is broken on timeline v2. (Vrushali C 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8711ad1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8711ad1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8711ad1

Branch: refs/heads/YARN-5355_branch2
Commit: f8711ad13bcf19f69e8c561400ed77dbd03db069
Parents: 3715659
Author: Haibo Chen 
Authored: Mon Aug 21 10:45:10 2017 -0700
Committer: Haibo Chen 
Committed: Tue Aug 22 08:28:51 2017 -0700

--
 .../org/apache/hadoop/yarn/conf/YarnConfiguration.java|  2 ++
 .../containermanager/ContainerManagerImpl.java| 10 ++
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8711ad1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index d16b1ee..a97cb31 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2145,6 +2145,8 @@ public class YarnConfiguration extends Configuration {
 
   public static final int DEFAULT_NUMBER_OF_ASYNC_ENTITIES_TO_MERGE = 10;
 
+  /** default version for any flow. */
+  public static final String DEFAULT_FLOW_VERSION = "1";
 
   /**
* The time period for which timeline v2 client will wait for draining

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8711ad1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index a9d5f47..c3a4866 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -394,6 +394,16 @@ public class ContainerManagerImpl extends CompositeService 
implements
 LOG.debug(
 "Recovering Flow context: " + fc + " for an application " + appId);
   }
+} else {
+  // in upgrade situations, where there is no prior existing flow context,
+  // default would be used.
+  fc = new FlowContext(TimelineUtils.generateDefaultFlowName(null, appId),
+  YarnConfiguration.DEFAULT_FLOW_VERSION, appId.getClusterTimestamp());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"No prior existing flow context found. Using default Flow context: "
++ fc + " for an application " + appId);
+  }
 }
 
 LOG.info("Recovering application " + appId);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 8007d024b -> 3799625e9


YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M 
Pujare via Haibo Chen)

(cherry picked from commit 1769b12a773dc6c83f13663a77da33fa78878730)
(cherry picked from commit 86fcb8a66ccc0a0cd0c41073bb3a49315033aed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3799625e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3799625e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3799625e

Branch: refs/heads/branch-2.8
Commit: 3799625e90fff5a879710746f444924caf94f6e8
Parents: 8007d02
Author: Haibo Chen 
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 08:52:37 2017 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3799625e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f45465a..076ee29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -369,16 +369,21 @@ public class WebAppUtils {
   /**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
-   * @param sslConf the Configuration instance to use during loading of SSL 
conf
+   * @param conf the Configuration instance to load custom SSL config from
+   *
+   * @return HttpServer2.Builder instance (passed in as the first parameter)
+   * after loading SSL stores
*/
   public static HttpServer2.Builder loadSslConfiguration(
-  HttpServer2.Builder builder, Configuration sslConf) {
-if (sslConf == null) {
-  sslConf = new Configuration(false);
+  HttpServer2.Builder builder, Configuration conf) {
+
+Configuration sslConf = new Configuration(false);
+
+sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+if (conf != null) {
+  sslConf.addResource(conf);
 }
 boolean needsClientAuth = 
YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
 return builder
 .needsClientAuth(needsClientAuth)
 .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 018789215 -> f8e893f7e


YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M 
Pujare via Haibo Chen)

(cherry picked from commit 1769b12a773dc6c83f13663a77da33fa78878730)
(cherry picked from commit 86fcb8a66ccc0a0cd0c41073bb3a49315033aed5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8e893f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8e893f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8e893f7

Branch: refs/heads/branch-2.7
Commit: f8e893f7ea56e989d2b66ece53abe4991690599d
Parents: 0187892
Author: Haibo Chen 
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 08:56:56 2017 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8e893f7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f8e67ee..27e5784 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -334,16 +334,21 @@ public class WebAppUtils {
   /**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
-   * @param sslConf the Configuration instance to use during loading of SSL 
conf
+   * @param conf the Configuration instance to load custom SSL config from
+   *
+   * @return HttpServer2.Builder instance (passed in as the first parameter)
+   * after loading SSL stores
*/
   public static HttpServer2.Builder loadSslConfiguration(
-  HttpServer2.Builder builder, Configuration sslConf) {
-if (sslConf == null) {
-  sslConf = new Configuration(false);
+  HttpServer2.Builder builder, Configuration conf) {
+
+Configuration sslConf = new Configuration(false);
+
+sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+if (conf != null) {
+  sslConf.addResource(conf);
 }
 boolean needsClientAuth = 
YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
 return builder
 .needsClientAuth(needsClientAuth)
 .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.1 953dd19d8 -> a7652d862


YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M 
Pujare via Haibo Chen)

(cherry picked from commit 1769b12a773dc6c83f13663a77da33fa78878730)
(cherry picked from commit 86fcb8a66ccc0a0cd0c41073bb3a49315033aed5)
(cherry picked from commit 3799625e90fff5a879710746f444924caf94f6e8)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a7652d86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a7652d86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a7652d86

Branch: refs/heads/branch-2.8.1
Commit: a7652d862c80f30535ab156c18e503e9f91d2b54
Parents: 953dd19
Author: Haibo Chen 
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 09:43:09 2017 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a7652d86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index f45465a..076ee29 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -369,16 +369,21 @@ public class WebAppUtils {
   /**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
-   * @param sslConf the Configuration instance to use during loading of SSL 
conf
+   * @param conf the Configuration instance to load custom SSL config from
+   *
+   * @return HttpServer2.Builder instance (passed in as the first parameter)
+   * after loading SSL stores
*/
   public static HttpServer2.Builder loadSslConfiguration(
-  HttpServer2.Builder builder, Configuration sslConf) {
-if (sslConf == null) {
-  sslConf = new Configuration(false);
+  HttpServer2.Builder builder, Configuration conf) {
+
+Configuration sslConf = new Configuration(false);
+
+sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+if (conf != null) {
+  sslConf.addResource(conf);
 }
 boolean needsClientAuth = 
YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
 return builder
 .needsClientAuth(needsClientAuth)
 .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6561. Update exception message during timeline collector aux service initialization. (Vrushali C via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 4c4453f87 -> 4c8b9d23a


YARN-6561. Update exception message during timeline collector aux service 
initialization. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c8b9d23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c8b9d23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c8b9d23

Branch: refs/heads/YARN-5355
Commit: 4c8b9d23a1b4f92bf370617a0b42ac68bdcf33ac
Parents: 4c4453f
Author: Haibo Chen 
Authored: Tue May 9 21:37:30 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 21:39:25 2017 -0700

--
 .../collector/PerNodeTimelineCollectorsAuxService.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c8b9d23/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 041e7c2..266bd04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -73,7 +73,12 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
 if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
-  throw new YarnException("Timeline service v2 is not enabled");
+  throw new YarnException(
+  "Looks like timeline_collector is set as an auxillary service in "
+  + YarnConfiguration.NM_AUX_SERVICES
+  + ". But Timeline service v2 is not enabled,"
+  + " so timeline_collector needs to be removed"
+  + " from that list of auxillary services.");
 }
 collectorLingerPeriod =
 conf.getLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6435. [ATSv2] Can't retrieve more than 1000 versions of metrics in time series. (Rohith Sharma K S via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 4a4ff354e -> 4c4453f87


YARN-6435. [ATSv2] Can't retrieve more than 1000 versions of metrics in time 
series. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c4453f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c4453f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c4453f8

Branch: refs/heads/YARN-5355
Commit: 4c4453f8796abf31124962ab9461097861fabae2
Parents: 4a4ff35
Author: Haibo Chen 
Authored: Tue May 9 21:10:18 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 21:10:18 2017 -0700

--
 .../storage/application/ApplicationTable.java   | 12 ++--
 .../timelineservice/storage/entity/EntityTable.java | 12 ++--
 2 files changed, 20 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4453f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
index a02f768..cb4fc92 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
@@ -82,6 +82,13 @@ public class ApplicationTable extends 
BaseTable {
   private static final String METRICS_TTL_CONF_NAME = PREFIX
   + ".table.metrics.ttl";
 
+  /**
+   * config param name that specifies max-versions for metrics column family in
+   * entity table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+  PREFIX + ".table.metrics.max-versions";
+
   /** default value for application table name. */
   private static final String DEFAULT_TABLE_NAME =
   "timelineservice.application";
@@ -90,7 +97,7 @@ public class ApplicationTable extends 
BaseTable {
   private static final int DEFAULT_METRICS_TTL = 2592000;
 
   /** default max number of versions. */
-  private static final int DEFAULT_METRICS_MAX_VERSIONS = 1000;
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 1;
 
   private static final Log LOG = LogFactory.getLog(ApplicationTable.class);
 
@@ -136,7 +143,8 @@ public class ApplicationTable extends 
BaseTable {
 metricsCF.setBlockCacheEnabled(true);
 // always keep 1 version (the latest)
 metricsCF.setMinVersions(1);
-metricsCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
+metricsCF.setMaxVersions(
+hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
 metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
 DEFAULT_METRICS_TTL));
 applicationTableDescp.setRegionSplitPolicyClassName(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c4453f8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
index 027c8d5..633a4da 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
@@ -83,6 +83,13 @@ public class EntityTable extends BaseTable {
   private static final String METRICS_TTL_CONF_NAME = PREFIX
   + ".table.metrics.ttl";
 
+  /**
+   * config param name that specifies 

hadoop git commit: YARN-6435. [ATSv2] Can't retrieve more than 1000 versions of metrics in time series. (Rohith Sharma K S via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 166be0ee9 -> 461ee44d2


YARN-6435. [ATSv2] Can't retrieve more than 1000 versions of metrics in time 
series. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/461ee44d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/461ee44d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/461ee44d

Branch: refs/heads/trunk
Commit: 461ee44d287b1fcf0bf15d662aebd3e6f2b83a72
Parents: 166be0e
Author: Haibo Chen 
Authored: Tue May 9 21:10:18 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 21:12:57 2017 -0700

--
 .../storage/application/ApplicationTable.java   | 12 ++--
 .../timelineservice/storage/entity/EntityTable.java | 12 ++--
 2 files changed, 20 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/461ee44d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
index a02f768..cb4fc92 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/application/ApplicationTable.java
@@ -82,6 +82,13 @@ public class ApplicationTable extends 
BaseTable {
   private static final String METRICS_TTL_CONF_NAME = PREFIX
   + ".table.metrics.ttl";
 
+  /**
+   * config param name that specifies max-versions for metrics column family in
+   * entity table.
+   */
+  private static final String METRICS_MAX_VERSIONS =
+  PREFIX + ".table.metrics.max-versions";
+
   /** default value for application table name. */
   private static final String DEFAULT_TABLE_NAME =
   "timelineservice.application";
@@ -90,7 +97,7 @@ public class ApplicationTable extends 
BaseTable {
   private static final int DEFAULT_METRICS_TTL = 2592000;
 
   /** default max number of versions. */
-  private static final int DEFAULT_METRICS_MAX_VERSIONS = 1000;
+  private static final int DEFAULT_METRICS_MAX_VERSIONS = 1;
 
   private static final Log LOG = LogFactory.getLog(ApplicationTable.class);
 
@@ -136,7 +143,8 @@ public class ApplicationTable extends 
BaseTable {
 metricsCF.setBlockCacheEnabled(true);
 // always keep 1 version (the latest)
 metricsCF.setMinVersions(1);
-metricsCF.setMaxVersions(DEFAULT_METRICS_MAX_VERSIONS);
+metricsCF.setMaxVersions(
+hbaseConf.getInt(METRICS_MAX_VERSIONS, DEFAULT_METRICS_MAX_VERSIONS));
 metricsCF.setTimeToLive(hbaseConf.getInt(METRICS_TTL_CONF_NAME,
 DEFAULT_METRICS_TTL));
 applicationTableDescp.setRegionSplitPolicyClassName(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/461ee44d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
index b194f07..ddf0406 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/entity/EntityTable.java
@@ -83,6 +83,13 @@ public class EntityTable extends BaseTable {
   private static final String METRICS_TTL_CONF_NAME = PREFIX
   + ".table.metrics.ttl";
 
+  /**
+   * config param name that specifies 

hadoop git commit: YARN-6561. Update exception message during timeline collector aux service initialization. (Vrushali C via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 461ee44d2 -> ab2bb93a2


YARN-6561. Update exception message during timeline collector aux service 
initialization. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab2bb93a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab2bb93a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab2bb93a

Branch: refs/heads/trunk
Commit: ab2bb93a2ab1651b73ec9ba2d1deec4deafdecaf
Parents: 461ee44
Author: Haibo Chen 
Authored: Tue May 9 21:37:30 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 21:37:30 2017 -0700

--
 .../collector/PerNodeTimelineCollectorsAuxService.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab2bb93a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 041e7c2..266bd04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -73,7 +73,12 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
 if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
-  throw new YarnException("Timeline service v2 is not enabled");
+  throw new YarnException(
+  "Looks like timeline_collector is set as an auxillary service in "
+  + YarnConfiguration.NM_AUX_SERVICES
+  + ". But Timeline service v2 is not enabled,"
+  + " so timeline_collector needs to be removed"
+  + " from that list of auxillary services.");
 }
 collectorLingerPeriod =
 conf.getLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6561. Update exception message during timeline collector aux service initialization. (Vrushali C via Haibo Chen)

2017-05-09 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 f07a97c1a -> f1f7d6534


YARN-6561. Update exception message during timeline collector aux service 
initialization. (Vrushali C via Haibo Chen)

(cherry picked from commit 4c8b9d23a1b4f92bf370617a0b42ac68bdcf33ac)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1f7d653
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1f7d653
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1f7d653

Branch: refs/heads/YARN-5355-branch-2
Commit: f1f7d6534d8f4a9a4f9bf4b1c83b2dce463e40aa
Parents: f07a97c
Author: Haibo Chen 
Authored: Tue May 9 21:37:30 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 9 22:04:30 2017 -0700

--
 .../collector/PerNodeTimelineCollectorsAuxService.java| 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1f7d653/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
index 041e7c2..266bd04 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/PerNodeTimelineCollectorsAuxService.java
@@ -73,7 +73,12 @@ public class PerNodeTimelineCollectorsAuxService extends 
AuxiliaryService {
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
 if (!YarnConfiguration.timelineServiceV2Enabled(conf)) {
-  throw new YarnException("Timeline service v2 is not enabled");
+  throw new YarnException(
+  "Looks like timeline_collector is set as an auxillary service in "
+  + YarnConfiguration.NM_AUX_SERVICES
+  + ". But Timeline service v2 is not enabled,"
+  + " so timeline_collector needs to be removed"
+  + " from that list of auxillary services.");
 }
 collectorLingerPeriod =
 conf.getLong(YarnConfiguration.ATS_APP_COLLECTOR_LINGER_PERIOD_IN_MS,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)

2017-05-12 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk a9e24a13b -> 6c35001b9


YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c35001b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c35001b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c35001b

Branch: refs/heads/trunk
Commit: 6c35001b9f93fd85a02c3465e87bfd1612f4cce9
Parents: a9e24a1
Author: Haibo Chen 
Authored: Fri May 12 13:05:40 2017 -0700
Committer: Haibo Chen 
Committed: Fri May 12 13:05:40 2017 -0700

--
 .../resourcemanager/scheduler/fair/policies/FairSharePolicy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c35001b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index f8cdb45..c3ec47a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -42,7 +42,7 @@ import com.google.common.annotations.VisibleForTesting;
 @Private
 @Unstable
 public class FairSharePolicy extends SchedulingPolicy {
-  private static final Log LOG = LogFactory.getLog(FifoPolicy.class);
+  private static final Log LOG = LogFactory.getLog(FairSharePolicy.class);
   @VisibleForTesting
   public static final String NAME = "fair";
   private static final DefaultResourceCalculator RESOURCE_CALCULATOR =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)

2017-05-12 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 53d9f56a1 -> 6284cc466


YARN-6580. Incorrect logger for FairSharePolicy. (Vrushali C via Haibo Chen)

(cherry picked from commit 6c35001b9f93fd85a02c3465e87bfd1612f4cce9)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6284cc46
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6284cc46
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6284cc46

Branch: refs/heads/branch-2
Commit: 6284cc46632a906cb86a479af2fdfccd1937e6b1
Parents: 53d9f56
Author: Haibo Chen 
Authored: Fri May 12 13:05:40 2017 -0700
Committer: Haibo Chen 
Committed: Fri May 12 13:07:17 2017 -0700

--
 .../resourcemanager/scheduler/fair/policies/FairSharePolicy.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6284cc46/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index f8cdb45..c3ec47a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -42,7 +42,7 @@ import com.google.common.annotations.VisibleForTesting;
 @Private
 @Unstable
 public class FairSharePolicy extends SchedulingPolicy {
-  private static final Log LOG = LogFactory.getLog(FifoPolicy.class);
+  private static final Log LOG = LogFactory.getLog(FairSharePolicy.class);
   @VisibleForTesting
   public static final String NAME = "fair";
   private static final DefaultResourceCalculator RESOURCE_CALCULATOR =


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)

2017-05-25 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2b5ad4876 -> 47474fffa


YARN-6555. Store application flow context in NM state store for work-preserving 
restart. (Rohith Sharma K S via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47474fff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47474fff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47474fff

Branch: refs/heads/trunk
Commit: 47474fffac085e0e5ea46336bf80ccd0677017a3
Parents: 2b5ad48
Author: Haibo Chen 
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Haibo Chen 
Committed: Thu May 25 21:15:27 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 71 +---
 .../application/ApplicationImpl.java| 27 ++--
 .../yarn_server_nodemanager_recovery.proto  |  7 ++
 .../TestContainerManagerRecovery.java   | 40 +--
 4 files changed, 111 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47474fff/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index f65f1ac..50268b9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -381,10 +382,20 @@ public class ContainerManagerImpl extends 
CompositeService implements
   new LogAggregationContextPBImpl(p.getLogAggregationContext());
 }
 
+FlowContext fc = null;
+if (p.getFlowContext() != null) {
+  FlowContextProto fcp = p.getFlowContext();
+  fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+  fcp.getFlowRunId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"Recovering Flow context: " + fc + " for an application " + appId);
+  }
+}
+
 LOG.info("Recovering application " + appId);
-//TODO: Recover flow and flow run ID
-ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-creds, context, p.getAppLogAggregationInitedTime());
+ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+appId, creds, context, p.getAppLogAggregationInitedTime());
 context.getApplications().put(appId, app);
 app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -936,7 +947,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
   String user, Credentials credentials,
   Map<ApplicationAccessType, String> appAcls,
-  LogAggregationContext logAggregationContext) {
+  LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
 ContainerManagerApplicationProto.Builder builder =
 ContainerManagerApplicationProto.newBuilder();
@@ -971,6 +982,16 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
 }
 
+builder.clearFlowContext();
+if (flowContext != null && flowContext.getFlowName() != null
+&& flowContext.getFlowVersion() != null) {
+  FlowContextProto fcp =
+  FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+  .setFlowVersion(flowContext.getFlowVersion())
+  .setFlowRunId(flowContext.getFlowRunId()).build();
+  builder.setFlowContext(fcp);
+}
+
 return builder.build();
   }
 

hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)

2017-05-25 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355-branch-2 f1f7d6534 -> 303d7e0a2


YARN-6555. Store application flow context in NM state store for work-preserving 
restart. (Rohith Sharma K S via Haibo Chen)

(cherry picked from commit 47474fffac085e0e5ea46336bf80ccd0677017a3)
(cherry picked from commit 8817cb5c8424359b880c6d700e53092f0269c1bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/303d7e0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/303d7e0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/303d7e0a

Branch: refs/heads/YARN-5355-branch-2
Commit: 303d7e0a284544b13d5ea04ef699823d31b7933e
Parents: f1f7d65
Author: Haibo Chen 
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Haibo Chen 
Committed: Thu May 25 21:38:58 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 71 +---
 .../application/ApplicationImpl.java| 27 ++--
 .../yarn_server_nodemanager_recovery.proto  |  7 ++
 .../TestContainerManagerRecovery.java   | 40 +--
 4 files changed, 111 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/303d7e0a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 1d822fe..a9d5f47 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -384,10 +385,20 @@ public class ContainerManagerImpl extends 
CompositeService implements
   new LogAggregationContextPBImpl(p.getLogAggregationContext());
 }
 
+FlowContext fc = null;
+if (p.getFlowContext() != null) {
+  FlowContextProto fcp = p.getFlowContext();
+  fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+  fcp.getFlowRunId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"Recovering Flow context: " + fc + " for an application " + appId);
+  }
+}
+
 LOG.info("Recovering application " + appId);
-//TODO: Recover flow and flow run ID
-ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-creds, context, p.getAppLogAggregationInitedTime());
+ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+appId, creds, context, p.getAppLogAggregationInitedTime());
 context.getApplications().put(appId, app);
 app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -941,7 +952,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
   String user, Credentials credentials,
   Map<ApplicationAccessType, String> appAcls,
-  LogAggregationContext logAggregationContext) {
+  LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
 ContainerManagerApplicationProto.Builder builder =
 ContainerManagerApplicationProto.newBuilder();
@@ -976,6 +987,16 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
 }
 
+builder.clearFlowContext();
+if (flowContext != null && flowContext.getFlowName() != null
+&& flowContext.getFlowVersion() != null) {
+  FlowContextProto fcp =
+  FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+  

hadoop git commit: YARN-6555. Store application flow context in NM state store for work-preserving restart. (Rohith Sharma K S via Haibo Chen)

2017-05-25 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-5355 3e052dbbe -> 8817cb5c8


YARN-6555. Store application flow context in NM state store for work-preserving 
restart. (Rohith Sharma K S via Haibo Chen)

(cherry picked from commit 47474fffac085e0e5ea46336bf80ccd0677017a3)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8817cb5c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8817cb5c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8817cb5c

Branch: refs/heads/YARN-5355
Commit: 8817cb5c8424359b880c6d700e53092f0269c1bb
Parents: 3e052db
Author: Haibo Chen 
Authored: Thu May 25 21:15:27 2017 -0700
Committer: Haibo Chen 
Committed: Thu May 25 21:35:58 2017 -0700

--
 .../containermanager/ContainerManagerImpl.java  | 71 +---
 .../application/ApplicationImpl.java| 27 ++--
 .../yarn_server_nodemanager_recovery.proto  |  7 ++
 .../TestContainerManagerRecovery.java   | 40 +--
 4 files changed, 111 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8817cb5c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 125b046..37dd598 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -85,6 +85,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.ContainerManagerApplicationProto;
+import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.FlowContextProto;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
 import org.apache.hadoop.yarn.server.api.ContainerType;
@@ -384,10 +385,20 @@ public class ContainerManagerImpl extends 
CompositeService implements
   new LogAggregationContextPBImpl(p.getLogAggregationContext());
 }
 
+FlowContext fc = null;
+if (p.getFlowContext() != null) {
+  FlowContextProto fcp = p.getFlowContext();
+  fc = new FlowContext(fcp.getFlowName(), fcp.getFlowVersion(),
+  fcp.getFlowRunId());
+  if (LOG.isDebugEnabled()) {
+LOG.debug(
+"Recovering Flow context: " + fc + " for an application " + appId);
+  }
+}
+
 LOG.info("Recovering application " + appId);
-//TODO: Recover flow and flow run ID
-ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId,
-creds, context, p.getAppLogAggregationInitedTime());
+ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), fc,
+appId, creds, context, p.getAppLogAggregationInitedTime());
 context.getApplications().put(appId, app);
 app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
   }
@@ -949,7 +960,7 @@ public class ContainerManagerImpl extends CompositeService 
implements
   private ContainerManagerApplicationProto buildAppProto(ApplicationId appId,
   String user, Credentials credentials,
   Map<ApplicationAccessType, String> appAcls,
-  LogAggregationContext logAggregationContext) {
+  LogAggregationContext logAggregationContext, FlowContext flowContext) {
 
 ContainerManagerApplicationProto.Builder builder =
 ContainerManagerApplicationProto.newBuilder();
@@ -984,6 +995,16 @@ public class ContainerManagerImpl extends CompositeService 
implements
   }
 }
 
+builder.clearFlowContext();
+if (flowContext != null && flowContext.getFlowName() != null
+&& flowContext.getFlowVersion() != null) {
+  FlowContextProto fcp =
+  FlowContextProto.newBuilder().setFlowName(flowContext.getFlowName())
+  .setFlowVersion(flowContext.getFlowVersion())
+  .setFlowRunId(flowContext.getFlowRunId()).build();
+   

hadoop git commit: YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)

2017-05-30 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 91d6fe151 -> 4b4a6524f


YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b4a6524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b4a6524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b4a6524

Branch: refs/heads/trunk
Commit: 4b4a6524f2df3a891e9d5486ec39f7987766d84f
Parents: 91d6fe1
Author: Haibo Chen 
Authored: Tue May 30 16:58:15 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 30 16:58:15 2017 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4a6524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
index 0858a0b..ce5a513 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -26,7 +26,6 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
  * event handlers based on event types.
  * 
  */
-@SuppressWarnings("rawtypes")
 @Public
 @Evolving
 public interface Dispatcher {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)

2017-05-08 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk ff5ec3b84 -> 1769b12a7


YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M 
Pujare via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1769b12a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1769b12a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1769b12a

Branch: refs/heads/trunk
Commit: 1769b12a773dc6c83f13663a77da33fa78878730
Parents: ff5ec3b
Author: Haibo Chen 
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen 
Committed: Mon May 8 15:46:12 2017 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1769b12a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 64a4b2b..eabd9b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -375,16 +375,21 @@ public class WebAppUtils {
   /**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
-   * @param sslConf the Configuration instance to use during loading of SSL 
conf
+   * @param conf the Configuration instance to load custom SSL config from
+   *
+   * @return HttpServer2.Builder instance (passed in as the first parameter)
+   * after loading SSL stores
*/
   public static HttpServer2.Builder loadSslConfiguration(
-  HttpServer2.Builder builder, Configuration sslConf) {
-if (sslConf == null) {
-  sslConf = new Configuration(false);
+  HttpServer2.Builder builder, Configuration conf) {
+
+Configuration sslConf = new Configuration(false);
+
+sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+if (conf != null) {
+  sslConf.addResource(conf);
 }
 boolean needsClientAuth = 
YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
 return builder
 .needsClientAuth(needsClientAuth)
 .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M Pujare via Haibo Chen)

2017-05-08 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b8870d815 -> 86fcb8a66


YARN-6457. Allow custom SSL configuration to be supplied in WebApps. (Sanjay M 
Pujare via Haibo Chen)

(cherry picked from commit 1769b12a773dc6c83f13663a77da33fa78878730)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86fcb8a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86fcb8a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86fcb8a6

Branch: refs/heads/branch-2
Commit: 86fcb8a66ccc0a0cd0c41073bb3a49315033aed5
Parents: b8870d8
Author: Haibo Chen 
Authored: Mon May 8 15:43:20 2017 -0700
Committer: Haibo Chen 
Committed: Mon May 8 15:47:01 2017 -0700

--
 .../hadoop/yarn/webapp/util/WebAppUtils.java   | 17 +++--
 1 file changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86fcb8a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
index 29e41ea..a32b2be 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
@@ -371,16 +371,21 @@ public class WebAppUtils {
   /**
* Load the SSL keystore / truststore into the HttpServer builder.
* @param builder the HttpServer2.Builder to populate with ssl config
-   * @param sslConf the Configuration instance to use during loading of SSL 
conf
+   * @param conf the Configuration instance to load custom SSL config from
+   *
+   * @return HttpServer2.Builder instance (passed in as the first parameter)
+   * after loading SSL stores
*/
   public static HttpServer2.Builder loadSslConfiguration(
-  HttpServer2.Builder builder, Configuration sslConf) {
-if (sslConf == null) {
-  sslConf = new Configuration(false);
+  HttpServer2.Builder builder, Configuration conf) {
+
+Configuration sslConf = new Configuration(false);
+
+sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
+if (conf != null) {
+  sslConf.addResource(conf);
 }
 boolean needsClientAuth = 
YarnConfiguration.YARN_SSL_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-sslConf.addResource(YarnConfiguration.YARN_SSL_SERVER_RESOURCE_DEFAULT);
-
 return builder
 .needsClientAuth(needsClientAuth)
 .keyPassword(getPassword(sslConf, WEB_APP_KEY_PASSWORD_KEY))


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[31/50] [abbrv] hadoop git commit: Update maven version for 3.0.0-alpha4 development

2017-05-31 Thread haibochen
Update maven version for 3.0.0-alpha4 development


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ad896d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ad896d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ad896d

Branch: refs/heads/YARN-1011
Commit: 16ad896d5cb8ab21e9cb2763df7c15cfcc0a6ede
Parents: 303c8dc
Author: Andrew Wang 
Authored: Fri May 26 14:09:44 2017 -0700
Committer: Andrew Wang 
Committed: Fri May 26 14:09:44 2017 -0700

--
 hadoop-assemblies/pom.xml| 4 ++--
 hadoop-build-tools/pom.xml   | 2 +-
 hadoop-client-modules/hadoop-client-api/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-check-invariants/pom.xml | 4 ++--
 .../hadoop-client-check-test-invariants/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-integration-tests/pom.xml| 4 ++--
 hadoop-client-modules/hadoop-client-minicluster/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client-runtime/pom.xml  | 4 ++--
 hadoop-client-modules/hadoop-client/pom.xml  | 4 ++--
 hadoop-client-modules/pom.xml| 2 +-
 hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml| 4 ++--
 hadoop-cloud-storage-project/pom.xml | 4 ++--
 hadoop-common-project/hadoop-annotations/pom.xml | 4 ++--
 hadoop-common-project/hadoop-auth-examples/pom.xml   | 4 ++--
 hadoop-common-project/hadoop-auth/pom.xml| 4 ++--
 hadoop-common-project/hadoop-common/pom.xml  | 4 ++--
 hadoop-common-project/hadoop-kms/pom.xml | 4 ++--
 hadoop-common-project/hadoop-minikdc/pom.xml | 4 ++--
 hadoop-common-project/hadoop-nfs/pom.xml | 4 ++--
 hadoop-common-project/pom.xml| 4 ++--
 hadoop-dist/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml   | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml| 4 ++--
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml  | 4 ++--
 hadoop-hdfs-project/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml  | 4 ++--
 .../hadoop-mapreduce-client-common/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml | 4 ++--
 .../hadoop-mapreduce-client-hs-plugins/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-jobclient/pom.xml| 4 ++--
 .../hadoop-mapreduce-client-nativetask/pom.xml   | 4 ++--
 .../hadoop-mapreduce-client-shuffle/pom.xml  | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml | 4 ++--
 hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml   | 4 ++--
 hadoop-mapreduce-project/pom.xml | 4 ++--
 hadoop-maven-plugins/pom.xml | 2 +-
 hadoop-minicluster/pom.xml   | 4 ++--
 hadoop-project-dist/pom.xml  | 4 ++--
 hadoop-project/pom.xml   | 4 ++--
 hadoop-tools/hadoop-aliyun/pom.xml   | 2 +-
 hadoop-tools/hadoop-archive-logs/pom.xml | 4 ++--
 hadoop-tools/hadoop-archives/pom.xml | 4 ++--
 hadoop-tools/hadoop-aws/pom.xml  | 4 ++--
 hadoop-tools/hadoop-azure-datalake/pom.xml   | 2 +-
 hadoop-tools/hadoop-azure/pom.xml| 2 +-
 hadoop-tools/hadoop-datajoin/pom.xml | 4 ++--
 hadoop-tools/hadoop-distcp/pom.xml   | 4 ++--
 hadoop-tools/hadoop-extras/pom.xml   | 4 ++--
 hadoop-tools/hadoop-gridmix/pom.xml  | 4 ++--
 hadoop-tools/hadoop-kafka/pom.xml| 4 ++--
 hadoop-tools/hadoop-openstack/pom.xml| 4 ++--
 hadoop-tools/hadoop-pipes/pom.xml| 4 ++--
 hadoop-tools/hadoop-rumen/pom.xml| 4 ++--
 hadoop-tools/hadoop-sls/pom.xml  | 4 ++--
 hadoop-tools/hadoop-streaming/pom.xml| 4 ++--
 

[17/50] [abbrv] hadoop git commit: Addendum patch to fix Docker sanitization.

2017-05-31 Thread haibochen
Addendum patch to fix Docker sanitization.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1a56a3db
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1a56a3db
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1a56a3db

Branch: refs/heads/YARN-1011
Commit: 1a56a3db599659091284e3016d0309052966d018
Parents: bc28da6
Author: Varun Vasudev 
Authored: Wed May 24 16:03:28 2017 +0530
Committer: Varun Vasudev 
Committed: Thu May 25 14:53:57 2017 +0530

--
 .../impl/container-executor.c   |  6 +-
 .../test/test-container-executor.c  | 20 +---
 2 files changed, 6 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a56a3db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 3a87646..5d138f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1292,13 +1292,9 @@ char* sanitize_docker_command(const char *line) {
   }
 
   if(optind < split_counter) {
-quote_and_append_arg(&output, &output_size, "", linesplit[optind++]);
-strcat(output, "'");
 while(optind < split_counter) {
-  strcat(output, linesplit[optind++]);
-  strcat(output, " ");
+  quote_and_append_arg(&output, &output_size, "", linesplit[optind++]);
 }
-strcat(output, "'");
   }
 
   return output;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1a56a3db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index ff76d4a..83d11ec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1087,11 +1087,6 @@ void test_trim_function() {
 
 void test_sanitize_docker_command() {
 
-/*
-  char *input[] = {
-"run "
-  };
-*/
   char *input[] = {
 "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 
--cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL 
--cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP 
--cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID 
--cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE 
--cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro 
-v /yarn/local/cdir:/yarn/local/cdir -v 
/yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash 
/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
 "run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 
--cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL 
--cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP 
--cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID 
--cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE 
--cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro 
-v /yarn/local/cdir:/yarn/local/cdir -v 
/yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash 
/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
@@ -1099,17 +1094,12 @@ void test_sanitize_docker_command() {
 "run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged 
--rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true 

[41/50] [abbrv] hadoop git commit: HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

2017-05-31 Thread haibochen
HADOOP-14456. Modifier 'static' is redundant for inner enums. Contributed by 
ZhangBing Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62857be2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62857be2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62857be2

Branch: refs/heads/YARN-1011
Commit: 62857be2110aaded84a93fc9891742a1271b2b85
Parents: af03c33
Author: Brahma Reddy Battula 
Authored: Wed May 31 01:07:58 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed May 31 01:07:58 2017 +0800

--
 .../src/main/java/org/apache/hadoop/crypto/OpensslCipher.java  | 4 ++--
 .../main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 2 +-
 .../src/main/java/org/apache/hadoop/fs/Options.java| 2 +-
 .../org/apache/hadoop/fs/shell/CommandWithDestination.java | 2 +-
 .../main/java/org/apache/hadoop/ha/ActiveStandbyElector.java   | 4 ++--
 .../src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java  | 2 +-
 .../src/main/java/org/apache/hadoop/io/SequenceFile.java   | 2 +-
 .../hadoop/io/compress/zlib/BuiltInGzipDecompressor.java   | 2 +-
 .../org/apache/hadoop/io/compress/zlib/ZlibCompressor.java | 6 +++---
 .../org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java   | 2 +-
 .../main/java/org/apache/hadoop/io/file/tfile/Compression.java | 2 +-
 .../src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java  | 2 +-
 .../src/main/java/org/apache/hadoop/ipc/Server.java| 2 +-
 .../main/java/org/apache/hadoop/security/SaslRpcServer.java| 4 ++--
 .../java/org/apache/hadoop/security/UserGroupInformation.java  | 2 +-
 .../main/java/org/apache/hadoop/security/ssl/SSLFactory.java   | 2 +-
 .../token/delegation/web/DelegationTokenAuthenticator.java | 2 +-
 .../src/main/java/org/apache/hadoop/util/StringUtils.java  | 2 +-
 .../test/java/org/apache/hadoop/fs/FileContextTestHelper.java  | 2 +-
 .../test/java/org/apache/hadoop/fs/FileSystemTestHelper.java   | 2 +-
 .../src/test/java/org/apache/hadoop/io/TestIOUtils.java| 2 +-
 .../org/apache/hadoop/io/retry/UnreliableImplementation.java   | 2 +-
 .../main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java | 2 +-
 .../src/main/java/org/apache/hadoop/mount/MountInterface.java  | 2 +-
 .../src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java | 2 +-
 .../main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java | 2 +-
 .../src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java | 2 +-
 27 files changed, 32 insertions(+), 32 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 2eb16ee..6a03bb6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -47,7 +47,7 @@ public final class OpensslCipher {
   public static final int DECRYPT_MODE = 0;
   
   /** Currently only support AES/CTR/NoPadding. */
-  private static enum AlgMode {
+  private enum AlgMode {
 AES_CTR;
 
 static int get(String algorithm, String mode) 
@@ -61,7 +61,7 @@ public final class OpensslCipher {
 }
   }
   
-  private static enum Padding {
+  private enum Padding {
 NoPadding;
 
 static int get(String padding) throws NoSuchPaddingException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62857be2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
index 74052eb..8411ffb 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
@@ -198,7 +198,7 @@ public class ValueQueue  {
* "n" values and Queue is empty.
* This decides how many values to return when client calls "getAtMost"
*/
-  public static enum SyncGenerationPolicy {
+  public enum SyncGenerationPolicy {
 ATLEAST_ONE, // Return atleast 1 value
 LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
 ALL // Return 

[45/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

2017-05-31 Thread haibochen
YARN-6366. Refactor the NodeManager DeletionService to support additional 
DeletionTask types. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/547f18cb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/547f18cb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/547f18cb

Branch: refs/heads/YARN-1011
Commit: 547f18cb96aeda55cc19b38be2be4d631b3a5f4f
Parents: 4b4a652
Author: Varun Vasudev 
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Varun Vasudev 
Committed: Wed May 31 16:15:35 2017 +0530

--
 .../server/nodemanager/DeletionService.java | 468 ---
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java  |  73 +++
 .../deletion/recovery/package-info.java |  25 +
 .../deletion/task/DeletionTask.java | 258 ++
 .../deletion/task/DeletionTaskType.java |  24 +
 .../deletion/task/FileDeletionTask.java | 202 
 .../deletion/task/package-info.java |  25 +
 .../localizer/LocalResourcesTrackerImpl.java|  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java|  60 ++-
 .../loghandler/NonAggregatingLogHandler.java|   7 +-
 .../yarn_server_nodemanager_recovery.proto  |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java  |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java   |  91 
 .../BaseContainerManagerTest.java   |   7 +-
 .../deletion/task/FileDeletionMatcher.java  |  84 
 .../deletion/task/TestFileDeletionTask.java |  85 
 .../TestLocalResourcesTrackerImpl.java  |   5 +-
 .../TestResourceLocalizationService.java|  33 +-
 .../TestAppLogAggregatorImpl.java   |  15 +-
 .../TestLogAggregationService.java  |  17 +-
 .../TestNonAggregatingLogHandler.java   |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547f18cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import 
org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import 
org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 

[40/50] [abbrv] hadoop git commit: YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.

2017-05-31 Thread haibochen
YARN-6635. Refactor yarn-app pages in new YARN UI. Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af03c333
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af03c333
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af03c333

Branch: refs/heads/YARN-1011
Commit: af03c3334610bc4d8788e7c7b21d5aa6b946fe26
Parents: 07e60f8
Author: Sunil G 
Authored: Tue May 30 13:52:40 2017 +0530
Committer: Sunil G 
Committed: Tue May 30 13:52:40 2017 +0530

--
 .../webapp/app/controllers/app-table-columns.js |   4 +-
 .../webapp/app/controllers/yarn-app-attempt.js  |   8 +-
 .../webapp/app/controllers/yarn-app-attempts.js |  57 --
 .../src/main/webapp/app/controllers/yarn-app.js |  56 +++---
 .../webapp/app/controllers/yarn-app/attempts.js |  24 +++
 .../webapp/app/controllers/yarn-app/charts.js   |  28 +++
 .../webapp/app/controllers/yarn-app/info.js |  32 
 .../app/controllers/yarn-apps/services.js   |  31 
 .../webapp/app/controllers/yarn-flowrun/info.js |   2 +-
 .../src/main/webapp/app/router.js   |   8 +-
 .../main/webapp/app/routes/yarn-app-attempts.js |  35 
 .../src/main/webapp/app/routes/yarn-app.js  |  35 +---
 .../main/webapp/app/routes/yarn-app/attempts.js |  37 
 .../main/webapp/app/routes/yarn-app/charts.js   |  53 ++
 .../src/main/webapp/app/routes/yarn-app/info.js |  37 
 .../webapp/app/routes/yarn-apps/services.js |  33 
 .../main/webapp/app/templates/application.hbs   |   2 +-
 .../app/templates/components/app-table.hbs  |   6 +-
 .../webapp/app/templates/yarn-app-attempts.hbs  |  58 --
 .../src/main/webapp/app/templates/yarn-app.hbs  | 185 +--
 .../webapp/app/templates/yarn-app/attempts.hbs  |  29 +++
 .../webapp/app/templates/yarn-app/charts.hbs|  43 +
 .../main/webapp/app/templates/yarn-app/info.hbs | 167 +
 .../webapp/app/templates/yarn-app/loading.hbs   |  23 +++
 .../src/main/webapp/app/templates/yarn-apps.hbs |   5 +-
 .../webapp/app/templates/yarn-apps/services.hbs |  25 ---
 .../main/webapp/app/templates/yarn-services.hbs |   3 +-
 .../unit/controllers/yarn-app-attempts-test.js  |  30 ---
 .../unit/controllers/yarn-app/attempts-test.js  |  30 +++
 .../unit/controllers/yarn-app/charts-test.js|  30 +++
 .../unit/controllers/yarn-app/info-test.js  |  30 +++
 .../unit/controllers/yarn-apps/services-test.js |  30 ---
 .../tests/unit/routes/yarn-app-attempts-test.js |  29 ---
 .../tests/unit/routes/yarn-app/attempts-test.js |  29 +++
 .../tests/unit/routes/yarn-app/charts-test.js   |  29 +++
 .../tests/unit/routes/yarn-app/info-test.js |  29 +++
 36 files changed, 714 insertions(+), 578 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
index 704abfb..8a34f1a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js
@@ -38,7 +38,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.id,
-  href: `#/yarn-app/${row.id}`
+  href: `#/yarn-app/${row.id}/info`
 };
   }
   }, {
@@ -112,7 +112,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('appName'),
-  href: `#/yarn-app/${row.id}?service=${row.get('appName')}`
+  href: `#/yarn-app/${row.id}/info?service=${row.get('appName')}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af03c333/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
index fbe6fa9..1121a84 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app-attempt.js
@@ -34,10 +34,10 @@ 

[43/50] [abbrv] hadoop git commit: YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)

2017-05-31 Thread haibochen
YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler 
via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b4a6524
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b4a6524
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b4a6524

Branch: refs/heads/YARN-1011
Commit: 4b4a6524f2df3a891e9d5486ec39f7987766d84f
Parents: 91d6fe1
Author: Haibo Chen 
Authored: Tue May 30 16:58:15 2017 -0700
Committer: Haibo Chen 
Committed: Tue May 30 16:58:15 2017 -0700

--
 .../src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b4a6524/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
index 0858a0b..ce5a513 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -26,7 +26,6 @@ import 
org.apache.hadoop.classification.InterfaceStability.Evolving;
  * event handlers based on event types.
  * 
  */
-@SuppressWarnings("rawtypes")
 @Public
 @Evolving
 public interface Dispatcher {


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[03/50] [abbrv] hadoop git commit: HDFS-11419. Performance analysis of new DFSNetworkTopology#chooseRandom. Contributed by Chen Liang.

2017-05-31 Thread haibochen
HDFS-11419. Performance analysis of new DFSNetworkTopology#chooseRandom. 
Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0f346af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0f346af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0f346af

Branch: refs/heads/YARN-1011
Commit: d0f346af26293f0ac8d118f98628f5528c1d6811
Parents: ca6bcc3
Author: Arpit Agarwal 
Authored: Mon May 22 20:25:34 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon May 22 20:25:34 2017 -0700

--
 .../net/TestDFSNetworkTopologyPerformance.java  | 524 +++
 1 file changed, 524 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0f346af/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
new file mode 100644
index 000..77a059a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
@@ -0,0 +1,524 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.net;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.Node;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.text.NumberFormat;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.Random;
+import java.util.Set;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Performance test of the new DFSNetworkTopology chooseRandom.
+ *
+ * NOTE that the tests are not for correctness but for performance comparison,
+ * so the tests are printing and writing down values rather than doing 
assertion
+ * checks or timeout checks. Therefore, it is pointless to run these
+ * tests without something reading the value. So disabled the tests to for now,
+ * anyone interested in looking at the numbers can enable them.
+ */
+@Ignore
+public class TestDFSNetworkTopologyPerformance {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestDFSNetworkTopologyPerformance.class);
+  private static NetworkTopology cluster;
+  private static DFSNetworkTopology dfscluster;
+  private DatanodeDescriptor[] dataNodes;
+
+  private final static int NODE_NUM = 2000;
+  private final static int OP_NUM = 2;
+
+  private final static int L1_NUM = 5;
+  private final static int L2_NUM = 10;
+  private final static int L3_NUM = 10;
+
+  private final static float NS_TO_MS = 100;
+
+  private final static Random RANDOM = new Random();
+
+  private Node node;
+  private long totalStart;
+  private long totalEnd;
+  private int totalTrials;
+  private float totalMs;
+  private Set excluded;
+  private static String[] racks;
+  private static String[] hosts;
+  private static StorageType[] types;
+
+  private static long[] records;
+  private long localStart;
+  private long localEnd;
+
+
+  @BeforeClass
+  public static void init() throws Exception {
+racks = new String[NODE_NUM];
+hosts = new String[NODE_NUM];
+types = new StorageType[NODE_NUM];
+records = new long[OP_NUM];
+for (int i = 0; i < NODE_NUM; i++) {
+  racks[i] = getRandLocation();
+  hosts[i] = "host" + i;
+}
+  }
+
+  @Before
+  public void setup() 

[05/50] [abbrv] hadoop git commit: HDFS-11864. Document Metrics to track usage of memory for writes. Contributed by Yiqun Lin.

2017-05-31 Thread haibochen
HDFS-11864. Document Metrics to track usage of memory for writes. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/52661e09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/52661e09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/52661e09

Branch: refs/heads/YARN-1011
Commit: 52661e0912a79d1e851afc2b46c941ce952ca63f
Parents: 1b5451b
Author: Brahma Reddy Battula 
Authored: Tue May 23 23:52:42 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Tue May 23 23:52:42 2017 +0800

--
 .../hadoop-common/src/site/markdown/Metrics.md   | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/52661e09/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
--
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md 
b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index bd44f74..a14c86d 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -283,6 +283,21 @@ Each metrics record contains tags such as SessionId and 
Hostname as additional i
 | `WritesFromLocalClient` | Total number of write operations from local client 
|
 | `WritesFromRemoteClient` | Total number of write operations from remote 
client |
 | `BlocksGetLocalPathInfo` | Total number of operations to get local path 
names of blocks |
+| `RamDiskBlocksWrite` | Total number of blocks written to memory |
+| `RamDiskBlocksWriteFallback` | Total number of blocks written to memory but 
not satisfied (failed-over to disk) |
+| `RamDiskBytesWrite` | Total number of bytes written to memory |
+| `RamDiskBlocksReadHits` | Total number of times a block in memory was read |
+| `RamDiskBlocksEvicted` | Total number of blocks evicted in memory |
+| `RamDiskBlocksEvictedWithoutRead` | Total number of blocks evicted in memory 
without ever being read from memory |
+| `RamDiskBlocksEvictionWindowMsNumOps` | Number of blocks evicted in memory|
+| `RamDiskBlocksEvictionWindowMsAvgTime` | Average time of blocks in memory 
before being evicted in milliseconds |
+| `RamDiskBlocksEvictionWindows`*num*`s(50|75|90|95|99)thPercentileLatency` | 
The 50/75/90/95/99th percentile of latency between memory write and eviction in 
milliseconds. Percentile measurement is off by default, by watching no 
intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
+| `RamDiskBlocksLazyPersisted` | Total number of blocks written to disk by 
lazy writer |
+| `RamDiskBlocksDeletedBeforeLazyPersisted` | Total number of blocks deleted 
by application before being persisted to disk |
+| `RamDiskBytesLazyPersisted` | Total number of bytes written to disk by lazy 
writer |
+| `RamDiskBlocksLazyPersistWindowMsNumOps` | Number of blocks written to disk 
by lazy writer |
+| `RamDiskBlocksLazyPersistWindowMsAvgTime` | Average time of blocks written 
to disk by lazy writer in milliseconds |
+| `RamDiskBlocksLazyPersistWindows`*num*`s(50|75|90|95|99)thPercentileLatency` 
| The 50/75/90/95/99th percentile of latency between memory write and disk 
persist in milliseconds. Percentile measurement is off by default, by watching 
no intervals. The intervals are specified by 
`dfs.metrics.percentiles.intervals`. |
 | `FsyncCount` | Total number of fsync |
 | `VolumeFailures` | Total number of volume failures occurred |
 | `ReadBlockOpNumOps` | Total number of read operations |


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: YARN-6615. AmIpFilter drops query parameters on redirect. Contributed by Wilfred Spiegelenburg

2017-05-31 Thread haibochen
YARN-6615. AmIpFilter drops query parameters on redirect. Contributed by 
Wilfred Spiegelenburg


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf1949c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf1949c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf1949c

Branch: refs/heads/YARN-1011
Commit: 8bf1949c0efed700781eb47cf18f9f88443ed506
Parents: a62be38
Author: Jason Lowe 
Authored: Wed May 24 11:22:42 2017 -0500
Committer: Jason Lowe 
Committed: Wed May 24 11:22:42 2017 -0500

--
 .../hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java | 6 ++
 .../hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java   | 8 
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf1949c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index fe6fc32..6579191 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -150,6 +150,12 @@ public class AmIpFilter implements Filter {
 insertPoint += PROXY_PATH.length();
 redirect.insert(insertPoint, "/redirect");
   }
+  // add the query parameters on the redirect if there were any
+  String queryString = httpReq.getQueryString();
+  if (queryString != null && !queryString.isEmpty()) {
+redirect.append("?");
+redirect.append(queryString);
+  }
 
   ProxyUtils.sendRedirect(httpReq, httpResp, redirect.toString());
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf1949c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
index 9dc0ce0..b788f5d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/amfilter/TestAmFilter.java
@@ -176,6 +176,14 @@ public class TestAmFilter {
 redirect = response.getHeader(ProxyUtils.LOCATION);
assertEquals("http://bogus/proxy/redirect/application_00_0", redirect);
 
+// check for query parameters
+
Mockito.when(request.getRequestURI()).thenReturn("/proxy/application_00_0");
+Mockito.when(request.getQueryString()).thenReturn("id=0");
+testFilter.doFilter(request, response, chain);
+assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, response.status);
+redirect = response.getHeader(ProxyUtils.LOCATION);
assertEquals("http://bogus/proxy/redirect/application_00_0?id=0", 
redirect);
+
 // "127.0.0.1" contains in host list. Without cookie
 Mockito.when(request.getRemoteAddr()).thenReturn("127.0.0.1");
 testFilter.doFilter(request, response, chain);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[50/50] [abbrv] hadoop git commit: YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)

2017-05-31 Thread haibochen
YARN-4512 [YARN-1011]. Provide a knob to turn on over-allocation. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/153498bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/153498bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/153498bc

Branch: refs/heads/YARN-1011
Commit: 153498bc3adb830f3ae37825fed856fae22eea16
Parents: 4369690
Author: Karthik Kambatla 
Authored: Fri Jan 29 14:31:45 2016 -0800
Committer: Haibo Chen 
Committed: Wed May 31 14:25:41 2017 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  13 ++-
 .../src/main/resources/yarn-default.xml |  21 
 .../RegisterNodeManagerRequest.java |  14 ++-
 .../pb/RegisterNodeManagerRequestPBImpl.java|  45 +++-
 .../server/api/records/OverAllocationInfo.java  |  45 
 .../server/api/records/ResourceThresholds.java  |  45 
 .../impl/pb/OverAllocationInfoPBImpl.java   | 106 +++
 .../impl/pb/ResourceThresholdsPBImpl.java   |  93 
 .../yarn_server_common_service_protos.proto |  10 ++
 .../hadoop/yarn/server/nodemanager/Context.java |   5 +
 .../yarn/server/nodemanager/NodeManager.java|  17 +++
 .../nodemanager/NodeStatusUpdaterImpl.java  |   6 +-
 .../monitor/ContainersMonitorImpl.java  |  34 ++
 .../amrmproxy/BaseAMRMProxyTest.java|  11 ++
 14 files changed, 455 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/153498bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 5e4c826..bb34626 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1575,7 +1575,6 @@ public class YarnConfiguration extends Configuration {
   public static final boolean 
DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE =
   false;
 
-
   // Configurations for applicaiton life time monitor feature
   public static final String RM_APPLICATION_MONITOR_INTERVAL_MS =
   RM_PREFIX + "application-timeouts.monitor.interval-ms";
@@ -1583,6 +1582,18 @@ public class YarnConfiguration extends Configuration {
   public static final long DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS =
   3000;
 
+  /** Overallocation (= allocation based on utilization) configs. */
+  public static final String NM_OVERALLOCATION_ALLOCATION_THRESHOLD =
+  NM_PREFIX + "overallocation.allocation-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_ALLOCATION_THRESHOLD
+  = 0f;
+  @Private
+  public static final float MAX_NM_OVERALLOCATION_ALLOCATION_THRESHOLD = 0.95f;
+  public static final String NM_OVERALLOCATION_PREEMPTION_THRESHOLD =
+  NM_PREFIX + "overallocation.preemption-threshold";
+  public static final float DEFAULT_NM_OVERALLOCATION_PREEMPTION_THRESHOLD
+  = 0f;
+
   /**
* Interval of time the linux container executor should try cleaning up
* cgroups entry when cleaning up a container. This is required due to what 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/153498bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index e687eef..c131eec 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1563,6 +1563,27 @@
   
 
   
+The extent of over-allocation (container-allocation based on
+  current utilization instead of prior allocation) allowed on this node,
+  expressed as a float between 0 and 0.95. By default, over-allocation is
+  turned off (value = 0). When turned on, the node allows running
+  OPPORTUNISTIC containers when the aggregate utilization is under the
+  value specified here multiplied by the node's advertised capacity.
+
+yarn.nodemanager.overallocation.allocation-threshold
+0f
+  
+
+  
+When a node is over-allocated to improve utilization by
+  

[29/50] [abbrv] hadoop git commit: YARN-6646. Modifier 'static' is redundant for inner enums (Contributed by ZhangBing Lin via Daniel Templeton)

2017-05-31 Thread haibochen
YARN-6646. Modifier 'static' is redundant for inner enums
(Contributed by ZhangBing Lin via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81372df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81372df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81372df

Branch: refs/heads/YARN-1011
Commit: d81372dfad32488e7c46ffcfccdf0aa26bee04a5
Parents: aea4293
Author: Daniel Templeton 
Authored: Fri May 26 12:05:48 2017 -0700
Committer: Daniel Templeton 
Committed: Fri May 26 12:05:48 2017 -0700

--
 .../hadoop/yarn/api/records/timelineservice/TimelineMetric.java  | 2 +-
 .../yarn/applications/distributedshell/ApplicationMaster.java| 4 ++--
 .../hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java | 4 ++--
 .../ApplicationHistoryManagerOnTimelineStore.java| 2 +-
 .../hadoop/yarn/server/nodemanager/CMgrCompletedAppsEvent.java   | 2 +-
 .../yarn/server/nodemanager/CMgrCompletedContainersEvent.java| 2 +-
 .../org/apache/hadoop/yarn/server/nodemanager/NMAuditLogger.java | 2 +-
 .../apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java | 2 +-
 .../resourcemanager/metrics/AbstractSystemMetricsPublisher.java  | 2 +-
 .../server/timelineservice/reader/filter/TimelineFilterList.java | 2 +-
 10 files changed, 12 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
index 5c908d6..2fa6d30 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineMetric.java
@@ -43,7 +43,7 @@ public class TimelineMetric {
   /**
* Type of metric.
*/
-  public static enum Type {
+  public enum Type {
 SINGLE_VALUE,
 TIME_SERIES
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index 4daebb5..ab4607a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -179,13 +179,13 @@ public class ApplicationMaster {
 
   @VisibleForTesting
   @Private
-  public static enum DSEvent {
+  public enum DSEvent {
 DS_APP_ATTEMPT_START, DS_APP_ATTEMPT_END, DS_CONTAINER_START, 
DS_CONTAINER_END
   }
   
   @VisibleForTesting
   @Private
-  public static enum DSEntity {
+  public enum DSEntity {
 DS_APP_ATTEMPT, DS_CONTAINER
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81372df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
index 515a8e8..20be71e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/impl/NMClientAsyncImpl.java
@@ 

[02/50] [abbrv] hadoop git commit: HDFS-11866. JournalNode Sync should be off by default in hdfs-default.xml. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11866. JournalNode Sync should be off by default in hdfs-default.xml. 
Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca6bcc3c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca6bcc3c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca6bcc3c

Branch: refs/heads/YARN-1011
Commit: ca6bcc3c76babb2f7def1fd413d0917783224110
Parents: 8e0f83e
Author: Arpit Agarwal 
Authored: Mon May 22 17:53:47 2017 -0700
Committer: Arpit Agarwal 
Committed: Mon May 22 17:53:47 2017 -0700

--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca6bcc3c/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f0f2220..9ddd343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3839,7 +3839,7 @@
 
 
   dfs.journalnode.enable.sync
-  true
+  false
   
If true, the journal nodes will sync with each other. The journal nodes
 will periodically gossip with other journal nodes to compare edit log


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[26/50] [abbrv] hadoop git commit: HDFS-11817. A faulty node can cause a lease leak and NPE on accessing data. Contributed by Kihwal Lee.

2017-05-31 Thread haibochen
HDFS-11817. A faulty node can cause a lease leak and NPE on accessing data. 
Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b5ad487
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b5ad487
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b5ad487

Branch: refs/heads/YARN-1011
Commit: 2b5ad48762587abbcd8bdb50d0ae98f8080d926c
Parents: 8759009
Author: Kihwal Lee 
Authored: Thu May 25 17:17:38 2017 -0500
Committer: Kihwal Lee 
Committed: Thu May 25 17:17:38 2017 -0500

--
 .../BlockUnderConstructionFeature.java  |  9 +++-
 .../server/blockmanagement/DatanodeManager.java |  3 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java   |  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../hdfs/server/namenode/LeaseManager.java  | 15 +--
 .../TestBlockUnderConstructionFeature.java  |  8 ++--
 .../namenode/TestBlockUnderConstruction.java| 45 
 .../TestCommitBlockSynchronization.java |  2 +-
 8 files changed, 73 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
index 7453184..61390d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockUnderConstructionFeature.java
@@ -223,10 +223,17 @@ public class BlockUnderConstructionFeature {
* Initialize lease recovery for this block.
* Find the first alive data-node starting from the previous primary and
* make it primary.
+   * @param blockInfo Block to be recovered
+   * @param recoveryId Recovery ID (new gen stamp)
+   * @param startRecovery Issue recovery command to datanode if true.
*/
-  public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId) {
+  public void initializeBlockRecovery(BlockInfo blockInfo, long recoveryId,
+  boolean startRecovery) {
 setBlockUCState(BlockUCState.UNDER_RECOVERY);
 blockRecoveryId = recoveryId;
+if (!startRecovery) {
+  return;
+}
 if (replicas.length == 0) {
   NameNode.blockStateChangeLog.warn("BLOCK*" +
   " BlockUnderConstructionFeature.initializeBlockRecovery:" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 7dcc9fd..c303594 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -642,10 +642,11 @@ public class DatanodeManager {
   String format, Object... args) throws UnregisteredNodeException {
 storageIDs = storageIDs == null ? new String[0] : storageIDs;
 if (datanodeID.length != storageIDs.length) {
+  // Error for pre-2.0.0-alpha clients.
   final String err = (storageIDs.length == 0?
   "Missing storageIDs: It is likely that the HDFS client,"
   + " who made this call, is running in an older version of Hadoop"
-  + " which does not support storageIDs."
+  + "(pre-2.0.0-alpha)  which does not support storageIDs."
   : "Length mismatched: storageIDs.length=" + storageIDs.length + " != 
"
   ) + " datanodeID.length=" + datanodeID.length;
   throw new HadoopIllegalArgumentException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b5ad487/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
 

[12/50] [abbrv] hadoop git commit: Revert "HDFS-11515. -du throws ConcurrentModificationException. Contributed by Istvan Fajth, Wei-Chiu Chuang."

2017-05-31 Thread haibochen
Revert "HDFS-11515. -du throws ConcurrentModificationException. Contributed by 
Istvan Fajth, Wei-Chiu Chuang."

This reverts commit bc7aff7cec07bbc3fed63a00c8f1584c34670998.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cba5612
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cba5612
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cba5612

Branch: refs/heads/YARN-1011
Commit: 2cba5612282509001a221b9751e1fd36c084807f
Parents: 0e83ed5
Author: Wei-Chiu Chuang 
Authored: Wed May 24 17:20:27 2017 -0700
Committer: Wei-Chiu Chuang 
Committed: Wed May 24 17:20:27 2017 -0700

--
 .../snapshot/DirectoryWithSnapshotFeature.java  |  5 --
 .../snapshot/TestRenameWithSnapshots.java   |  6 +-
 .../namenode/snapshot/TestSnapshotDeletion.java | 75 
 3 files changed, 2 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9840679..9addbfa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -633,11 +633,6 @@ public class DirectoryWithSnapshotFeature implements 
INode.Feature {
 for(DirectoryDiff d : diffs) {
   for(INode deletedNode : d.getChildrenDiff().getList(ListType.DELETED)) {
 context.reportDeletedSnapshottedNode(deletedNode);
-if (deletedNode.isDirectory()){
-  DirectoryWithSnapshotFeature sf =
-  deletedNode.asDirectory().getDirectoryWithSnapshotFeature();
-  sf.computeContentSummary4Snapshot(context);
-}
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index d06c384..d1b3aa6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -26,7 +26,6 @@ import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.spy;
-import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 import java.io.File;
 import java.io.IOException;
@@ -2430,7 +2429,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDu() throws Exception {
-File tempFile = File.createTempFile("testDu-", ".tmp", getTestDir());
+File tempFile = File.createTempFile("testDu-", ".tmp");
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);
@@ -2540,8 +2539,7 @@ public class TestRenameWithSnapshots {
*/
   @Test (timeout=30)
   public void testDuMultipleDirs() throws Exception {
-File tempFile = File.createTempFile("testDuMultipleDirs-", ".tmp",
-getTestDir());
+File tempFile = File.createTempFile("testDuMultipleDirs-", "" + ".tmp");
 tempFile.deleteOnExit();
 
 final FileSystem localfs = FileSystem.getLocal(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cba5612/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 7926e44..ca53788 100644
--- 

[10/50] [abbrv] hadoop git commit: HDFS-11823. Extend TestDFSStripedInputStream/TestDFSStripedOutputStream with a random EC policy. Contributed by Takanobu Asanuma.

2017-05-31 Thread haibochen
HDFS-11823. Extend TestDFSStripedInputStream/TestDFSStripedOutputStream with a 
random EC policy. Contributed by Takanobu Asanuma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c8dd6d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c8dd6d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c8dd6d3

Branch: refs/heads/YARN-1011
Commit: 1c8dd6d3d10773f281538e1dea0ffdca9db34bfe
Parents: dcf4559
Author: Jing Zhao 
Authored: Wed May 24 11:14:19 2017 -0700
Committer: Jing Zhao 
Committed: Wed May 24 11:14:19 2017 -0700

--
 .../apache/hadoop/hdfs/StripedFileTestUtil.java | 12 ++
 .../TestDFSRSDefault10x4StripedInputStream.java | 34 ---
 ...TestDFSRSDefault10x4StripedOutputStream.java | 35 ---
 ...fault10x4StripedOutputStreamWithFailure.java | 35 ---
 ...DFSStripedInputStreamWithRandomECPolicy.java | 45 
 ...tputStreamWithFailureWithRandomECPolicy.java | 45 
 ...FSStripedOutputStreamWithRandomECPolicy.java | 45 
 .../hdfs/TestDFSXORStripedInputStream.java  | 32 --
 .../hdfs/TestDFSXORStripedOutputStream.java | 34 ---
 ...estDFSXORStripedOutputStreamWithFailure.java | 35 ---
 10 files changed, 147 insertions(+), 205 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8dd6d3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
index 1bab5db..057e94a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
@@ -49,6 +49,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -568,4 +569,15 @@ public class StripedFileTestUtil {
   public static ErasureCodingPolicy getDefaultECPolicy() {
 return SystemErasureCodingPolicies.getPolicies().get(0);
   }
+
+  /**
+   * Get non-default Erasure Coding Policy randomly.
+   * @return ErasureCodingPolicy
+   */
+  public static ErasureCodingPolicy getRandomNonDefaultECPolicy() {
+Random rand = new Random();
+List<ErasureCodingPolicy> policies = SystemErasureCodingPolicies
+.getPolicies();
+return policies.get(1 + rand.nextInt(policies.size() - 1));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8dd6d3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
deleted file mode 100644
index 1d09a6c..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-
-/**
- * This tests read operation of DFS striped file with RS-10-4-64k
- *  erasure code policy.
- */
-public class TestDFSRSDefault10x4StripedInputStream extends
-TestDFSStripedInputStream {
-
-  public ErasureCodingPolicy getEcPolicy() {
-return 

[06/50] [abbrv] hadoop git commit: HDFS-11793. Allow to enable user defined erasure coding policy. Contributed by Sammi Chen

2017-05-31 Thread haibochen
HDFS-11793. Allow to enable user defined erasure coding policy. Contributed by 
Sammi Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a62be38a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a62be38a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a62be38a

Branch: refs/heads/YARN-1011
Commit: a62be38a5e5d3a61dfb59054b3f5fd5b1b7053b3
Parents: 52661e0
Author: Kai Zheng 
Authored: Wed May 24 18:45:52 2017 +0800
Committer: Kai Zheng 
Committed: Wed May 24 18:45:52 2017 +0800

--
 .../hadoop/io/erasurecode/CodecRegistry.java| 19 ++
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  4 ++
 .../io/erasurecode/TestCodecRegistry.java   |  7 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +-
 .../hadoop/hdfs/DistributedFileSystem.java  |  9 ++-
 .../hdfs/protocol/AddECPolicyResponse.java  | 66 ++
 .../hdfs/protocol/AddingECPolicyResponse.java   | 66 --
 .../hadoop/hdfs/protocol/ClientProtocol.java|  7 +-
 .../hdfs/protocol/ErasureCodingPolicy.java  |  9 ++-
 .../ClientNamenodeProtocolTranslatorPB.java | 10 +--
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  | 22 +++---
 .../apache/hadoop/hdfs/util/ECPolicyLoader.java |  2 +-
 .../src/main/proto/erasurecoding.proto  |  2 +-
 .../src/main/proto/hdfs.proto   |  2 +-
 .../hdfs/protocol/TestErasureCodingPolicy.java  | 10 +--
 .../src/main/conf/user_ec_policies.xml.template |  1 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  8 +--
 .../namenode/ErasureCodingPolicyManager.java| 72 ++--
 .../server/namenode/FSDirErasureCodingOp.java   |  4 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 35 +++---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  5 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  6 +-
 .../src/main/resources/hdfs-default.xml |  7 ++
 .../src/site/markdown/HDFSErasureCoding.md  |  3 +-
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 39 ---
 .../hadoop/hdfs/TestErasureCodingPolicies.java  | 54 +++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java| 26 +++
 28 files changed, 289 insertions(+), 212 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a62be38a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index d53d598..fcf1349 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -127,28 +127,20 @@ public final class CodecRegistry {
   /**
* Get all coder names of the given codec.
* @param codecName the name of codec
-   * @return an array of all coder names
+   * @return an array of all coder names, null if not exist
*/
   public String[] getCoderNames(String codecName) {
 String[] coderNames = coderNameMap.get(codecName);
-if (coderNames == null) {
-  throw new IllegalArgumentException("No available raw coder factory for "
-  + codecName);
-}
 return coderNames;
   }
 
   /**
* Get all coder factories of the given codec.
* @param codecName the name of codec
-   * @return a list of all coder factories
+   * @return a list of all coder factories, null if not exist
*/
  public List<RawErasureCoderFactory> getCoders(String codecName) {
List<RawErasureCoderFactory> coders = coderMap.get(codecName);
-if (coders == null) {
-  throw new IllegalArgumentException("No available raw coder factory for "
-  + codecName);
-}
 return coders;
   }
 
@@ -164,7 +156,7 @@ public final class CodecRegistry {
* Get a specific coder factory defined by codec name and coder name.
* @param codecName name of the codec
* @param coderName name of the coder
-   * @return the specific coder
+   * @return the specific coder, null if not exist
*/
   public RawErasureCoderFactory getCoderByName(
   String codecName, String coderName) {
@@ -176,10 +168,7 @@ public final class CodecRegistry {
 return coder;
   }
 }
-
-// if not found, throw exception
-throw new IllegalArgumentException("No implementation for coder "
-+ coderName + " of codec " + codecName);
+return null;
   }
 
   /**


[39/50] [abbrv] hadoop git commit: HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.

2017-05-31 Thread haibochen
HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. 
Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07e60f85
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07e60f85
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07e60f85

Branch: refs/heads/YARN-1011
Commit: 07e60f85d87ca9a585d351a308ee0ecfa9293750
Parents: d4015f8
Author: Akira Ajisaka 
Authored: Tue May 30 15:11:10 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue May 30 15:11:10 2017 +0900

--
 .../fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07e60f85/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
--
diff --git 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 419ddee..321e958 100644
--- 
a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ 
b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -20,17 +20,22 @@ package org.apache.hadoop.fs.aliyun.oss;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assume.*;
-import org.apache.hadoop.fs.FileStatus;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+import static org.junit.Assume.assumeTrue;
+
 /**
  * Tests a live Aliyun OSS system.
  *


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[25/50] [abbrv] hadoop git commit: YARN-6582. FSAppAttempt demand can be updated atomically in updateDemand(). (Karthik Kambatla via Yufei Gu)

2017-05-31 Thread haibochen
YARN-6582. FSAppAttempt demand can be updated atomically in updateDemand(). 
(Karthik Kambatla via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87590090
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87590090
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87590090

Branch: refs/heads/YARN-1011
Commit: 87590090c887829e874a7132be9cf8de061437d6
Parents: 3fd6a2d
Author: Yufei Gu 
Authored: Thu May 25 14:22:13 2017 -0700
Committer: Yufei Gu 
Committed: Thu May 25 14:22:13 2017 -0700

--
 .../scheduler/fair/FSAppAttempt.java| 23 +---
 1 file changed, 10 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87590090/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 4f7e164..a5772ba 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -1286,24 +1286,21 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
   @Override
   public void updateDemand() {
-demand = Resources.createResource(0);
 // Demand is current consumption plus outstanding requests
-Resources.addTo(demand, getCurrentConsumption());
+Resource tmpDemand = Resources.clone(getCurrentConsumption());
 
 // Add up outstanding resource requests
-try {
-  writeLock.lock();
-  for (SchedulerRequestKey k : getSchedulerKeys()) {
-PendingAsk pendingAsk = getPendingAsk(k, ResourceRequest.ANY);
-if (pendingAsk.getCount() > 0) {
-  Resources.multiplyAndAddTo(demand,
-  pendingAsk.getPerAllocationResource(),
-  pendingAsk.getCount());
-}
+for (SchedulerRequestKey k : getSchedulerKeys()) {
+  PendingAsk pendingAsk = getPendingAsk(k, ResourceRequest.ANY);
+  if (pendingAsk.getCount() > 0) {
+Resources.multiplyAndAddTo(tmpDemand,
+pendingAsk.getPerAllocationResource(),
+pendingAsk.getCount());
   }
-} finally {
-  writeLock.unlock();
 }
+
+// Update demand
+demand = tmpDemand;
   }
 
   @Override


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[23/50] [abbrv] hadoop git commit: HDFS-11879. Fix JN sync interval in case of exception. Contributed by Hanisha Koneru.

2017-05-31 Thread haibochen
HDFS-11879. Fix JN sync interval in case of exception. Contributed by Hanisha 
Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/11615631
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/11615631
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/11615631

Branch: refs/heads/YARN-1011
Commit: 11615631360ba49c1e9d256ed4f65119d99fd67d
Parents: 29b7df9
Author: Arpit Agarwal 
Authored: Thu May 25 14:01:53 2017 -0700
Committer: Arpit Agarwal 
Committed: Thu May 25 14:01:53 2017 -0700

--
 .../hdfs/qjournal/server/JournalNodeSyncer.java | 40 
 1 file changed, 25 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/11615631/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
index 99bd499..479f6a0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -172,7 +172,6 @@ public class JournalNodeSyncer {
   } else {
 syncJournals();
   }
-  Thread.sleep(journalSyncInterval);
 } catch (Throwable t) {
   if (!shouldSync) {
 if (t instanceof InterruptedException) {
@@ -194,6 +193,17 @@ public class JournalNodeSyncer {
   LOG.error(
   "JournalNodeSyncer daemon received Runtime exception. ", t);
 }
+try {
+  Thread.sleep(journalSyncInterval);
+} catch (InterruptedException e) {
+  if (!shouldSync) {
+LOG.info("Stopping JournalNode Sync.");
+  } else {
+LOG.warn("JournalNodeSyncer interrupted", e);
+  }
+  Thread.currentThread().interrupt();
+  return;
+}
   }
 });
 syncJournalDaemon.start();
@@ -320,30 +330,30 @@ public class JournalNodeSyncer {
 
List<RemoteEditLog> missingEditLogs = Lists.newArrayList();
 
-int thisJnIndex = 0, otherJnIndex = 0;
-int thisJnNumLogs = thisJournalEditLogs.size();
-int otherJnNumLogs = otherJournalEditLogs.size();
+int localJnIndex = 0, remoteJnIndex = 0;
+int localJnNumLogs = thisJournalEditLogs.size();
+int remoteJnNumLogs = otherJournalEditLogs.size();
 
-while (thisJnIndex < thisJnNumLogs && otherJnIndex < otherJnNumLogs) {
-  long localJNstartTxId = thisJournalEditLogs.get(thisJnIndex)
+while (localJnIndex < localJnNumLogs && remoteJnIndex < remoteJnNumLogs) {
+  long localJNstartTxId = thisJournalEditLogs.get(localJnIndex)
   .getStartTxId();
-  long remoteJNstartTxId = otherJournalEditLogs.get(otherJnIndex)
+  long remoteJNstartTxId = otherJournalEditLogs.get(remoteJnIndex)
   .getStartTxId();
 
   if (localJNstartTxId == remoteJNstartTxId) {
-thisJnIndex++;
-otherJnIndex++;
+localJnIndex++;
+remoteJnIndex++;
   } else if (localJNstartTxId > remoteJNstartTxId) {
-missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
-otherJnIndex++;
+missingEditLogs.add(otherJournalEditLogs.get(remoteJnIndex));
+remoteJnIndex++;
   } else {
-thisJnIndex++;
+localJnIndex++;
   }
 }
 
-if (otherJnIndex < otherJnNumLogs) {
-  for (; otherJnIndex < otherJnNumLogs; otherJnIndex++) {
-missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
+if (remoteJnIndex < remoteJnNumLogs) {
+  for (; remoteJnIndex < remoteJnNumLogs; remoteJnIndex++) {
+missingEditLogs.add(otherJournalEditLogs.get(remoteJnIndex));
   }
 }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: HADOOP-14430 the accessTime of FileStatus returned by SFTPFileSystem's getFileStatus method is always 0. Contributed by Hongyuan Li.

2017-05-31 Thread haibochen
HADOOP-14430 the accessTime of FileStatus returned by SFTPFileSystem's
getFileStatus method is always 0.
Contributed by Hongyuan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8bf0e2d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8bf0e2d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8bf0e2d6

Branch: refs/heads/YARN-1011
Commit: 8bf0e2d6b38a2cbd3c3d45557ede7575c1f18312
Parents: 1ba9704
Author: Steve Loughran 
Authored: Thu May 25 15:19:58 2017 +0100
Committer: Steve Loughran 
Committed: Thu May 25 15:19:58 2017 +0100

--
 .../org/apache/hadoop/fs/sftp/SFTPFileSystem.java |  2 +-
 .../org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java | 14 ++
 2 files changed, 15 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf0e2d6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index 30cf4d3..d91d391 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -278,7 +278,7 @@ public class SFTPFileSystem extends FileSystem {
 // block sizes on server. The assumption could be less than ideal.
 long blockSize = DEFAULT_BLOCK_SIZE;
 long modTime = attr.getMTime() * 1000; // convert to milliseconds
-long accessTime = 0;
+long accessTime = attr.getATime() * 1000L;
 FsPermission permission = getPermissions(sftpFile);
 // not be able to get the real user group name, just use the user and group
 // id

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8bf0e2d6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
index 8dc5324..9b514e1 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.fs.sftp;
 
 import java.io.IOException;
 import java.net.URI;
+import java.nio.file.Files;
+import java.nio.file.attribute.BasicFileAttributes;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -28,6 +30,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils;
 
@@ -305,4 +308,15 @@ public class TestSFTPFileSystem {
 sftpFs.rename(file1, file2);
   }
 
+  @Test
+  public void testGetAccessTime() throws IOException {
+Path file = touch(localFs, name.getMethodName().toLowerCase());
+LocalFileSystem local = (LocalFileSystem)localFs;
+java.nio.file.Path path = (local).pathToFile(file).toPath();
+long accessTime1 = Files.readAttributes(path, BasicFileAttributes.class)
+.lastAccessTime().toMillis();
+long accessTime2 = sftpFs.getFileStatus(file).getAccessTime();
+assertEquals(accessTime1, accessTime2);
+  }
+
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



  1   2   3   4   5   6   7   8   >