[hadoop] branch branch-2.10 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 77d9c6d  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
77d9c6d is described below

commit 77d9c6d0f75ff5ca690d9aeb2ae6a5e27418b23c
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 2147129..b120d7a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -168,6 +168,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -229,6 +230,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -516,7 +518,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -541,6 +544,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -596,7 +602,7 @@ public class NNThroughputBenchmark implements Tool {
   clientProto.create(fileNames[daemonId][inputIdx], 
FsPermission.getDefault(),
clientName, new EnumSetWritable<CreateFlag>(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true, 
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported());
+  replication, blockSize, CryptoProtocolVersion.supported());
   long end = Time.now();
   for(boolean written = !closeUponCreate; !written; 
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -716,7 +722,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -748,6 +755,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -778,7 +786,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, 
BLOCK_SIZE);
+  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+  blockSize);
   long end = Time.now();
   

[hadoop] branch branch-3.1 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new c4c7801  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
c4c7801 is described below

commit c4c78016eb5aa87951ef9c357df8445e35cd0d77
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 4e0bce8..19a24f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -168,6 +168,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -229,6 +230,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -516,7 +518,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -541,6 +544,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -597,7 +603,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
+  replication, blockSize, CryptoProtocolVersion.supported(), null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -718,7 +724,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -750,6 +757,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -780,7 +788,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, 
BLOCK_SIZE);
+  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+  blockSize);
   long end = Time.now();
   return end-start;
 }
@@ -1070,7 +1079,7 @@ public 

[hadoop] branch branch-3.2 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new ab4e90c  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
ab4e90c is described below

commit ab4e90cc28d8f022b8b14416830fde64ea3a06d4
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 1a5718a..b8045eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -230,6 +231,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -542,6 +545,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null);
+  replication, blockSize, CryptoProtocolVersion.supported(), null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
 written = clientProto.complete(fileNames[daemonId][inputIdx],
@@ -719,7 +725,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -751,6 +758,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -781,7 +789,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, 
BLOCK_SIZE);
+  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+  blockSize);
   long end = Time.now();
   return end-start;
 }
@@ -1071,7 +1080,7 @@ public 

[hadoop] branch branch-3.3 updated: HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

2021-04-29 Thread shv
This is an automated email from the ASF dual-hosted git repository.

shv pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new 9aa6106  HDFS-15652. Make block size from NNThroughputBenchmark 
configurable (#2416)
9aa6106 is described below

commit 9aa610668902d60abceb8c78523c47f25b346c0a
Author: Hui Fei 
AuthorDate: Wed Oct 28 09:13:25 2020 +0800

HDFS-15652. Make block size from NNThroughputBenchmark configurable (#2416)

(cherry picked from commit 8a6d5b9151cd4a922372835b18a9a031c9d3475e)
---
 .../server/namenode/NNThroughputBenchmark.java | 41 +++---
 .../server/namenode/TestNNThroughputBenchmark.java |  8 -
 2 files changed, 36 insertions(+), 13 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index 245f5be..513c609 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -169,6 +169,7 @@ public class NNThroughputBenchmark implements Tool {
 
 protected final String baseDir;
 protected short replication;
+protected int blockSize;
 protected int  numThreads = 0;// number of threads
 protected int  numOpsRequired = 0;// number of operations requested
 protected int  numOpsExecuted = 0;// number of operations executed
@@ -230,6 +231,7 @@ public class NNThroughputBenchmark implements Tool {
 OperationStatsBase() {
   baseDir = BASE_DIR_NAME + "/" + getOpName();
   replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
3);
+  blockSize = config.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
   numOpsRequired = 10;
   numThreads = 3;
   logLevel = Level.ERROR;
@@ -517,7 +519,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_CREATE_NAME = "create";
 static final String OP_CREATE_USAGE = 
-  "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+"-op create [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-close]";
 
 protected FileNameGenerator nameGenerator;
 protected String[][] fileNames;
@@ -542,6 +545,9 @@ public class NNThroughputBenchmark implements Tool {
 if(args.get(i).equals("-files")) {
   if(i+1 == args.size())  printUsage();
   numOpsRequired = Integer.parseInt(args.get(++i));
+} else if (args.get(i).equals("-blockSize")) {
+  if(i+1 == args.size())  printUsage();
+  blockSize = Integer.parseInt(args.get(++i));
 } else if(args.get(i).equals("-threads")) {
   if(i+1 == args.size())  printUsage();
   numThreads = Integer.parseInt(args.get(++i));
@@ -598,7 +604,7 @@ public class NNThroughputBenchmark implements Tool {
   FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet
   .of(CreateFlag.CREATE, CreateFlag.OVERWRITE)), true,
-  replication, BLOCK_SIZE, CryptoProtocolVersion.supported(), null,
+  replication, blockSize, CryptoProtocolVersion.supported(), null,
   null);
   long end = Time.now();
   for (boolean written = !closeUponCreate; !written;
@@ -720,7 +726,8 @@ public class NNThroughputBenchmark implements Tool {
 // Operation types
 static final String OP_OPEN_NAME = "open";
 static final String OP_USAGE_ARGS = 
-  " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+" [-threads T] [-files N] [-blockSize S] [-filesPerDir P]"
++ " [-useExisting]";
 static final String OP_OPEN_USAGE = 
   "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
 
@@ -752,6 +759,7 @@ public class NNThroughputBenchmark implements Tool {
   "-op", "create", 
   "-threads", String.valueOf(this.numThreads), 
   "-files", String.valueOf(numOpsRequired),
+  "-blockSize", String.valueOf(blockSize),
   "-filesPerDir", 
   String.valueOf(nameGenerator.getFilesPerDirectory()),
   "-close"};
@@ -782,7 +790,8 @@ public class NNThroughputBenchmark implements Tool {
 long executeOp(int daemonId, int inputIdx, String ignore) 
 throws IOException {
   long start = Time.now();
-  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L, 
BLOCK_SIZE);
+  clientProto.getBlockLocations(fileNames[daemonId][inputIdx], 0L,
+  blockSize);
   long end = Time.now();
   return end-start;
 }
@@ -1072,7 +1081,7 @@ public class NNThroughputBenchmark implements Tool {
 

[hadoop] branch branch-3.3 updated: YARN-10707. Support custom resources in ResourceUtilization, and update Node GPU Utilization to use. Contributed by Qi Zhu

2021-04-29 Thread ebadger
This is an automated email from the ASF dual-hosted git repository.

ebadger pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new da2ebfa  YARN-10707. Support custom resources in ResourceUtilization, 
and update Node GPU Utilization to use. Contributed by Qi Zhu
da2ebfa is described below

commit da2ebfa8a93d5085206431c6553b151a516aa077
Author: Eric Badger 
AuthorDate: Thu Apr 29 17:52:03 2021 +

YARN-10707. Support custom resources in ResourceUtilization, and update 
Node GPU Utilization to use. Contributed by Qi Zhu

(cherry picked from commit 803ac4b1a0fc5c0b6c25a5df0733b3ebcdb2f294)
---
 .../yarn/api/records/ResourceUtilization.java  | 128 -
 .../src/main/proto/yarn_protos.proto   |   6 +
 .../yarn/api/records/impl/pb/ProtoUtils.java   |  29 +
 .../records/impl/pb/ResourceUtilizationPBImpl.java |  31 -
 .../hadoop/yarn/api/BasePBImplRecordsTest.java |   2 +
 .../yarn/api/records/TestResourceUtilization.java  |  49 
 .../nodemanager/NodeResourceMonitorImpl.java   |  30 +++--
 .../gpu/GpuNodeResourceUpdateHandler.java  |  43 +--
 .../resourceplugin/gpu/TestGpuResourcePlugin.java  |   4 +-
 9 files changed, 297 insertions(+), 25 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
index f6c5a69..ff3cec3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
@@ -22,6 +22,9 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * 
  * ResourceUtilization models the utilization of a set of computer
@@ -33,14 +36,26 @@ import org.apache.hadoop.yarn.util.Records;
 public abstract class ResourceUtilization implements
Comparable<ResourceUtilization> {
 
+  private Map<String, Float> customResources
+  = new HashMap<>();
+
   @Public
   @Unstable
-  public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) 
{
+  public static ResourceUtilization newInstance(int pmem, int vmem,
+  float cpu) {
+return newInstance(pmem, vmem, cpu, null);
+  }
+
+  @Public
+  @Unstable
+  public static ResourceUtilization newInstance(int pmem, int vmem,
+  float cpu, Map<String, Float> customResources) {
 ResourceUtilization utilization =
 Records.newRecord(ResourceUtilization.class);
 utilization.setPhysicalMemory(pmem);
 utilization.setVirtualMemory(vmem);
 utilization.setCPU(cpu);
+utilization.setCustomResources(customResources);
 return utilization;
   }
 
@@ -49,7 +64,9 @@ public abstract class ResourceUtilization implements
   public static ResourceUtilization newInstance(
   ResourceUtilization resourceUtil) {
 return newInstance(resourceUtil.getPhysicalMemory(),
-resourceUtil.getVirtualMemory(), resourceUtil.getCPU());
+resourceUtil.getVirtualMemory(),
+resourceUtil.getCPU(),
+resourceUtil.getCustomResources());
   }
 
   /**
@@ -106,6 +123,51 @@ public abstract class ResourceUtilization implements
   @Unstable
   public abstract void setCPU(float cpu);
 
+  /**
+   * Get custom resource utilization
+   * (The amount of custom resource used).
+   *
+   * @param resourceName resourceName of custom resource
+   * @return resourceName utilization
+   */
+  @Public
+  @Unstable
+  public float getCustomResource(String resourceName) {
+if (customResources != null && resourceName != null) {
+  return customResources.get(resourceName);
+}
+return 0f;
+  }
+
+  @Public
+  @Unstable
+  public Map<String, Float> getCustomResources() {
+return customResources;
+  }
+
+  @Public
+  @Unstable
+  public void setCustomResources(Map<String, Float> customResources) {
+if (customResources != null) {
+  this.customResources = customResources;
+}
+  }
+
+  /**
+   * Set custom resource utilization
+   * (The amount of custom resource used).
+   * @param resourceName resourceName
+   * @param utilization utilization of custom resource
+   *
+   */
+  @Public
+  @Unstable
+  public void setCustomResource(String resourceName, float utilization) {
+if (resourceName != null && !resourceName.isEmpty()) {
+  customResources.put(resourceName, utilization);
+}
+  }
+
   @Override
   public int hashCode() {
 final int prime = 263167;
@@ -113,6 +175,12 @@ public abstract class ResourceUtilization implements
 result = prime * result + 

[hadoop] branch trunk updated: YARN-10707. Support custom resources in ResourceUtilization, and update Node GPU Utilization to use. Contributed by Qi Zhu

2021-04-29 Thread ebadger
This is an automated email from the ASF dual-hosted git repository.

ebadger pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 803ac4b  YARN-10707. Support custom resources in ResourceUtilization, 
and update Node GPU Utilization to use. Contributed by Qi Zhu
803ac4b is described below

commit 803ac4b1a0fc5c0b6c25a5df0733b3ebcdb2f294
Author: Eric Badger 
AuthorDate: Thu Apr 29 17:42:13 2021 +

YARN-10707. Support custom resources in ResourceUtilization, and update 
Node GPU Utilization to use. Contributed by Qi Zhu
---
 .../yarn/api/records/ResourceUtilization.java  | 128 -
 .../src/main/proto/yarn_protos.proto   |   6 +
 .../yarn/api/records/impl/pb/ProtoUtils.java   |  29 +
 .../records/impl/pb/ResourceUtilizationPBImpl.java |  31 -
 .../hadoop/yarn/api/BasePBImplRecordsTest.java |   2 +
 .../yarn/api/records/TestResourceUtilization.java  |  49 
 .../nodemanager/NodeResourceMonitorImpl.java   |  30 +++--
 .../gpu/GpuNodeResourceUpdateHandler.java  |  43 +--
 .../resourceplugin/gpu/TestGpuResourcePlugin.java  |   4 +-
 9 files changed, 297 insertions(+), 25 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
index f6c5a69..ff3cec3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceUtilization.java
@@ -22,6 +22,9 @@ import 
org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.HashMap;
+import java.util.Map;
+
 /**
  * 
  * ResourceUtilization models the utilization of a set of computer
@@ -33,14 +36,26 @@ import org.apache.hadoop.yarn.util.Records;
 public abstract class ResourceUtilization implements
Comparable<ResourceUtilization> {
 
+  private Map<String, Float> customResources
+  = new HashMap<>();
+
   @Public
   @Unstable
-  public static ResourceUtilization newInstance(int pmem, int vmem, float cpu) 
{
+  public static ResourceUtilization newInstance(int pmem, int vmem,
+  float cpu) {
+return newInstance(pmem, vmem, cpu, null);
+  }
+
+  @Public
+  @Unstable
+  public static ResourceUtilization newInstance(int pmem, int vmem,
+  float cpu, Map<String, Float> customResources) {
 ResourceUtilization utilization =
 Records.newRecord(ResourceUtilization.class);
 utilization.setPhysicalMemory(pmem);
 utilization.setVirtualMemory(vmem);
 utilization.setCPU(cpu);
+utilization.setCustomResources(customResources);
 return utilization;
   }
 
@@ -49,7 +64,9 @@ public abstract class ResourceUtilization implements
   public static ResourceUtilization newInstance(
   ResourceUtilization resourceUtil) {
 return newInstance(resourceUtil.getPhysicalMemory(),
-resourceUtil.getVirtualMemory(), resourceUtil.getCPU());
+resourceUtil.getVirtualMemory(),
+resourceUtil.getCPU(),
+resourceUtil.getCustomResources());
   }
 
   /**
@@ -106,6 +123,51 @@ public abstract class ResourceUtilization implements
   @Unstable
   public abstract void setCPU(float cpu);
 
+  /**
+   * Get custom resource utilization
+   * (The amount of custom resource used).
+   *
+   * @param resourceName resourceName of custom resource
+   * @return resourceName utilization
+   */
+  @Public
+  @Unstable
+  public float getCustomResource(String resourceName) {
+if (customResources != null && resourceName != null) {
+  return customResources.get(resourceName);
+}
+return 0f;
+  }
+
+  @Public
+  @Unstable
+  public Map<String, Float> getCustomResources() {
+return customResources;
+  }
+
+  @Public
+  @Unstable
+  public void setCustomResources(Map<String, Float> customResources) {
+if (customResources != null) {
+  this.customResources = customResources;
+}
+  }
+
+  /**
+   * Set custom resource utilization
+   * (The amount of custom resource used).
+   * @param resourceName resourceName
+   * @param utilization utilization of custom resource
+   *
+   */
+  @Public
+  @Unstable
+  public void setCustomResource(String resourceName, float utilization) {
+if (resourceName != null && !resourceName.isEmpty()) {
+  customResources.put(resourceName, utilization);
+}
+  }
+
   @Override
   public int hashCode() {
 final int prime = 263167;
@@ -113,6 +175,12 @@ public abstract class ResourceUtilization implements
 result = prime * result + getVirtualMemory();
 result = prime * result + getPhysicalMemory();
 result = 31 * result 

Error while running github feature from .asf.yaml in hadoop!

2021-04-29 Thread Apache Infrastructure


An error occurred while running github feature in .asf.yaml!:
'next'

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



Error while running github feature from .asf.yaml in hadoop!

2021-04-29 Thread Apache Infrastructure


An error occurred while running github feature in .asf.yaml!:
'next'

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15561. RBF: Remove NPE when local namenode is not configured (#2954). Contributed by Fengnan Li.

2021-04-29 Thread hexiaoqiao
This is an automated email from the ASF dual-hosted git repository.

hexiaoqiao pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 552e9dc  HDFS-15561. RBF: Remove NPE when local namenode is not 
configured (#2954). Contributed by Fengnan Li.
552e9dc is described below

commit 552e9dcc6c778df02e24d514750bf68151d0a77f
Author: lfengnan 
AuthorDate: Thu Apr 29 06:14:18 2021 -0700

HDFS-15561. RBF: Remove NPE when local namenode is not configured (#2954). 
Contributed by Fengnan Li.

Reviewed-by: He Xiaoqiao 
---
 .../hdfs/server/federation/router/Router.java  | 17 +-
 .../router/TestRouterNamenodeHeartbeat.java| 38 ++
 .../router/TestRouterWebHdfsMethods.java   |  1 +
 3 files changed, 55 insertions(+), 1 deletion(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index d6e5a1c..ae9b62b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -550,14 +550,20 @@ public class Router extends CompositeService implements
*
* @return Updater of the status for the local Namenode.
*/
-  protected NamenodeHeartbeatService createLocalNamenodeHeartbeatService() {
+  @VisibleForTesting
+  public NamenodeHeartbeatService createLocalNamenodeHeartbeatService() {
 // Detect NN running in this machine
 String nsId = DFSUtil.getNamenodeNameServiceId(conf);
+if (nsId == null) {
+  LOG.error("Cannot find local nameservice id");
+  return null;
+}
 String nnId = null;
 if (HAUtil.isHAEnabled(conf, nsId)) {
   nnId = HAUtil.getNameNodeId(conf, nsId);
   if (nnId == null) {
 LOG.error("Cannot find namenode id for local {}", nsId);
+return null;
   }
 }
 
@@ -787,4 +793,13 @@ public class Router extends CompositeService implements
 return adminServer;
   }
 
+  /**
+   * Set router configuration.
+   * @param conf
+   */
+  @VisibleForTesting
+  public void setConf(Configuration conf) {
+this.conf = conf;
+  }
+
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
index d2bc5d6..38419ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
@@ -17,15 +17,21 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
 import static 
org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.federation.MockResolver;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
 import 
org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.NamenodeContext;
@@ -106,6 +112,38 @@ public class TestRouterNamenodeHeartbeat {
   }
 
   @Test
+  public void testLocalNamenodeHeartbeatService() throws IOException {
+Router router = new Router();
+Configuration conf = new Configuration();
+assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
+
+// case 1: no local nn is configured
+router.setConf(conf);
+assertNull(router.createLocalNamenodeHeartbeatService());
+
+// case 2: local nn is configured
+conf.set(DFS_NAMESERVICES, "ns1");
+assertEquals("ns1", DFSUtil.getNamenodeNameServiceId(conf));
+conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
+"nn1,nn2");
+conf.set(DFSUtil.addKeySuffixes(
+DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"),
+"localhost:8020");
+conf.set(DFSUtil.addKeySuffixes(
+DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"),
+

[hadoop-thirdparty] annotated tag release-1.1.0-RC0 created (now dc11e4c)

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to annotated tag release-1.1.0-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at dc11e4c  (tag)
 tagging 813a3a9c0336fe86fce882d0b0e0c087da02be56 (commit)
  by Wei-Chiu Chuang
  on Thu Apr 29 17:47:54 2021 +0800

- Log -
Release candidate - 1.1.0-RC0
-BEGIN PGP SIGNATURE-

iQIzBAABCAAdFiEEzTLXc/9Bw/nnS9t/s2LhwCGFS50FAmCKgMoACgkQs2LhwCGF
S53CkhAAjuWJir2p/oV7Q47VPXPphdilKxCDVF3+PilLDeJ/jqPUOGRRMBV0duNQ
xKtDu7lLS7eLXFpe8K210ocWiiE2Pm9Wj6e1i8/Tqehly45cCQ16RFVolNd1jn56
BYg67D4vfOK7vlXYZKZtKhmadmBUVyLLTKesc01IFIE9IxlyX3KLdVW69EFaFL7r
xrcZCA0Qi3BFx6/TJCeWniRqU1M7G840XTWwjD9eiyI+YEndY/hhgwhNqXES1ECd
GS26Jz94b8edPyDdrR3DF5vBswVX6Y9EELtTUxAPeMyl30nDN5F0hitHukSxQp7G
3r+xMOW1+Z5IeltrJItw/MzqGWjCr1JHUeokjSaiac1Gzb+1G0l+XCmIkaw9YjLC
6HPX0ALYsYKz11z4r7Omu/qjg5BVdR1VWCuIp8jO9bdz9lC8iech1LBLJFjdXYEP
6NtmHLSsxxLUKCDUOhJKKy6lKWhIlZO6AmrBknyU8hHsM/Dzi5cMUtg8lx0KvGTz
8QhMPiyVQoMEbKxWcjOrmSgwJ2zsUw35TmUeHl+ba4OVTpzTSx54R/EeIqwygD8D
xjNGKedMDY38BvZ2Du4NkuQXw6e8V+vscnt6AeQMTyTupjjPuwaEI0n7tGNnzoHL
k76zVvCglMCwNpG4/d1NFCq72Mbn7dAB9p3CBtftoeRFrgIK0vc=
=FESG
-END PGP SIGNATURE-
---

No new revisions were added by this update.

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 01/01: Release 1.1.0

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-1.1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit 813a3a9c0336fe86fce882d0b0e0c087da02be56
Author: Wei-Chiu Chuang 
AuthorDate: Thu Apr 29 17:10:47 2021 +0800

Release 1.1.0
---
 hadoop-shaded-guava/pom.xml| 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-guava/pom.xml b/hadoop-shaded-guava/pom.xml
index 8329a4e..38be558 100644
--- a/hadoop-shaded-guava/pom.xml
+++ b/hadoop-shaded-guava/pom.xml
@@ -23,7 +23,7 @@
 
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.1.0
 ..
 
 4.0.0
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 656fee9..53d8c88 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.1.0
 ..
   
   4.0.0
diff --git a/pom.xml b/pom.xml
index 8c67c0b..a427844 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   4.0.0
   org.apache.hadoop.thirdparty
   hadoop-thirdparty
-  1.1.0-SNAPSHOT
+  1.1.0
   
 org.apache
 apache

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch branch-1.1.0 created (now 813a3a9)

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-1.1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 813a3a9  Release 1.1.0

This branch includes the following new commits:

 new 813a3a9  Release 1.1.0

The 1 revision listed above as "new" is entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: Preparing for 1.2.0 development

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3a2d75e  Preparing for 1.2.0 development
3a2d75e is described below

commit 3a2d75edfb85e3540a1fec53c11a06112f8b33a2
Author: Wei-Chiu Chuang 
AuthorDate: Thu Apr 29 17:02:47 2021 +0800

Preparing for 1.2.0 development
---
 hadoop-shaded-guava/pom.xml| 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-guava/pom.xml b/hadoop-shaded-guava/pom.xml
index 8329a4e..c4d898a 100644
--- a/hadoop-shaded-guava/pom.xml
+++ b/hadoop-shaded-guava/pom.xml
@@ -23,7 +23,7 @@
 
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.2.0-SNAPSHOT
 ..
 
 4.0.0
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 656fee9..ae288fb 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.2.0-SNAPSHOT
 ..
   
   4.0.0
diff --git a/pom.xml b/pom.xml
index 8c67c0b..bdb2e7d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   4.0.0
   org.apache.hadoop.thirdparty
   hadoop-thirdparty
-  1.1.0-SNAPSHOT
+  1.2.0-SNAPSHOT
   
 org.apache
 apache

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 01/01: Preparing for 1.1.1 development

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-1.1
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit a96f25bd8c35aaa31fe729b568f14d59cc73dcd9
Author: Wei-Chiu Chuang 
AuthorDate: Thu Apr 29 17:04:03 2021 +0800

Preparing for 1.1.1 development
---
 hadoop-shaded-guava/pom.xml| 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-guava/pom.xml b/hadoop-shaded-guava/pom.xml
index 8329a4e..60a8478 100644
--- a/hadoop-shaded-guava/pom.xml
+++ b/hadoop-shaded-guava/pom.xml
@@ -23,7 +23,7 @@
 
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.1.1-SNAPSHOT
 ..
 
 4.0.0
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 656fee9..c6cc7a2 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   
 hadoop-thirdparty
 org.apache.hadoop.thirdparty
-1.1.0-SNAPSHOT
+1.1.1-SNAPSHOT
 ..
   
   4.0.0
diff --git a/pom.xml b/pom.xml
index 8c67c0b..a923ec2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   4.0.0
   org.apache.hadoop.thirdparty
   hadoop-thirdparty
-  1.1.0-SNAPSHOT
+  1.1.1-SNAPSHOT
   
 org.apache
 apache

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch branch-1.1 created (now a96f25b)

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-1.1
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at a96f25b  Preparing for 1.1.1 development

This branch includes the following new commits:

 new a96f25b  Preparing for 1.1.1 development

The 1 revision listed above as "new" is entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



Error while running github feature from .asf.yaml in hadoop!

2021-04-29 Thread Apache Infrastructure


An error occurred while running github feature in .asf.yaml!:
'next'

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (f41a368 -> b2e5476)

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from f41a368  HADOOP-11245. Update NFS gateway to use Netty4 (#2832)
 add b2e5476  HDFS-15624. fix the function of setting quota by storage type 
(#2377) (#2955)

No new revisions were added by this update.

Summary of changes:
 .../src/main/java/org/apache/hadoop/fs/StorageType.java|  7 +++
 .../test/java/org/apache/hadoop/fs/shell/TestCount.java|  4 ++--
 .../hdfs/server/federation/router/TestRouterQuota.java | 14 +++---
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java   |  7 +++
 .../hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java |  3 ++-
 .../org/apache/hadoop/hdfs/TestBlockStoragePolicy.java |  6 +++---
 .../org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java |  3 ++-
 7 files changed, 26 insertions(+), 18 deletions(-)

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



Error while running github feature from .asf.yaml in hadoop!

2021-04-29 Thread Apache Infrastructure


An error occurred while running github feature in .asf.yaml!:
'next'

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-11245. Update NFS gateway to use Netty4 (#2832)

2021-04-29 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f41a368  HADOOP-11245. Update NFS gateway to use Netty4 (#2832)
f41a368 is described below

commit f41a368c146ab5ebea770017621256d3ff526046
Author: Wei-Chiu Chuang 
AuthorDate: Wed Apr 28 23:43:11 2021 -0700

HADOOP-11245. Update NFS gateway to use Netty4 (#2832)

Reviewed-by: Tsz-Wo Nicholas Sze 
---
 hadoop-common-project/hadoop-nfs/pom.xml   |   2 +-
 .../java/org/apache/hadoop/mount/MountdBase.java   |  14 ++-
 .../java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java  |   7 +-
 .../apache/hadoop/oncrpc/RegistrationClient.java   |  13 +--
 .../java/org/apache/hadoop/oncrpc/RpcInfo.java |  12 +-
 .../java/org/apache/hadoop/oncrpc/RpcProgram.java  |  19 ++--
 .../java/org/apache/hadoop/oncrpc/RpcResponse.java |  23 ++--
 .../java/org/apache/hadoop/oncrpc/RpcUtil.java | 123 +++-
 .../org/apache/hadoop/oncrpc/SimpleTcpClient.java  |  78 -
 .../hadoop/oncrpc/SimpleTcpClientHandler.java  |  30 ++---
 .../org/apache/hadoop/oncrpc/SimpleTcpServer.java  |  76 +++--
 .../org/apache/hadoop/oncrpc/SimpleUdpServer.java  |  65 +++
 .../main/java/org/apache/hadoop/oncrpc/XDR.java|  12 +-
 .../java/org/apache/hadoop/portmap/Portmap.java| 126 +
 .../apache/hadoop/portmap/RpcProgramPortmap.java   |  46 
 .../org/apache/hadoop/oncrpc/TestFrameDecoder.java | 100 
 .../org/apache/hadoop/portmap/TestPortmap.java |   2 +-
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml|   2 +-
 .../hadoop/hdfs/nfs/mount/RpcProgramMountd.java|  12 +-
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java |  12 +-
 .../apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |   2 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java   |  14 ++-
 .../org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/WriteManager.java  |   2 +-
 .../hadoop/hdfs/nfs/TestOutOfOrderWrite.java   |  32 +++---
 .../hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java   |   2 +-
 .../apache/hadoop/hdfs/nfs/nfs3/TestWrites.java|   2 +-
 27 files changed, 472 insertions(+), 358 deletions(-)

diff --git a/hadoop-common-project/hadoop-nfs/pom.xml 
b/hadoop-common-project/hadoop-nfs/pom.xml
index f5de8407..33d8b37 100644
--- a/hadoop-common-project/hadoop-nfs/pom.xml
+++ b/hadoop-common-project/hadoop-nfs/pom.xml
@@ -90,7 +90,7 @@
 
 
   io.netty
-  netty
+  netty-all
   compile
 
 
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 0ff3084..58d3e51 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -41,6 +41,8 @@ abstract public class MountdBase {
   private final RpcProgram rpcProgram;
   private int udpBoundPort; // Will set after server starts
   private int tcpBoundPort; // Will set after server starts
+  private SimpleUdpServer udpServer = null;
+  private SimpleTcpServer tcpServer = null;
 
   public RpcProgram getRpcProgram() {
 return rpcProgram;
@@ -57,7 +59,7 @@ abstract public class MountdBase {
 
   /* Start UDP server */
   private void startUDPServer() {
-SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
+udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -76,7 +78,7 @@ abstract public class MountdBase {
 
   /* Start TCP server */
   private void startTCPServer() {
-SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
+tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
 try {
@@ -118,6 +120,14 @@ abstract public class MountdBase {
   rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   tcpBoundPort = 0;
 }
+if (udpServer != null) {
+  udpServer.shutdown();
+  udpServer = null;
+}
+if (tcpServer != null) {
+  tcpServer.shutdown();
+  tcpServer = null;
+}
   }
 
   /**
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index ff83a5f..e6ea29b 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -35,6 +35,7 @@ public abstract class Nfs3Base {
   public static final Logger LOG =