[hadoop] branch branch-2 updated: HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.

2020-01-14 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2 by this push:
 new c675719  HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.
c675719 is described below

commit c675719c3fb0fe5f6b0be624935fdf22bb228e0f
Author: Wei-Chiu Chuang 
AuthorDate: Tue Jan 14 15:39:59 2020 -0800

HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.
---
 .../hdfs/server/datanode/checker/DatasetVolumeChecker.java  | 13 ++++++++++++-
 .../server/datanode/checker/TestDatasetVolumeChecker.java   |  9 +++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index 0f59b84..05a9ae8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -47,6 +47,7 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -104,6 +105,8 @@ public class DatasetVolumeChecker {
   private static final VolumeCheckContext IGNORED_CONTEXT =
   new VolumeCheckContext();
 
+  private final ExecutorService checkVolumeResultHandlerExecutorService;
+
   /**
* @param conf Configuration object.
* @param timer {@link Timer} object used for throttling checks.
@@ -165,6 +168,12 @@ public class DatasetVolumeChecker {
 .setNameFormat("DataNode DiskChecker thread %d")
 .setDaemon(true)
 .build()));
+
+checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool(
+new ThreadFactoryBuilder()
+.setNameFormat("VolumeCheck ResultHandler thread %d")
+.setDaemon(true)
+.build());
   }
 
   /**
@@ -295,7 +304,9 @@ public class DatasetVolumeChecker {
   Futures.addCallback(olf.get(),
   new ResultHandler(volumeReference, new HashSet(),
   new HashSet(),
-  new AtomicLong(1), callback));
+  new AtomicLong(1), callback),
+  checkVolumeResultHandlerExecutorService
+  );
   return true;
 } else {
   IOUtils.cleanup(null, volumeReference);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index 2a1c824..08aa1c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -19,11 +19,13 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.FakeTimer;
 import org.junit.Rule;
@@ -121,6 +123,13 @@ public class TestDatasetVolumeChecker {
   }
 });
 
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return (numCallbackInvocations.get() > 0);
+  }
+}, 5, 1);
+
 // Ensure that the check was invoked at least once.
 verify(volume, times(1)).check(any(VolumeCheckContext.class));
 if (result) {

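For readers following the HDFS-13339 fix above: the crux is that Futures.addCallback(...) now receives a dedicated ExecutorService, so the ResultHandler (which releases the volume reference) no longer runs on the thread that completed the check future while that thread may still hold locks. Below is a minimal, self-contained sketch of the pattern only; the class, method, and thread names are invented, the trivial Callable stands in for the real disk check, and this is not the DataNode code itself.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

public class CallbackExecutorSketch {
  public static void main(String[] args) {
    // Pool that runs the (possibly slow) check itself.
    ListeningExecutorService checkExecutor = MoreExecutors.listeningDecorator(
        Executors.newSingleThreadExecutor(new ThreadFactoryBuilder()
            .setNameFormat("disk-check-%d").setDaemon(true).build()));

    // Separate pool for result handling, mirroring the new
    // checkVolumeResultHandlerExecutorService added in DatasetVolumeChecker.
    ExecutorService resultHandlerExecutor = Executors.newCachedThreadPool(
        new ThreadFactoryBuilder()
            .setNameFormat("check-result-handler-%d").setDaemon(true).build());

    ListenableFuture<Boolean> healthy = checkExecutor.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() {
        return true; // stand-in for the real volume check
      }
    });

    // With an explicit executor, the callback never piggybacks on the thread
    // that completes the future, so it cannot deadlock on locks held there.
    Futures.addCallback(healthy, new FutureCallback<Boolean>() {
      @Override
      public void onSuccess(Boolean result) {
        System.out.println("check done, release the volume reference: " + result);
      }

      @Override
      public void onFailure(Throwable t) {
        System.out.println("check failed: " + t);
      }
    }, resultHandlerExecutor);

    checkExecutor.shutdown();
    resultHandlerExecutor.shutdown();
  }
}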




[hadoop] branch branch-2.9 updated: HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.

2020-01-14 Thread weichiu
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
 new b1a7534  HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.
b1a7534 is described below

commit b1a753496d01af903a1f10262acb2f336a645ce2
Author: Wei-Chiu Chuang 
AuthorDate: Tue Jan 14 15:39:59 2020 -0800

HDFS-13339. Volume reference can't be released and may lead to deadlock when DataXceiver does a check volume. Contributed by Jim Brennan, Zsolt Venczel.

(cherry picked from commit c675719c3fb0fe5f6b0be624935fdf22bb228e0f)
---
 .../hdfs/server/datanode/checker/DatasetVolumeChecker.java  | 13 ++++++++++++-
 .../server/datanode/checker/TestDatasetVolumeChecker.java   |  9 +++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
index cba6710..92069f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java
@@ -46,6 +46,7 @@ import java.util.HashSet;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -103,6 +104,8 @@ public class DatasetVolumeChecker {
   private static final VolumeCheckContext IGNORED_CONTEXT =
   new VolumeCheckContext();
 
+  private final ExecutorService checkVolumeResultHandlerExecutorService;
+
   /**
* @param conf Configuration object.
* @param timer {@link Timer} object used for throttling checks.
@@ -163,6 +166,12 @@ public class DatasetVolumeChecker {
 .setNameFormat("DataNode DiskChecker thread %d")
 .setDaemon(true)
 .build()));
+
+checkVolumeResultHandlerExecutorService = Executors.newCachedThreadPool(
+new ThreadFactoryBuilder()
+.setNameFormat("VolumeCheck ResultHandler thread %d")
+.setDaemon(true)
+.build());
   }
 
   /**
@@ -293,7 +302,9 @@ public class DatasetVolumeChecker {
   Futures.addCallback(olf.get(),
   new ResultHandler(volumeReference, new HashSet(),
   new HashSet(),
-  new AtomicLong(1), callback));
+  new AtomicLong(1), callback),
+  checkVolumeResultHandlerExecutorService
+  );
   return true;
 } else {
   IOUtils.cleanup(null, volumeReference);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
index 2a1c824..08aa1c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/checker/TestDatasetVolumeChecker.java
@@ -19,11 +19,13 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Supplier;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.VolumeCheckContext;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.FakeTimer;
 import org.junit.Rule;
@@ -121,6 +123,13 @@ public class TestDatasetVolumeChecker {
   }
 });
 
+GenericTestUtils.waitFor(new Supplier<Boolean>() {
+  @Override
+  public Boolean get() {
+return (numCallbackInvocations.get() > 0);
+  }
+}, 5, 1);
+
 // Ensure that the check was invoked at least once.
 verify(volume, times(1)).check(any(VolumeCheckContext.class));
 if (result) {





[hadoop] branch trunk updated: HADOOP-16005. NativeAzureFileSystem does not support setXAttr.

2020-01-14 Thread dazhou
This is an automated email from the ASF dual-hosted git repository.

dazhou pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new c36f09d  HADOOP-16005. NativeAzureFileSystem does not support setXAttr.
c36f09d is described below

commit c36f09deb91454c086926c01f872d8ca4419aee0
Author: Clemens Wolff 
AuthorDate: Tue Jan 14 17:28:37 2020 -0800

HADOOP-16005. NativeAzureFileSystem does not support setXAttr.

Contributed by Clemens Wolff.
---
 .../fs/azure/AzureNativeFileSystemStore.java   |  67 ++---
 .../hadoop/fs/azure/NativeAzureFileSystem.java |  71 ++
 .../hadoop/fs/azure/NativeFileSystemStore.java |   4 +
 .../hadoop/fs/azurebfs/AzureBlobFileSystem.java|  79 
 .../fs/azurebfs/AzureBlobFileSystemStore.java  |   9 ++
 .../fs/azure/NativeAzureFileSystemBaseTest.java|  62 
 .../ITestAzureBlobFileSystemAttributes.java| 104 +
 7 files changed, 384 insertions(+), 12 deletions(-)

diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index 239dec2..414d2f2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -29,6 +29,8 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.net.URLEncoder;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.security.InvalidKeyException;
 import java.util.Calendar;
 import java.util.Date;
@@ -247,6 +249,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
   private static final int DEFAULT_CONCURRENT_WRITES = 8;
 
+  private static final Charset METADATA_ENCODING = StandardCharsets.UTF_8;
+
   // Concurrent reads reads of data written out of band are disable by default.
   //
   private static final boolean DEFAULT_READ_TOLERATE_CONCURRENT_APPEND = false;
@@ -1662,17 +1666,30 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 removeMetadataAttribute(blob, OLD_IS_FOLDER_METADATA_KEY);
   }
 
-  private static void storeLinkAttribute(CloudBlobWrapper blob,
-  String linkTarget) throws UnsupportedEncodingException {
-// We have to URL encode the link attribute as the link URI could
+  private static String encodeMetadataAttribute(String value) throws UnsupportedEncodingException {
+// We have to URL encode the attribute as it could
 // have URI special characters which unless encoded will result
 // in 403 errors from the server. This is due to metadata properties
 // being sent in the HTTP header of the request which is in turn used
 // on the server side to authorize the request.
-String encodedLinkTarget = null;
-if (linkTarget != null) {
-  encodedLinkTarget = URLEncoder.encode(linkTarget, "UTF-8");
-}
+return value == null ? null : URLEncoder.encode(value, METADATA_ENCODING.name());
+  }
+
+  private static String decodeMetadataAttribute(String encoded) throws UnsupportedEncodingException {
+return encoded == null ? null : URLDecoder.decode(encoded, METADATA_ENCODING.name());
+  }
+
+  private static String ensureValidAttributeName(String attribute) {
+// Attribute names must be valid C# identifiers so we have to
+// convert the namespace dots (e.g. "user.something") in the
+// attribute names. Using underscores here to be consistent with
+// the constant metadata keys defined earlier in the file
+return attribute.replace('.', '_');
+  }
+
+  private static void storeLinkAttribute(CloudBlobWrapper blob,
+  String linkTarget) throws UnsupportedEncodingException {
+String encodedLinkTarget = encodeMetadataAttribute(linkTarget);
 storeMetadataAttribute(blob,
 LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
 encodedLinkTarget);
@@ -1686,11 +1703,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 String encodedLinkTarget = getMetadataAttribute(blob,
 LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY,
 OLD_LINK_BACK_TO_UPLOAD_IN_PROGRESS_METADATA_KEY);
-String linkTarget = null;
-if (encodedLinkTarget != null) {
-  linkTarget = URLDecoder.decode(encodedLinkTarget, "UTF-8");
-}
-return linkTarget;
+return decodeMetadataAttribute(encodedLinkTarget);
   }
 
   private static boolean retrieveFolderAttribute(CloudBlobWrapper blob) {
@@ -2212,6 +2225,36 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   }
 
   @Override
+  public byte[] retrieveAttribute(String key, String attribute) throws 

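The HADOOP-16005 change above builds xattr support on top of the blob-metadata helpers visible in the diff: values are URL-encoded because they travel in HTTP headers, and attribute names have their namespace dots mapped to underscores. A small standalone sketch of those helpers follows; the class name and the demo main() are invented for illustration, and this is not the AzureNativeFileSystemStore class itself.

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class MetadataEncodingSketch {

  // Metadata values ride in HTTP headers, so URI special characters must be
  // URL-encoded or the Azure service rejects the request (403).
  static String encodeMetadataAttribute(String value) throws UnsupportedEncodingException {
    return value == null ? null : URLEncoder.encode(value, StandardCharsets.UTF_8.name());
  }

  static String decodeMetadataAttribute(String encoded) throws UnsupportedEncodingException {
    return encoded == null ? null : URLDecoder.decode(encoded, StandardCharsets.UTF_8.name());
  }

  // Metadata keys must be valid C# identifiers, so the "user." namespace dot
  // of an xattr name is replaced with an underscore.
  static String ensureValidAttributeName(String attribute) {
    return attribute.replace('.', '_');
  }

  public static void main(String[] args) throws Exception {
    String key = ensureValidAttributeName("user.myAttr");          // -> "user_myAttr"
    String stored = encodeMetadataAttribute("a value/with specials?");
    System.out.println(key + " = " + stored);
    System.out.println("round trip: " + decodeMetadataAttribute(stored));
  }
}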
[hadoop] branch trunk updated (c36f09d -> a2fdd7c)

2020-01-14 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from c36f09d  HADOOP-16005. NativeAzureFileSystem does not support setXAttr.
 add a2fdd7c  MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

No new revisions were added by this update.

Summary of changes:
 .../v2/app/speculate/forecast/SimpleExponentialSmoothing.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)





[hadoop] branch branch-3.2 updated: MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

2020-01-14 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 483d54f  MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)
483d54f is described below

commit 483d54f1a712bf721623415b83dd7a71196f0bb8
Author: Masatake Iwasaki 
AuthorDate: Wed Jan 15 12:09:09 2020 +0900

MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

(cherry picked from commit a2fdd7c2b59463c39a4d4250f908a9e2d3653ead)
---
 .../v2/app/speculate/forecast/SimpleExponentialSmoothing.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
index 0e00068..dd9dc28 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
@@ -131,8 +131,8 @@ public class SimpleExponentialSmoothing {
   /**
* checks if the task is hanging up.
* @param timeStamp current time of the scan.
-   * @return true if we have number of samples > kMinimumReads and the record
-   * timestamp has expired.
+   * @return true if we have number of samples {@literal >} kMinimumReads and the
+   * record timestamp has expired.
*/
   public boolean isDataStagnated(final long timeStamp) {
 ForecastRecord rec = forecastRefEntry.get();


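The one-line MAPREDUCE-7256 fix above is purely a Javadoc escaping issue: Javadoc text is rendered as HTML, so a bare '>' can be flagged by the doclint checks and fail the docs build, while {@literal >} renders the character literally. A short illustrative example follows; the class name and threshold constant are invented, and this is not the SimpleExponentialSmoothing code itself.

public class JavadocEscapeSketch {

  static final int K_MINIMUM_READS = 8; // hypothetical threshold for the example

  /**
   * Javadoc is rendered as HTML, so a raw '>' in running text may be reported
   * by doclint as malformed HTML and break the build. Wrapping it as
   * {@literal >} produces a literal greater-than sign instead of markup.
   *
   * @param numSamples number of readings collected so far.
   * @return true if the number of samples is {@literal >} K_MINIMUM_READS.
   */
  public boolean hasEnoughSamples(int numSamples) {
    return numSamples > K_MINIMUM_READS;
  }
}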



[hadoop] branch branch-2.10 updated: MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

2020-01-14 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
 new 7a309f1  MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)
7a309f1 is described below

commit 7a309f1f919f25b1110b7106bc1bf2f64e3814fb
Author: Masatake Iwasaki 
AuthorDate: Wed Jan 15 12:09:09 2020 +0900

MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

(cherry picked from commit a2fdd7c2b59463c39a4d4250f908a9e2d3653ead)
---
 .../v2/app/speculate/forecast/SimpleExponentialSmoothing.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
index 0e00068..dd9dc28 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
@@ -131,8 +131,8 @@ public class SimpleExponentialSmoothing {
   /**
* checks if the task is hanging up.
* @param timeStamp current time of the scan.
-   * @return true if we have number of samples > kMinimumReads and the record
-   * timestamp has expired.
+   * @return true if we have number of samples {@literal >} kMinimumReads and the
+   * record timestamp has expired.
*/
   public boolean isDataStagnated(final long timeStamp) {
 ForecastRecord rec = forecastRefEntry.get();





[hadoop] branch branch-3.1 updated: MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

2020-01-14 Thread aajisaka
This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new ce52a66  MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)
ce52a66 is described below

commit ce52a66fcbbb7c745d525cb09e9cfc03da6d9343
Author: Masatake Iwasaki 
AuthorDate: Wed Jan 15 12:09:09 2020 +0900

MAPREDUCE-7256. Fix javadoc error in SimpleExponentialSmoothing. (#1804)

(cherry picked from commit a2fdd7c2b59463c39a4d4250f908a9e2d3653ead)
---
 .../v2/app/speculate/forecast/SimpleExponentialSmoothing.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
index 0e00068..dd9dc28 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/SimpleExponentialSmoothing.java
@@ -131,8 +131,8 @@ public class SimpleExponentialSmoothing {
   /**
* checks if the task is hanging up.
* @param timeStamp current time of the scan.
-   * @return true if we have number of samples > kMinimumReads and the record
-   * timestamp has expired.
+   * @return true if we have number of samples {@literal >} kMinimumReads and the
+   * record timestamp has expired.
*/
   public boolean isDataStagnated(final long timeStamp) {
 ForecastRecord rec = forecastRefEntry.get();





[hadoop] branch trunk updated: YARN-10028. Integrate the new abstract log servlet to the JobHistory server. Contributed by Adam Antal

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 13cea04  YARN-10028. Integrate the new abstract log servlet to the JobHistory server. Contributed by Adam Antal
13cea04 is described below

commit 13cea0412c11ce9ef7e475198a24e71788cf0b2f
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 11:00:08 2020 +0100

YARN-10028. Integrate the new abstract log servlet to the JobHistory server. Contributed by Adam Antal
---
 .../mapreduce/v2/hs/HistoryClientService.java  |  7 ++-
 .../mapreduce/v2/hs/webapp/HsWebServices.java  | 58 --
 .../mapreduce/v2/hs/webapp/TestHsWebServices.java  |  3 ++
 .../v2/hs/webapp/TestHsWebServicesAcls.java|  2 +-
 .../v2/hs/webapp/TestHsWebServicesAttempts.java|  3 ++
 .../v2/hs/webapp/TestHsWebServicesJobConf.java |  3 ++
 .../v2/hs/webapp/TestHsWebServicesJobs.java|  4 ++
 .../v2/hs/webapp/TestHsWebServicesJobsQuery.java   |  3 ++
 .../v2/hs/webapp/TestHsWebServicesTasks.java   |  3 ++
 .../org/apache/hadoop/yarn/webapp/WebApps.java | 12 -
 .../hadoop/yarn/webapp/WebServicesTestUtils.java   |  1 -
 .../hadoop/yarn/server/webapp/AppInfoProvider.java |  2 +
 .../hadoop/yarn/server/webapp/BasicAppInfo.java|  4 ++
 13 files changed, 96 insertions(+), 9 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index b0bf41b..b63aef4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -79,6 +79,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
@@ -150,9 +152,11 @@ public class HistoryClientService extends AbstractService {
   }
 
   @VisibleForTesting
-  protected void initializeWebApp(Configuration conf) {
+  protected void initializeWebApp(Configuration conf) throws IOException {
 webApp = new HsWebApp(history);
 InetSocketAddress bindAddress = MRWebAppUtil.getJHSWebBindAddress(conf);
+ApplicationClientProtocol appClientProtocol =
+ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
 // NOTE: there should be a .at(InetSocketAddress)
 WebApps
 .$for("jobhistory", HistoryClientService.class, this, "ws")
@@ -163,6 +167,7 @@ public class HistoryClientService extends AbstractService {
 JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
 .withCSRFProtection(JHAdminConfig.MR_HISTORY_CSRF_PREFIX)
 .withXFSProtection(JHAdminConfig.MR_HISTORY_XFS_PREFIX)
+.withAppClientProtocol(appClientProtocol)
 .at(NetUtils.getHostPortString(bindAddress)).start(webApp);
 
 String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
index dabb760..e3804e9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 
 import java.io.IOException;
 
+import javax.annotation.Nullable;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.DefaultValue;
 import javax.ws.rs.GET;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
@@ -30,9 +32,12 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.WebApplicationException;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
 

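The YARN-10028 diff above wires an ApplicationClientProtocol proxy into the JobHistory web app through the new withAppClientProtocol(...) builder call; the proxy itself comes from the plain ClientRMProxy call shown in the patch. A minimal standalone sketch of just that proxy creation follows; the class name is invented, configuration is left at defaults, and this is not the JobHistory server code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.client.ClientRMProxy;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmClientProxySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new YarnConfiguration();
    // The new log servlet in the JobHistory server needs to talk to the RM,
    // so HistoryClientService builds this proxy up front and hands it to the
    // web app builder (WebApps ... withAppClientProtocol(appClientProtocol)).
    ApplicationClientProtocol appClient =
        ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    System.out.println("created RM client proxy: " + appClient);
  }
}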
[hadoop] branch branch-3.1 updated: HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 3a09593  HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal
3a09593 is described below

commit 3a09593a73c1c919bebf344bd4a2afdddcbb85c9
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 11:19:52 2020 +0100

HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal
---
 .../org/apache/hadoop/io/retry/RetryPolicies.java | 12 +++++++++++-
 .../org/apache/hadoop/io/retry/TestRetryProxy.java| 19 +++++++++++++++++++
 .../hadoop/io/retry/UnreliableImplementation.java |  7 +++++++
 .../apache/hadoop/io/retry/UnreliableInterface.java   |  4 ++++
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a89c3a7..fcbcc86 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -690,7 +690,8 @@ public class RetryPolicies {
   } else if (e instanceof InvalidToken) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Invalid or Cancelled Token");
-  } else if (e instanceof AccessControlException) {
+  } else if (e instanceof AccessControlException ||
+  hasWrappedAccessControlException(e)) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Access denied");
   } else if (e instanceof SocketException
@@ -761,4 +762,13 @@ public class RetryPolicies {
 return unwrapped instanceof RetriableException ? 
 (RetriableException) unwrapped : null;
   }
+
+  private static boolean hasWrappedAccessControlException(Exception e) {
+Throwable throwable = e;
+while (!(throwable instanceof AccessControlException) &&
+throwable.getCause() != null) {
+  throwable = throwable.getCause();
+}
+return throwable instanceof AccessControlException;
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 2116fb2..a1135a0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -377,4 +377,23 @@ public class TestRetryProxy {
   assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
 }
   }
+
+  @Test
+  public void testWrappedAccessControlException() throws Exception {
+RetryPolicy policy = mock(RetryPolicy.class);
+RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+setupMockPolicy(policy, realPolicy);
+
+UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+UnreliableInterface.class, unreliableImpl, policy);
+
+try {
+  unreliable.failsWithWrappedAccessControlException();
+  fail("Should fail");
+} catch (IOException expected) {
+  verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+  anyInt(), anyBoolean());
+  assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+}
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index a20d898..15a84bb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -139,6 +139,13 @@ class UnreliableImplementation implements UnreliableInterface {
 }
   }
 
+  public void failsWithWrappedAccessControlException()
+  throws IOException {
+AccessControlException ace = new AccessControlException();
+IOException ioe = new IOException(ace);
+throw new IOException(ioe);
+  }
+
   @Override
   public String succeedsOnceThenFailsReturningString()
   throws UnreliableException, IOException, StandbyException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
index 738a760..80bf47d 100644
--- 

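The heart of HADOOP-16683 above is the small cause-chain walk added to RetryPolicies: a failure is treated as non-retriable when an AccessControlException is buried anywhere inside the wrapping IOExceptions. A self-contained demonstration follows; the class name is invented, it mirrors (not reuses) the patched method, and it assumes hadoop-common on the classpath for AccessControlException.

import java.io.IOException;

import org.apache.hadoop.security.AccessControlException;

public class WrappedAceSketch {

  // Same logic as hasWrappedAccessControlException() in the patch: walk
  // getCause() until an AccessControlException turns up or the chain ends.
  static boolean hasWrappedAccessControlException(Exception e) {
    Throwable throwable = e;
    while (!(throwable instanceof AccessControlException)
        && throwable.getCause() != null) {
      throwable = throwable.getCause();
    }
    return throwable instanceof AccessControlException;
  }

  public static void main(String[] args) {
    // Same shape as UnreliableImplementation#failsWithWrappedAccessControlException:
    // an AccessControlException buried two IOExceptions deep.
    Exception wrapped = new IOException(new IOException(new AccessControlException()));
    System.out.println(hasWrappedAccessControlException(wrapped));           // true
    System.out.println(hasWrappedAccessControlException(new IOException())); // false
  }
}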
[hadoop] branch branch-3.2 updated: HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 1d2c5df  HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal
1d2c5df is described below

commit 1d2c5dffa8489f389b9f52138feb67b61c492306
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 11:18:54 2020 +0100

HADOOP-16683. Disable retry of FailoverOnNetworkExceptionRetry in case of wrapped AccessControlException. Contributed by Adam Antal
---
 .../org/apache/hadoop/io/retry/RetryPolicies.java | 12 +++++++++++-
 .../org/apache/hadoop/io/retry/TestRetryProxy.java| 19 +++++++++++++++++++
 .../hadoop/io/retry/UnreliableImplementation.java |  7 +++++++
 .../apache/hadoop/io/retry/UnreliableInterface.java   |  4 ++++
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index a89c3a7..fcbcc86 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -690,7 +690,8 @@ public class RetryPolicies {
   } else if (e instanceof InvalidToken) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Invalid or Cancelled Token");
-  } else if (e instanceof AccessControlException) {
+  } else if (e instanceof AccessControlException ||
+  hasWrappedAccessControlException(e)) {
 return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
 "Access denied");
   } else if (e instanceof SocketException
@@ -761,4 +762,13 @@ public class RetryPolicies {
 return unwrapped instanceof RetriableException ? 
 (RetriableException) unwrapped : null;
   }
+
+  private static boolean hasWrappedAccessControlException(Exception e) {
+Throwable throwable = e;
+while (!(throwable instanceof AccessControlException) &&
+throwable.getCause() != null) {
+  throwable = throwable.getCause();
+}
+return throwable instanceof AccessControlException;
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
index 2116fb2..a1135a0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
@@ -377,4 +377,23 @@ public class TestRetryProxy {
   assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
 }
   }
+
+  @Test
+  public void testWrappedAccessControlException() throws Exception {
+RetryPolicy policy = mock(RetryPolicy.class);
+RetryPolicy realPolicy = RetryPolicies.failoverOnNetworkException(5);
+setupMockPolicy(policy, realPolicy);
+
+UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+UnreliableInterface.class, unreliableImpl, policy);
+
+try {
+  unreliable.failsWithWrappedAccessControlException();
+  fail("Should fail");
+} catch (IOException expected) {
+  verify(policy, times(1)).shouldRetry(any(Exception.class), anyInt(),
+  anyInt(), anyBoolean());
+  assertEquals(RetryDecision.FAIL, caughtRetryAction.action);
+}
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
index a20d898..15a84bb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableImplementation.java
@@ -139,6 +139,13 @@ class UnreliableImplementation implements UnreliableInterface {
 }
   }
 
+  public void failsWithWrappedAccessControlException()
+  throws IOException {
+AccessControlException ace = new AccessControlException();
+IOException ioe = new IOException(ace);
+throw new IOException(ioe);
+  }
+
   @Override
   public String succeedsOnceThenFailsReturningString()
   throws UnreliableException, IOException, StandbyException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/UnreliableInterface.java
index 738a760..80bf47d 100644
--- 

[hadoop] branch trunk updated: YARN-9788. Queue Management API does not support parallel updates. Contributed by Prabhu Joseph

2020-01-14 Thread snemeth
This is an automated email from the ASF dual-hosted git repository.

snemeth pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1c51f36  YARN-9788. Queue Management API does not support parallel updates. Contributed by Prabhu Joseph
1c51f36 is described below

commit 1c51f36be79924489ca4a2e5ca7e96ac75a6ec18
Author: Szilard Nemeth 
AuthorDate: Tue Jan 14 12:26:03 2020 +0100

YARN-9788. Queue Management API does not support parallel updates. Contributed by Prabhu Joseph
---
 .../hadoop/yarn/client/cli/TestSchedConfCLI.java   |  7 +--
 .../scheduler/MutableConfigurationProvider.java| 10 ++--
 .../conf/FSSchedulerConfigurationStore.java|  8 +--
 .../capacity/conf/InMemoryConfigurationStore.java  | 10 ++--
 .../capacity/conf/LeveldbConfigurationStore.java   |  6 +--
 .../conf/MutableCSConfigurationProvider.java   |  8 +--
 .../capacity/conf/YarnConfigurationStore.java  |  6 ++-
 .../capacity/conf/ZKConfigurationStore.java|  7 +--
 .../resourcemanager/webapp/RMWebServices.java  |  8 +--
 .../capacity/conf/ConfigurationStoreBaseTest.java  |  6 +--
 .../conf/TestFSSchedulerConfigurationStore.java|  8 +--
 .../conf/TestLeveldbConfigurationStore.java| 16 +++---
 .../conf/TestMutableCSConfigurationProvider.java   | 57 +-
 .../capacity/conf/TestZKConfigurationStore.java| 30 ++--
 14 files changed, 116 insertions(+), 71 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
index 4233b4c..3b961df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestSchedConfCLI.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver;
@@ -247,10 +248,10 @@ public class TestSchedConfCLI extends JerseyTestBase {
   globalUpdates.put("schedKey1", "schedVal1");
   schedUpdateInfo.setGlobalParams(globalUpdates);
 
-  provider.logAndApplyMutation(UserGroupInformation.getCurrentUser(),
-  schedUpdateInfo);
+  LogMutation log = provider.logAndApplyMutation(
+  UserGroupInformation.getCurrentUser(), schedUpdateInfo);
   rm.getRMContext().getRMAdminService().refreshQueues();
-  provider.confirmPendingMutation(true);
+  provider.confirmPendingMutation(log, true);
 
   Configuration schedulerConf = provider.getConfiguration();
   assertEquals("schedVal1", schedulerConf.get("schedKey1"));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
index 03902e3..751c9a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/MutableConfigurationProvider.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
 import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;
 
 import java.io.IOException;
@@ -46,18 +47,21 @@ public interface MutableConfigurationProvider {
* Log user's requested configuration mutation, and applies it in-memory.
* @param user User who requested the change
* @param confUpdate User's
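The TestSchedConfCLI change visible above shows the reworked YARN-9788 flow: logAndApplyMutation(...) now returns a LogMutation handle, and confirmPendingMutation(...) takes that handle back, which is what lets several configuration updates be pending at once instead of a single implicit pending slot. A hedged sketch of that flow follows; the wrapper class and method names are invented, obtaining the MutableConfigurationProvider instance is not shown, and the refreshQueues step the test performs between the two calls is omitted.

import java.util.HashMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.MutableConfigurationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.conf.YarnConfigurationStore.LogMutation;
import org.apache.hadoop.yarn.webapp.dao.SchedConfUpdateInfo;

public class SchedConfMutationSketch {

  static void applyGlobalSetting(MutableConfigurationProvider provider,
      String key, String value) throws Exception {
    SchedConfUpdateInfo update = new SchedConfUpdateInfo();
    HashMap<String, String> globals = new HashMap<>();
    globals.put(key, value);
    update.setGlobalParams(globals);

    // Step 1: log the mutation and apply it in memory. The returned handle
    // identifies this particular pending change.
    LogMutation pending = provider.logAndApplyMutation(
        UserGroupInformation.getCurrentUser(), update);

    // (In the RM, the admin service refreshes queues here; omitted.)

    // Step 2: confirm (or reject, with false) exactly that mutation.
    provider.confirmPendingMutation(pending, true);

    Configuration schedulerConf = provider.getConfiguration();
    System.out.println(key + " = " + schedulerConf.get(key));
  }
}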