(hadoop) branch trunk updated (b25b28e5bbda -> 0f51d2a4ec17)

2024-03-17 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


from b25b28e5bbda HDFS-17380. FsImageValidation: remove inaccessible nodes. 
(#6549). Contributed by Tsz-wo Sze.
 add 0f51d2a4ec17 HADOOP-14451. Deadlock in NativeIO (#6632)

No new revisions were added by this update.

Summary of changes:
 .../org/apache/hadoop/io/nativeio/NativeIO.java|  26 +++--
 .../src/org/apache/hadoop/io/nativeio/NativeIO.c   | 126 ++---
 .../hadoop/io/nativeio/TestNativeIoInit.java   |  87 ++
 3 files changed, 191 insertions(+), 48 deletions(-)
 create mode 100644 
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIoInit.java
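HADOOP-14451 is a class-initialization deadlock around NativeIO's static setup; the committed fix is in the NativeIO.java/NativeIO.c changes listed above. As a hedged illustration only (names and structure below are hypothetical, not the Hadoop code), the classic Java form of this bug is two classes whose static initializers reference each other from different threads, so each thread holds one class-init lock (JLS 12.4.2) and waits forever on the other:

// Illustrative only -- NOT the Hadoop code. A minimal two-class
// initialization deadlock of the kind HADOOP-14451 addresses.
public final class InitDeadlockDemo {
  static final class A {
    static final long T = System.nanoTime();   // non-constant, so reads trigger <clinit>
    static { pause(); touch(B.T); }            // A's <clinit> then waits for B's init lock
  }
  static final class B {
    static final long T = System.nanoTime();
    static { pause(); touch(A.T); }            // B's <clinit> then waits for A's init lock
  }
  static void touch(long ignored) { }
  static void pause() {
    try { Thread.sleep(200); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
  }
  public static void main(String[] args) {
    new Thread(() -> touch(A.T)).start();      // thread 1 starts initializing A
    new Thread(() -> touch(B.T)).start();      // thread 2 starts initializing B -> deadlock window
  }
}

Judging by its name, the new TestNativeIoInit exercises the real NativeIO initialization from multiple threads to guard against this pattern.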


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



(hadoop) branch trunk updated: [HADOOP-19010] - NullPointerException in Hadoop Credential Check CLI (#6351)

2023-12-15 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 62cc673d00ed [HADOOP-19010] - NullPointerException in Hadoop 
Credential Check CLI (#6351)
62cc673d00ed is described below

commit 62cc673d00ed64437fc787f85c8091f8357461da
Author: Anika Kelhanka 
AuthorDate: Sat Dec 16 12:23:52 2023 +0530

[HADOOP-19010] - NullPointerException in Hadoop Credential Check CLI (#6351)
---
 .../apache/hadoop/security/alias/CredentialShell.java| 16 +++-
 .../org/apache/hadoop/security/alias/TestCredShell.java  | 15 +++
 2 files changed, 26 insertions(+), 5 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index 66df17a181e5..45b5af36bbbf 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -25,6 +25,7 @@ import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
 import org.apache.hadoop.classification.VisibleForTesting;
 
 import org.apache.commons.lang3.StringUtils;
@@ -365,12 +366,17 @@ public class CredentialShell extends CommandShell {
         } else {
           password = c.readPassword("Enter alias password: ");
         }
-        char[] storePassword =
-            provider.getCredentialEntry(alias).getCredential();
-        String beMatch =
-            Arrays.equals(storePassword, password) ? "success" : "failed";
+        CredentialEntry credentialEntry = provider.getCredentialEntry(alias);
+        if(credentialEntry == null) {
+          // Fail the password match when alias not found
+          getOut().println("Password match failed for " + alias + ".");
+        } else {
+          char[] storePassword = credentialEntry.getCredential();
+          String beMatch =
+              Arrays.equals(storePassword, password) ? "success" : "failed";
 
-        getOut().println("Password match " + beMatch + " for " +  alias + ".");
+          getOut().println("Password match " + beMatch + " for " + alias + ".");
+        }
       } catch (IOException e) {
         getOut().println("Cannot check aliases for CredentialProvider: " +
             provider.toString()
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
index bf72b52b3206..706158930293 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
@@ -165,6 +165,21 @@ public class TestCredShell {
 assertTrue(outContent.toString().contains("Passwords don't match"));
   }
 
+  @Test
+  public void testPromptForCredentialNotFound() throws Exception {
+    String[] args1 = {"check", "credential1", "-provider",
+        jceksProvider};
+    ArrayList<String> password = new ArrayList<>();
+    password.add("p@ssw0rd");
+    int rc = 0;
+    CredentialShell shell = new CredentialShell();
+    shell.setConf(new Configuration());
+    shell.setPasswordReader(new MockPasswordReader(password));
+    rc = shell.run(args1);
+    assertEquals(0, rc);
+    assertOutputContains("Password match failed for credential1.");
+  }
+
   @Test
   public void testPromptForCredential() throws Exception {
 String[] args1 = {"create", "credential1", "-provider",


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses milli seconds in JDK < 10.b09 (#2387)

2020-10-23 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new e21b812  HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses 
milli seconds in JDK < 10.b09 (#2387)
e21b812 is described below

commit e21b81276e04138301dae1aa88da7bc4a51722fd
Author: Vinayakumar B 
AuthorDate: Fri Oct 23 11:30:02 2020 +0530

HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses milli seconds 
in JDK < 10.b09 (#2387)
---
 .../org/apache/hadoop/fs/RawLocalFileSystem.java   | 11 +++---
 .../hadoop/fs/TestRawLocalFileSystemContract.java  | 24 ++
 2 files changed, 32 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index 72eeb99..1294096 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -72,7 +72,12 @@ public class RawLocalFileSystem extends FileSystem {
   public static void useStatIfAvailable() {
 useDeprecatedFileStatus = !Stat.isAvailable();
   }
-  
+
+  @VisibleForTesting
+  static void setUseDeprecatedFileStatus(boolean useDeprecatedFileStatus) {
+RawLocalFileSystem.useDeprecatedFileStatus = useDeprecatedFileStatus;
+  }
+
   public RawLocalFileSystem() {
 workingDir = getInitialWorkingDirectory();
   }
@@ -700,8 +705,8 @@ public class RawLocalFileSystem extends FileSystem {
 DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
   throws IOException {
   super(f.length(), f.isDirectory(), 1, defaultBlockSize,
-  f.lastModified(), getLastAccessTime(f),
-  null, null, null,
+  Files.getLastModifiedTime(f.toPath()).toMillis(),
+  getLastAccessTime(f),null, null, null,
   new Path(f.getPath()).makeQualified(fs.getUri(),
 fs.getWorkingDirectory()));
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index b51419d..cb45c9e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -203,4 +203,28 @@ public class TestRawLocalFileSystemContract extends 
FileSystemContractBaseTest {
 }
   }
 
+  @Test
+  public void testMTimeAtime() throws IOException {
+RawLocalFileSystem.setUseDeprecatedFileStatus(true);
+try {
+  Path testDir = getTestBaseDir();
+  String testFilename = "testmtime";
+  Path path = new Path(testDir, testFilename);
+  Path file = new Path(path, "file");
+  fs.create(file);
+  long now = System.currentTimeMillis();
+  long mtime = (now % 1000 == 0) ? now + 1 : now;
+  long atime = (now % 1000 == 0) ? now + 2 : now;
+  fs.setTimes(file, mtime, atime);
+  FileStatus fileStatus = fs.getFileStatus(file);
+  if (!Shell.MAC) {
+// HADOOP-17306 ; Skip MacOS because HFS+ does not support
+// milliseconds for mtime.
+assertEquals(mtime, fileStatus.getModificationTime());
+  }
+  assertEquals(atime, fileStatus.getAccessTime());
+} finally {
+  RawLocalFileSystem.useStatIfAvailable();
+}
+  }
 }
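The functional change above is the swap from File.lastModified() to Files.getLastModifiedTime(...).toMillis(). A self-contained sketch of the difference (illustrative; whether the legacy API truncates to whole seconds depends on the JDK build and filesystem, per the "JDK < 10.b09" caveat in the subject):

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

// Sketch: compares the legacy and NIO mtime APIs that the patch swaps.
public class MTimeDemo {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("mtime", null);
    f.deleteOnExit();
    f.setLastModified(1_603_434_123_456L);   // mtime with a 456 ms component
    System.out.println(f.lastModified());    // may print ...123000 on affected JDKs
    System.out.println(Files.getLastModifiedTime(f.toPath()).toMillis());  // keeps the millis
  }
}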


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses milli seconds in JDK < 10.b09 (#2387)

2020-10-23 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8c6478b  HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses 
milli seconds in JDK < 10.b09 (#2387)
8c6478b is described below

commit 8c6478b4058025e01e81a6c372ec3e9dfa36f196
Author: Vinayakumar B 
AuthorDate: Fri Oct 23 11:30:02 2020 +0530

HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses milli seconds 
in JDK < 10.b09 (#2387)
---
 .../org/apache/hadoop/fs/RawLocalFileSystem.java   | 11 +++---
 .../hadoop/fs/TestRawLocalFileSystemContract.java  | 25 ++
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index cf22105..5a9a44a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -71,7 +71,12 @@ public class RawLocalFileSystem extends FileSystem {
   public static void useStatIfAvailable() {
 useDeprecatedFileStatus = !Stat.isAvailable();
   }
-  
+
+  @VisibleForTesting
+  static void setUseDeprecatedFileStatus(boolean useDeprecatedFileStatus) {
+RawLocalFileSystem.useDeprecatedFileStatus = useDeprecatedFileStatus;
+  }
+
   public RawLocalFileSystem() {
 workingDir = getInitialWorkingDirectory();
   }
@@ -693,8 +698,8 @@ public class RawLocalFileSystem extends FileSystem {
 DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs)
   throws IOException {
   super(f.length(), f.isDirectory(), 1, defaultBlockSize,
-  f.lastModified(), getLastAccessTime(f),
-  null, null, null,
+  Files.getLastModifiedTime(f.toPath()).toMillis(),
+  getLastAccessTime(f),null, null, null,
   new Path(f.getPath()).makeQualified(fs.getUri(),
 fs.getWorkingDirectory()));
 }
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
index 3892f16..0d57e8f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.File;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -167,4 +168,28 @@ public class TestRawLocalFileSystemContract extends 
FileSystemContractBaseTest {
 }
   }
 
+  @Test
+  public void testMTimeAtime() throws IOException {
+RawLocalFileSystem.setUseDeprecatedFileStatus(true);
+try {
+  Path testDir = getTestBaseDir();
+  String testFilename = "testmtime";
+  Path path = new Path(testDir, testFilename);
+  Path file = new Path(path, "file");
+  fs.create(file);
+  long now = System.currentTimeMillis();
+  long mtime = (now % 1000 == 0) ? now + 1 : now;
+  long atime = (now % 1000 == 0) ? now + 2 : now;
+  fs.setTimes(file, mtime, atime);
+  FileStatus fileStatus = fs.getFileStatus(file);
+  if (!Shell.MAC) {
+// HADOOP-17306 ; Skip MacOS because HFS+ does not support
+// milliseconds for mtime.
+assertEquals(mtime, fileStatus.getModificationTime());
+  }
+  assertEquals(atime, fileStatus.getAccessTime());
+} finally {
+  RawLocalFileSystem.useStatIfAvailable();
+}
+  }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (da1b6e3 -> d259928)

2020-10-23 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from da1b6e3  HDFS-15622. Deleted blocks linger in the replications queue. 
Contributed by Ahmed Hussein.
 add d259928  HADOOP-17306. RawLocalFileSystem's lastModifiedTime() looses 
milli seconds in JDK < 10.b09 (#2387)

No new revisions were added by this update.

Summary of changes:
 .../org/apache/hadoop/fs/RawLocalFileSystem.java   | 11 +++---
 .../hadoop/fs/TestRawLocalFileSystemContract.java  | 24 ++
 2 files changed, 32 insertions(+), 3 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-17278. Shade guava 29.0-jre in hadoop thirdparty. (#8)

2020-09-27 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1408208  HADOOP-17278. Shade guava 29.0-jre in hadoop thirdparty. (#8)
1408208 is described below

commit 140820867145c06bd4a89037de58d56025a60992
Author: Ayush Saxena 
AuthorDate: Sun Sep 27 20:23:30 2020 +0530

HADOOP-17278. Shade guava 29.0-jre in hadoop thirdparty. (#8)
---
 hadoop-shaded-guava/pom.xml | 114 
 pom.xml |   2 +
 2 files changed, 116 insertions(+)

diff --git a/hadoop-shaded-guava/pom.xml b/hadoop-shaded-guava/pom.xml
new file mode 100644
index 000..dc650de
--- /dev/null
+++ b/hadoop-shaded-guava/pom.xml
@@ -0,0 +1,114 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- ... -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hadoop-thirdparty</artifactId>
+    <groupId>org.apache.hadoop.thirdparty</groupId>
+    <version>1.1.0-SNAPSHOT</version>
+    <relativePath>..</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <artifactId>hadoop-shaded-guava</artifactId>
+  <name>Apache Hadoop shaded Guava</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <version>${guava.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.errorprone</groupId>
+          <artifactId>error_prone_annotations</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <directory>${project.basedir}/..</directory>
+        <targetPath>META-INF</targetPath>
+        <includes>
+          <include>licenses-binary/*</include>
+          <include>NOTICE.txt</include>
+          <include>NOTICE-binary</include>
+        </includes>
+      </resource>
+      <resource>
+        <directory>${project.basedir}/src/main/resources</directory>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-shade-plugin</artifactId>
+        <configuration>
+          <createSourcesJar>true</createSourcesJar>
+        </configuration>
+        <executions>
+          <execution>
+            <id>shade-guava</id>
+            <phase>package</phase>
+            <goals>
+              <goal>shade</goal>
+            </goals>
+            <configuration>
+              <artifactSet>
+                <excludes>
+                  <exclude>com.google.code.findbugs:jsr305</exclude>
+                </excludes>
+              </artifactSet>
+              <relocations>
+                <relocation>
+                  <pattern>com/google/</pattern>
+                  <shadedPattern>${shaded.prefix}/com/google/</shadedPattern>
+                </relocation>
+                <relocation>
+                  <pattern>org/checkerframework/</pattern>
+                  <shadedPattern>${shaded.prefix}/org/checkerframework/</shadedPattern>
+                </relocation>
+              </relocations>
+              <filters>
+                <filter>
+                  <artifact>*:*</artifact>
+                  <excludes>
+                    <exclude>NOTICE</exclude>
+                    <exclude>LICENSE</exclude>
+                  </excludes>
+                </filter>
+              </filters>
+              <transformers>
+                <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                  <resource>META-INF/LICENSE.txt</resource>
+                  <file>${basedir}/../LICENSE-binary</file>
+                </transformer>
+              </transformers>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
diff --git a/pom.xml b/pom.xml
index 8d6f1e3..fa07fc2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -95,6 +95,7 @@
     <shaded.prefix>org.apache.hadoop.thirdparty</shaded.prefix>
     <protobuf.shaded.prefix>${shaded.prefix}.protobuf</protobuf.shaded.prefix>
     <protobuf.version>3.7.1</protobuf.version>
+    <guava.version>29.0-jre</guava.version>

     2.8.1
@@ -123,6 +124,7 @@
   <modules>
     <module>hadoop-shaded-protobuf_3_7</module>
     <module>hadoop-shaded-jaeger</module>
+    <module>hadoop-shaded-guava</module>
   </modules>

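For downstream consumers, the relocation above means Guava types are imported under the shaded prefix. A hedged usage sketch (class choice illustrative), assuming org.apache.hadoop.thirdparty:hadoop-shaded-guava is on the classpath:

// Sketch: Guava consumed under the relocation declared in the pom above
// (com/google/ -> ${shaded.prefix}/com/google/).
// before shading: import com.google.common.base.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;

public class ShadedGuavaDemo {
  public static void main(String[] args) {
    Preconditions.checkArgument(args.length == 0, "expected no arguments");
    System.out.println("shaded Guava relocations from HADOOP-17278 in use");
  }
}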

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HDFS-15098. Add SM4 encryption method for HDFS. Contributed by liusheng

2020-09-27 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 82b86e3  HDFS-15098. Add SM4 encryption method for HDFS. Contributed 
by liusheng
82b86e3 is described below

commit 82b86e3754225d82b91f1950f547cb97a0d24ade
Author: Vinayakumar B 
AuthorDate: Sun Sep 27 19:27:13 2020 +0530

HDFS-15098. Add SM4 encryption method for HDFS. Contributed by liusheng
---
 hadoop-common-project/hadoop-common/pom.xml|   1 -
 .../apache/hadoop/crypto/AesCtrCryptoCodec.java|  70 --
 .../java/org/apache/hadoop/crypto/CipherSuite.java |   3 +-
 .../java/org/apache/hadoop/crypto/CryptoCodec.java |   6 +-
 .../apache/hadoop/crypto/CryptoStreamUtils.java|   8 +-
 .../apache/hadoop/crypto/JceAesCtrCryptoCodec.java | 136 +++
 ...sCtrCryptoCodec.java => JceCtrCryptoCodec.java} | 149 +++--
 .../apache/hadoop/crypto/JceSm4CtrCryptoCodec.java |  65 +
 .../hadoop/crypto/OpensslAesCtrCryptoCodec.java| 134 +++---
 .../org/apache/hadoop/crypto/OpensslCipher.java|  39 --
 ...CryptoCodec.java => OpensslCtrCryptoCodec.java} | 144 +++-
 .../hadoop/crypto/OpensslSm4CtrCryptoCodec.java|  79 +++
 .../org/apache/hadoop/crypto/key/KeyProvider.java  |  11 +-
 .../hadoop/crypto/random/OsSecureRandom.java   |   8 +-
 .../hadoop/fs/CommonConfigurationKeysPublic.java   |  17 +++
 .../src/org/apache/hadoop/crypto/OpensslCipher.c   | 119 ++--
 .../hadoop/crypto/org_apache_hadoop_crypto.h   |   8 +-
 .../src/main/resources/core-default.xml|  20 +++
 .../hadoop-common/src/site/markdown/SecureMode.md  |   2 +
 .../org/apache/hadoop/crypto/TestCryptoCodec.java  | 100 --
 .../TestCryptoStreamsWithJceSm4CtrCryptoCodec.java |  48 +++
 ...tCryptoStreamsWithOpensslAesCtrCryptoCodec.java |  17 ++-
 ...CryptoStreamsWithOpensslSm4CtrCryptoCodec.java} |  52 +++
 .../datatransfer/sasl/DataTransferSaslUtil.java|  10 +-
 .../datatransfer/sasl/SaslDataTransferClient.java  |  13 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java |   4 +
 .../hadoop-hdfs-client/src/main/proto/hdfs.proto   |   1 +
 .../src/site/markdown/TransparentEncryption.md |   8 +-
 28 files changed, 756 insertions(+), 516 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index fb4193a..1d521e9 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -323,7 +323,6 @@
 
   org.bouncycastle
   bcprov-jdk15on
-  test
 
 
   org.apache.kerby
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
deleted file mode 100644
index 3e52560..000
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.crypto;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import com.google.common.base.Preconditions;
-
-import java.io.IOException;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public abstract class AesCtrCryptoCodec extends CryptoCodec {
-
-  protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING;
-
-  /**
-   * For AES, the algorithm block is fixed size of 128 bits.
-   * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
-   */
-  private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize();
-
-  @Override
-  public CipherSuite getCipherSuite() {
-return SUITE;
-  }
-  
-  /**
-   * The IV is produced by adding the initial IV to the counter. IV length 
-   * should be the same as {@link #AES_BLOCK_SIZE}
-   */
-  @Override
-  public void calculateIV(byte[] 

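Judging from the file list above (a new CipherSuite entry plus JceSm4CtrCryptoCodec and OpensslSm4CtrCryptoCodec), callers obtain the SM4 codec through the usual CryptoCodec factory. A hedged sketch; the enum constant name is an assumption inferred from the CipherSuite.java change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoCodec;

// Hedged sketch of selecting the new SM4 codec; SM4_CTR_NOPADDING is assumed
// from the changed files, and getInstance returns null if no codec is found.
public class Sm4CodecDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    CryptoCodec codec = CryptoCodec.getInstance(conf, CipherSuite.SM4_CTR_NOPADDING);
    System.out.println(codec == null ? "no SM4 codec available"
        : codec.getCipherSuite().getName());
  }
}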
[hadoop] branch branch-3.3.0 updated: YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException with only shaded client jars (#2075)

2020-06-16 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.3.0
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3.0 by this push:
 new 8382e31  YARN-10314. YarnClient throws NoClassDefFoundError for 
WebSocketException with only shaded client jars (#2075)
8382e31 is described below

commit 8382e31c0c33c3d69aff8690adc7c1bbe5137ee6
Author: Vinayakumar B 
AuthorDate: Wed Jun 17 09:26:41 2020 +0530

YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException 
with only shaded client jars (#2075)
---
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 16 +---
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 11 +++
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 52595d9..dd954d3 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -811,15 +811,25 @@
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
-            <!-- ... -->
+            <!-- ... -->
             <filter>
               <artifact>org.eclipse.jetty:jetty-client</artifact>
               <excludes>
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-xml</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-http</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
           </filters>
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 4960235..bf5e527 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -158,12 +158,8 @@
                       <!-- ... -->
                       <exclude>com.google.code.findbugs:jsr305</exclude>
                       <exclude>io.dropwizard.metrics:metrics-core</exclude>
-                      <exclude>org.eclipse.jetty.websocket:*</exclude>
                       <exclude>org.eclipse.jetty:jetty-servlet</exclude>
                       <exclude>org.eclipse.jetty:jetty-security</exclude>
-                      <exclude>org.eclipse.jetty:jetty-client</exclude>
-                      <exclude>org.eclipse.jetty:jetty-http</exclude>
-                      <exclude>org.eclipse.jetty:jetty-xml</exclude>
                       <exclude>org.ow2.asm:*</exclude>
                       <!-- ... -->
                       <exclude>org.bouncycastle:*</exclude>
@@ -214,6 +210,13 @@
                   </excludes>
                 </filter>
+                <filter>
+                  <artifact>org.eclipse.jetty.websocket:*</artifact>
+                  <excludes>
+                    <exclude>about.html</exclude>
+                  </excludes>
+                </filter>
+
                 <filter>
                   <artifact>org.apache.kerby:kerb-util</artifact>
                   <excludes>

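A hedged sketch of the failure mode this fixes (illustrative; run with only hadoop-client-api and hadoop-client-runtime on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;

// Hedged repro sketch for YARN-10314: with only the shaded client jars on
// the classpath, starting YarnClient could fail with NoClassDefFoundError
// for org.eclipse.jetty.websocket classes before this pom change.
public class YarnClientStartDemo {
  public static void main(String[] args) {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new Configuration());
    client.start();   // pre-patch: NoClassDefFoundError (WebSocketException)
    client.stop();
  }
}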

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.3 updated: YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException with only shaded client jars (#2075)

2020-06-16 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new c1ef247  YARN-10314. YarnClient throws NoClassDefFoundError for 
WebSocketException with only shaded client jars (#2075)
c1ef247 is described below

commit c1ef247dc694097533a6bda4697f593deab2afb1
Author: Vinayakumar B 
AuthorDate: Wed Jun 17 09:26:41 2020 +0530

YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException 
with only shaded client jars (#2075)
---
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 16 +---
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 11 +++
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index 6c8bc21..48b6619 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -811,15 +811,25 @@
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
-            <!-- ... -->
+            <!-- ... -->
             <filter>
               <artifact>org.eclipse.jetty:jetty-client</artifact>
               <excludes>
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-xml</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-http</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
           </filters>
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 5e00f9f..80bd1ee 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -158,12 +158,8 @@
                       <!-- ... -->
                       <exclude>com.google.code.findbugs:jsr305</exclude>
                       <exclude>io.dropwizard.metrics:metrics-core</exclude>
-                      <exclude>org.eclipse.jetty.websocket:*</exclude>
                       <exclude>org.eclipse.jetty:jetty-servlet</exclude>
                       <exclude>org.eclipse.jetty:jetty-security</exclude>
-                      <exclude>org.eclipse.jetty:jetty-client</exclude>
-                      <exclude>org.eclipse.jetty:jetty-http</exclude>
-                      <exclude>org.eclipse.jetty:jetty-xml</exclude>
                       <exclude>org.ow2.asm:*</exclude>
                       <!-- ... -->
                       <exclude>org.bouncycastle:*</exclude>
@@ -214,6 +210,13 @@
                   </excludes>
                 </filter>
+                <filter>
+                  <artifact>org.eclipse.jetty.websocket:*</artifact>
+                  <excludes>
+                    <exclude>about.html</exclude>
+                  </excludes>
+                </filter>
+
                 <filter>
                   <artifact>org.apache.kerby:kerb-util</artifact>
                   <excludes>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException with only shaded client jars (#2075)

2020-06-16 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fc4ebb0  YARN-10314. YarnClient throws NoClassDefFoundError for 
WebSocketException with only shaded client jars (#2075)
fc4ebb0 is described below

commit fc4ebb0499fe1095b87ff782c265e9afce154266
Author: Vinayakumar B 
AuthorDate: Wed Jun 17 09:26:41 2020 +0530

YARN-10314. YarnClient throws NoClassDefFoundError for WebSocketException 
with only shaded client jars (#2075)
---
 hadoop-client-modules/hadoop-client-minicluster/pom.xml | 16 +---
 hadoop-client-modules/hadoop-client-runtime/pom.xml | 11 +++
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index b447eed..f66528d 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -811,15 +811,25 @@
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
-            <!-- ... -->
+            <!-- ... -->
             <filter>
               <artifact>org.eclipse.jetty:jetty-client</artifact>
               <excludes>
                 <exclude>*/**</exclude>
               </excludes>
             </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-xml</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
+            <filter>
+              <artifact>org.eclipse.jetty:jetty-http</artifact>
+              <excludes>
+                <exclude>*/**</exclude>
+              </excludes>
+            </filter>
           </filters>
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index fe95ed8..9a1efff 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -158,12 +158,8 @@
                       <!-- ... -->
                       <exclude>com.google.code.findbugs:jsr305</exclude>
                       <exclude>io.dropwizard.metrics:metrics-core</exclude>
-                      <exclude>org.eclipse.jetty.websocket:*</exclude>
                       <exclude>org.eclipse.jetty:jetty-servlet</exclude>
                       <exclude>org.eclipse.jetty:jetty-security</exclude>
-                      <exclude>org.eclipse.jetty:jetty-client</exclude>
-                      <exclude>org.eclipse.jetty:jetty-http</exclude>
-                      <exclude>org.eclipse.jetty:jetty-xml</exclude>
                       <exclude>org.ow2.asm:*</exclude>
                       <!-- ... -->
                       <exclude>org.bouncycastle:*</exclude>
@@ -214,6 +210,13 @@
                   </excludes>
                 </filter>
+                <filter>
+                  <artifact>org.eclipse.jetty.websocket:*</artifact>
+                  <excludes>
+                    <exclude>about.html</exclude>
+                  </excludes>
+                </filter>
+
                 <filter>
                   <artifact>org.apache.kerby:kerb-util</artifact>
                   <excludes>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (7c4de59 -> e154084)

2020-06-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 7c4de59  YARN-10293. Reserved Containers not allocated from available 
space of other nodes in CandidateNodeSet for MultiNodePlacement. Contributed by 
Prabhu Joseph.
 add e154084  HADOOP-17046. Support downstreams' existing Hadoop-rpc 
implementations using non-shaded protobuf classes (#2026)

No new revisions were added by this update.

Summary of changes:
 .../dev-support/findbugsExcludeFile.xml|4 +
 hadoop-common-project/hadoop-common/pom.xml|   92 +-
 .../ipc/protobuf/ProtobufRpcEngineProtos.java  | 1163 
 .../java/org/apache/hadoop/ha/ZKFCRpcServer.java   |4 +-
 .../HAServiceProtocolClientSideTranslatorPB.java   |6 +-
 .../ZKFCProtocolClientSideTranslatorPB.java|4 +-
 .../java/org/apache/hadoop/ipc/ProtobufHelper.java |   17 +
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   |   22 +-
 ...tobufRpcEngine.java => ProtobufRpcEngine2.java} |  153 +--
 .../hadoop/ipc/ProtobufRpcEngineCallback.java  |   11 +-
 ...llback.java => ProtobufRpcEngineCallback2.java} |2 +-
 .../src/main/java/org/apache/hadoop/ipc/RPC.java   |4 +-
 .../java/org/apache/hadoop/ipc/RpcClientUtil.java  |2 +-
 .../java/org/apache/hadoop/ipc/RpcWritable.java|   45 +
 .../main/java/org/apache/hadoop/ipc/Server.java|   12 +-
 .../java/org/apache/hadoop/tracing/TraceAdmin.java |4 +-
 ...bufRpcEngine.proto => ProtobufRpcEngine2.proto} |   20 +-
 .../java/org/apache/hadoop/ha/DummyHAService.java  |4 +-
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java|8 +-
 .../hadoop/ipc/TestMultipleProtocolServer.java |2 +-
 .../hadoop/ipc/TestProtoBufRPCCompatibility.java   |9 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java |5 +-
 .../hadoop/ipc/TestProtoBufRpcServerHandoff.java   |6 +-
 .../apache/hadoop/ipc/TestRPCCompatibility.java|   10 +-
 .../org/apache/hadoop/ipc/TestRPCWaitForProxy.java |2 +-
 .../apache/hadoop/ipc/TestReuseRpcConnections.java |3 +-
 .../java/org/apache/hadoop/ipc/TestRpcBase.java|2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java|4 +-
 .../hadoop/security/TestDoAsEffectiveUser.java |   18 +-
 .../apache/hadoop/hdfs/NameNodeProxiesClient.java  |4 +-
 .../ClientDatanodeProtocolTranslatorPB.java|4 +-
 .../ClientNamenodeProtocolTranslatorPB.java|6 +-
 .../ReconfigurationProtocolTranslatorPB.java   |4 +-
 .../server/federation/router/ConnectionPool.java   |4 +-
 .../federation/router/RouterAdminServer.java   |4 +-
 .../server/federation/router/RouterClient.java |4 +-
 .../server/federation/router/RouterRpcServer.java  |4 +-
 .../hadoop/hdfs/tools/federation/RouterAdmin.java  |4 +-
 .../hdfs/server/federation/MockNamenode.java   |4 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml|3 +
 .../main/java/org/apache/hadoop/hdfs/DFSUtil.java  |   24 +-
 .../org/apache/hadoop/hdfs/NameNodeProxies.java|4 +-
 ...nodeLifelineProtocolClientSideTranslatorPB.java |4 +-
 .../DatanodeProtocolClientSideTranslatorPB.java|4 +-
 .../InterDatanodeProtocolTranslatorPB.java |4 +-
 .../hdfs/qjournal/client/IPCLoggerChannel.java |6 +-
 .../hdfs/qjournal/server/JournalNodeRpcServer.java |4 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java|4 +-
 .../aliasmap/InMemoryLevelDBAliasMapServer.java|4 +-
 .../hadoop/hdfs/server/datanode/DataNode.java  |4 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java|6 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java |4 +-
 .../hdfs/qjournal/client/TestQJMWithFaults.java|4 +-
 .../qjournal/client/TestQuorumJournalManager.java  |4 +-
 .../hdfs/security/token/block/TestBlockToken.java  |4 +-
 .../namenode/snapshot/SnapshotTestHelper.java  |2 +-
 .../pb/client/HSClientProtocolPBClientImpl.java|4 +-
 .../pb/client/MRClientProtocolPBClientImpl.java|8 +-
 .../apache/hadoop/mapreduce/v2/hs/HSProxies.java   |4 +-
 .../mapreduce/v2/hs/server/HSAdminServer.java  |6 +-
 .../pb/client/ClientAMProtocolPBClientImpl.java|4 +-
 .../ApplicationClientProtocolPBClientImpl.java |4 +-
 .../ApplicationHistoryProtocolPBClientImpl.java|5 +-
 .../ApplicationMasterProtocolPBClientImpl.java |5 +-
 .../pb/client/ClientSCMProtocolPBClientImpl.java   |4 +-
 .../ContainerManagementProtocolPBClientImpl.java   |4 +-
 .../pb/client/CsiAdaptorProtocolPBClientImpl.java  |4 +-
 .../factories/impl/pb/RpcServerFactoryPBImpl.java  |4 +-
 ...eManagerAdministrationProtocolPBClientImpl.java |4 +-
 .../pb/client/SCMAdminProtocolPBClientImpl.java|4 +-
 .../CollectorNodemanagerProtocolPBClientImpl.jav
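Per the summary above, ProtobufRpcEngine stays usable for downstreams built against non-shaded protobuf, while the parallel ProtobufRpcEngine2 carries the shaded-protobuf implementation Hadoop's own protocols now register. A hedged sketch of the registration call (MyProtocolPB is a placeholder, not a real Hadoop interface):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine2;
import org.apache.hadoop.ipc.RPC;

// Hedged sketch: registering the shaded-protobuf engine for a protocol.
// Real code passes a PB translator interface instead of the placeholder.
public class RpcEngineDemo {
  interface MyProtocolPB { }
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, MyProtocolPB.class, ProtobufRpcEngine2.class);
    System.out.println("engine registered for " + MyProtocolPB.class.getName());
  }
}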

[hadoop] branch branch-3.3 updated: HADOOP-16985. Handle release package related issues (#1957)

2020-04-15 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
 new b4ba9be  HADOOP-16985. Handle release package related issues (#1957)
b4ba9be is described below

commit b4ba9bed7c591ee858fa6139060c984913e8e94f
Author: Vinayakumar B 
AuthorDate: Tue Apr 14 18:01:46 2020 +0530

HADOOP-16985. Handle release package related issues (#1957)

(cherry picked from commit 4d24d99e854e1718270a75807f7779a623758247)
---
 dev-support/bin/create-release | 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index f4851d1..39a5d0d 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -651,10 +651,12 @@ function signartifacts
 
   big_console_header "Signing the release"
 
-  for i in ${ARTIFACTS_DIR}/*; do
+  run cd "${ARTIFACTS_DIR}"
+  for i in *; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 sha512sum --tag "${i}" > "${i}.sha512"
   done
+  run cd "${BASEDIR}"
 
   if [[ "${ASFRELEASE}" = true ]]; then
 echo "Fetching the Apache Hadoop KEYS file..."
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index 7c725d7..87169420 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -56,6 +56,7 @@
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
         <exclude>**/SecurityAuth.audit*</exclude>
+        <exclude>patchprocess/**</exclude>
       </excludes>
     </fileSet>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16985. Handle release package related issues (#1957)

2020-04-15 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 4d24d99  HADOOP-16985. Handle release package related issues (#1957)
4d24d99 is described below

commit 4d24d99e854e1718270a75807f7779a623758247
Author: Vinayakumar B 
AuthorDate: Tue Apr 14 18:01:46 2020 +0530

HADOOP-16985. Handle release package related issues (#1957)
---
 dev-support/bin/create-release | 4 +++-
 hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index f4851d1..39a5d0d 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -651,10 +651,12 @@ function signartifacts
 
   big_console_header "Signing the release"
 
-  for i in ${ARTIFACTS_DIR}/*; do
+  run cd "${ARTIFACTS_DIR}"
+  for i in *; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 sha512sum --tag "${i}" > "${i}.sha512"
   done
+  run cd "${BASEDIR}"
 
   if [[ "${ASFRELEASE}" = true ]]; then
 echo "Fetching the Apache Hadoop KEYS file..."
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index 7c725d7..87169420 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -56,6 +56,7 @@
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
         <exclude>**/SecurityAuth.audit*</exclude>
+        <exclude>patchprocess/**</exclude>
       </excludes>
     </fileSet>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16927. Update hadoop-thirdparty dependency version to 1.0.0 (#1900). Contributed by Vinayakumar B.

2020-03-20 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f02d5ab  HADOOP-16927. Update hadoop-thirdparty dependency version to 
1.0.0 (#1900). Contributed by Vinayakumar B.
f02d5ab is described below

commit f02d5abacd84efb5436fa418c9192450d815c989
Author: Vinayakumar B 
AuthorDate: Thu Mar 19 15:21:12 2020 +0530

HADOOP-16927. Update hadoop-thirdparty dependency version to 1.0.0 (#1900). 
Contributed by Vinayakumar B.
---
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index c831252..f6c4300 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -91,7 +91,7 @@
     <protobuf.version>3.7.1</protobuf.version>
     <protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>

-    <hadoop-thirdparty.version>1.0.0-SNAPSHOT</hadoop-thirdparty.version>
+    <hadoop-thirdparty.version>1.0.0</hadoop-thirdparty.version>
     <hadoop-thirdparty-shaded-prefix>org.apache.hadoop.thirdparty</hadoop-thirdparty-shaded-prefix>
     <hadoop-thirdparty-protobuf-prefix>${hadoop-thirdparty-shaded-prefix}.protobuf</hadoop-thirdparty-protobuf-prefix>
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: Finishing 1.0.0 release

2020-03-18 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 80523cc  Finishing 1.0.0 release
80523cc is described below

commit 80523cce96186fd22e1c0c8f8e71fbc1274ff96d
Author: Vinayakumar B 
AuthorDate: Thu Mar 19 01:10:44 2020 +0530

Finishing 1.0.0 release
---
 .../thirdparty-1.0.0/CHANGELOG.thirdparty-1.0.0.md | 55 ++
 .../RELEASENOTES.thirdparty-1.0.0.md   | 24 ++
 2 files changed, 79 insertions(+)

diff --git 
a/src/site/markdown/release/thirdparty-1.0.0/CHANGELOG.thirdparty-1.0.0.md 
b/src/site/markdown/release/thirdparty-1.0.0/CHANGELOG.thirdparty-1.0.0.md
new file mode 100644
index 000..9db3eb5
--- /dev/null
+++ b/src/site/markdown/release/thirdparty-1.0.0/CHANGELOG.thirdparty-1.0.0.md
@@ -0,0 +1,55 @@
+
+
+# Apache Hadoop Third-party Libs Changelog
+
+## Release thirdparty-1.0.0 - 2020-03-11
+
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-16895](https://issues.apache.org/jira/browse/HADOOP-16895) | 
[thirdparty] Revisit LICENSEs and NOTICEs |  Major | . | Vinayakumar B | 
Vinayakumar B |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-16820](https://issues.apache.org/jira/browse/HADOOP-16820) | 
[thirdparty] ChangeLog and ReleaseNote are not packaged by createrelease script 
|  Major | hadoop-thirdparty | Vinayakumar B | Vinayakumar B |
+| [HADOOP-16824](https://issues.apache.org/jira/browse/HADOOP-16824) | 
[thirdparty] port HADOOP-16754 (Fix docker failed to build yetus/hadoop) to 
thirdparty Dockerfile |  Major | . | Vinayakumar B | Vinayakumar B |
+| [HADOOP-16919](https://issues.apache.org/jira/browse/HADOOP-16919) | 
[thirdparty] Handle release package related issues |  Major | hadoop-thirdparty 
| Vinayakumar B | Vinayakumar B |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-16595](https://issues.apache.org/jira/browse/HADOOP-16595) | 
[pb-upgrade] Create hadoop-thirdparty artifact to have shaded protobuf |  Major 
| hadoop-thirdparty | Vinayakumar B | Vinayakumar B |
+| [HADOOP-16821](https://issues.apache.org/jira/browse/HADOOP-16821) | 
[pb-upgrade] Use 'o.a.h.thirdparty.protobuf' shaded prefix instead of 
'protobuf\_3\_7' |  Major | hadoop-thirdparty | Vinayakumar B | Vinayakumar B |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|: |: | :--- |: |: |: |
+| [HADOOP-16867](https://issues.apache.org/jira/browse/HADOOP-16867) | 
[thirdparty] Add shaded JaegerTracer |  Major | . | Siyao Meng | Siyao Meng |
+
+
diff --git 
a/src/site/markdown/release/thirdparty-1.0.0/RELEASENOTES.thirdparty-1.0.0.md 
b/src/site/markdown/release/thirdparty-1.0.0/RELEASENOTES.thirdparty-1.0.0.md
new file mode 100644
index 000..1d48ea0
--- /dev/null
+++ 
b/src/site/markdown/release/thirdparty-1.0.0/RELEASENOTES.thirdparty-1.0.0.md
@@ -0,0 +1,24 @@
+
+
+# Apache Hadoop Third-party Libs  thirdparty-1.0.0 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, 
important issues, features, and major improvements.
+
+
+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] annotated tag rel/release-1.0.0 created (now 78ae206)

2020-03-18 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to annotated tag rel/release-1.0.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 78ae206  (tag)
 tagging 3395253a6235c40e5eac57a365fc1268d3f379af (commit)
  by Vinayakumar B
  on Thu Mar 19 00:38:41 2020 +0530

- Log -
Hadoop Thirdparty 1.0.0 release
-BEGIN PGP SIGNATURE-

iQIzBAABCgAdFiEEgM5LZnWY0WbA5Zgs5OrZw9AhWnEFAl5ycbkACgkQ5OrZw9Ah
WnFcHQ/7B1Cxj5rGFIisvwtBsnMuAHAugxSXIPsrUl4hft3Hu15wRfugA64rp6+y
n2vlM+veYTc4EHl0Aw+/KYXQVzD37ZqgBsAJqZZYBm8hqXIVj6F9QiVKgsMtbuA4
1ghoMwGpRkdKy5+qwcL08e8c1YtG/0Y8a/QcLn5YZSJaVxTQShLN/95KDP6AfuIc
mr34SV5NkjxB7S+t6di2R4nfmd30nT1Tt5xwxRNl8+1H5n8i8AgcMaUuIU26ZLWD
RYuL3qmije+YfGr42Nxf86D+FzKiYlUGEMlXU/LRhEsJHl0bfWAPy9aYkbsRbz+0
/bWQRrbJ8dc36gb52kJQTdSAO2i7w/b/RxSpROh4m6KzInWCGf6gS7HSJjuq7/BU
xUrxQAlZVvzYzox2NlbQ1Gg/SpQLQPJp8N9gd7tHnQIqqRZ92umuv9WhGOhymqBA
i6NLbelNfKLDZO4XAdwhhQEF7oV8GqIfOLTMcebfI0M8yHIgui81acRb4yBjMdUz
kwV7ZHB3nAdX6qtjOtrQUgDhkQ0bJitJ87hzTdhSCD69yWiCI4A+/UohuuIkT+Im
Til14BvZLaz159XJl7K6khdxkp+9Wl1u5D9KVWZpgM0FMGlaodw3M03s3enHVbIl
z9Ar0UJqmzrqZmTwA8XWz272nDJVG20CbGnFToLBZwa/AwWdNfw=
=olv4
-END PGP SIGNATURE-
---

No new revisions were added by this update.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] annotated tag release-1.0.0-RC1 created (now 863efce)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to annotated tag release-1.0.0-RC1
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 863efce  (tag)
 tagging 3395253a6235c40e5eac57a365fc1268d3f379af (commit)
  by Vinayakumar B
  on Thu Mar 12 01:50:11 2020 +0530

- Log -
Release candidate - 1.0.0-RC1
-BEGIN PGP SIGNATURE-

iQIzBAABCgAdFiEEgM5LZnWY0WbA5Zgs5OrZw9AhWnEFAl5pR/sACgkQ5OrZw9Ah
WnHTyBAAgTZFVfXyeWabU6wUtPJx/ZYTtbGSjRiVlfv2waiqwy7pCER4Z8AWl4Sf
wOhrzhI48K/3RKQUOEABfhlAbuYNPHBO6m766w8aTdUZZC2BEsIj9fPf+/RDzest
KoPBQJuZiEQKT7Q/85zS/T/FO0WOHR4G+bDzTPWPCvNGh+TAH6bzB6OWYglqTHiv
8kB8v5SuXHDIKPaDtMo6LtnW8qQa3FQVwB9I6bB/wqin20rhxnr8qLkN+CzeJFpu
MvCAYr3u5xnMprxmctwkJDIdvkpba92RWE8YUKQ0QgxMklDrV75xU15dFlbSZrnF
ggrt36QeGeKFbgRCggvbY+mkEGdpdLShBCiAdAvpHV7PT8WWOKeTubpXS77/Ik8N
PVw/B3cmNRAD524dAP8ckjhhzD+LnKeOt8ao/k0Ha2xm8N++6ZumY0/pLMskFZuQ
+hKp+km/ylzBN0ynU5KmUgW9Ck0xkErquE4nTN5L9D4TnZ2DXKHElOCrMnkQt4p8
FS+nfhpYjcO2cJwHbDbhn6aMwXaQFTDL7ulkgUffCk6pXN3ijXQOINOf2bIB7oi7
PuyPapJnU2qKDJF1zvtMvegt7xzRxBjo/l9T92EYrQgcy3BmjYwVw8+fHcg1pGR/
qWCIhugkc0Fpa0tNJRymU4E8DKbxzX8Bf+NsvIPprf8XQvcSF2g=
=QHhE
-END PGP SIGNATURE-
---

No new revisions were added by this update.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch branch-1.0 updated: HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6) addendum to avoid rat check failure for NOTICE-binary

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/branch-1.0 by this push:
 new 3395253  HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6) 
addendum to avoid rat check failure for NOTICE-binary
3395253 is described below

commit 3395253a6235c40e5eac57a365fc1268d3f379af
Author: Vinayakumar B 
AuthorDate: Thu Mar 12 00:35:09 2020 +0530

HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
addendum to avoid rat check failure for NOTICE-binary
---
 pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pom.xml b/pom.xml
index 646577d..58adaae 100644
--- a/pom.xml
+++ b/pom.xml
@@ -262,6 +262,7 @@
             <exclude>licenses/**</exclude>
             <exclude>licenses-binary/**</exclude>
             <exclude>**/dependency-reduced-pom.xml</exclude>
+            <exclude>NOTICE-binary</exclude>
           </excludes>
         </configuration>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6) addendum to avoid rat check failure for NOTICE-binary

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 9213756  HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6) 
addendum to avoid rat check failure for NOTICE-binary
9213756 is described below

commit 921375665f5b5937e0bd3f1e588a7996777b26d3
Author: Vinayakumar B 
AuthorDate: Thu Mar 12 00:35:09 2020 +0530

HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
addendum to avoid rat check failure for NOTICE-binary
---
 pom.xml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pom.xml b/pom.xml
index a8d44a5..8d6f1e3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -262,6 +262,7 @@
             <exclude>licenses/**</exclude>
             <exclude>licenses-binary/**</exclude>
             <exclude>**/dependency-reduced-pom.xml</exclude>
+            <exclude>NOTICE-binary</exclude>
           </excludes>
         </configuration>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 01/02: HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit 622b2ddeac091d6ab5989f841c6c6b643481fb81
Author: Vinayakumar B 
AuthorDate: Mon Mar 2 17:54:41 2020 +0530

HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
---
 LICENSE-binary |  37 +-
 LICENSE.txt|  21 -
 NOTICE-binary  | 781 +
 NOTICE.txt |  33 +-
 hadoop-shaded-jaeger/pom.xml   |  26 +-
 hadoop-shaded-protobuf_3_7/pom.xml |  24 +-
 licenses-binary/LICENSE-cddl-gplv2-ce.txt  | 759 
 .../LICENSE.jetbrains.txt  |  24 +-
 LICENSE.txt => licenses-binary/LICENSE.kotlin.txt  |  24 +-
 .../{LICENSE-protobuf.txt => LICENSE.protobuf.txt} |   0
 licenses-binary/LICENSE.slf4j.txt  |  21 +
 11 files changed, 862 insertions(+), 888 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 6c668ef..3bf3ddc 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -205,6 +205,26 @@
 This project bundles some components that are also licensed under the Apache
 License Version 2.0:
 
+com.google.code.gson:gson:jar:2.8.6
+com.squareup.okhttp3:okhttp:jar:4.2.2
+com.squareup.okio:okio:jar:2.2.2
+
+io.jaegertracing:jaeger-client:jar:0.34.2
+io.jaegertracing:jaeger-core:jar:0.34.2
+io.jaegertracing:jaeger-thrift:jar:0.34.2
+io.jaegertracing:jaeger-tracerresolver:jar:0.34.2
+io.opentracing.contrib:opentracing-tracerresolver:jar:0.1.5
+
+org.apache.thrift:libthrift:jar:0.13.0
+
+org.jetbrains:annotations:jar:13.0
+
+org.jetbrains.kotlin:kotlin-stdlib:jar:1.3.50
+org.jetbrains.kotlin:kotlin-stdlib-common:jar:1.3.50
+
+io.opentracing:opentracing-api:jar:0.31.0
+io.opentracing:opentracing-noop:jar:0.31.0
+io.opentracing:opentracing-util:jar:0.31.0
 
 
 

@@ -213,29 +233,16 @@ licenses. This section summarizes those components and 
their licenses.
 See licenses-binary/ for text of these licenses.
 
 
-BSD 2-Clause
-
-
-
 BSD 3-Clause
 
-
 com.google.protobuf:protobuf-java:3.7.1
 
 
 MIT License
 ---
+org.slf4j:slf4j-api:1.7.28
 
 
 CDDL 1.1 + GPLv2 with classpath exception
 -
-
-Eclipse Public License 1.0
---
-
-junit:junit:4.12
-
-
-Public Domain
--
-
+javax.annotation:javax.annotation-api:1.3.2
diff --git a/LICENSE.txt b/LICENSE.txt
index 643e3e7..6b0b127 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -201,24 +201,3 @@
See the License for the specific language governing permissions and
limitations under the License.
 
-
-This product bundles various third-party components under other open source
-licenses. This section summarizes those components and their licenses.
-See licenses/ for text of these licenses.
-
-
-Apache Software Foundation License 2.0
---
-
-
-BSD 2-Clause
-
-
-
-BSD 3-Clause
-
-
-
-MIT License

-
diff --git a/NOTICE-binary b/NOTICE-binary
index 8268c52..1b1ed35 100644
--- a/NOTICE-binary
+++ b/NOTICE-binary
@@ -1,780 +1,41 @@
-Apache Hadoop
-Copyright 2006 and onwards The Apache Software Foundation.
+Apache Hadoop Third-party Libs
+Copyright 2020 and onwards The Apache Software Foundation.
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).
 
-Export Control Notice
--
-
-This distribution includes cryptographic software.  The country in
-which you currently reside may have restrictions on the import,
-possession, use, and/or re-export to another country, of
-encryption software.  BEFORE using any encryption software, please
-check your country's laws, regulations and policies concerning the
-import, possession, or use, and re-export of encryption software, to
-see if this is permitted.  See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and
-Security (BIS), has classified this software as Export Commodity
-Control Number (ECCN) 5D002.C.1, which includes information security
-software using or performing cryptographic functions with asymmetric
-algorithms.  The form and manner of this Apache Software Foundation
-distribution makes it eligible for export under the License Exception
-ENC Technology Software Unrestricted (TSU) exception (see the BIS
-Export Administration Regulations, Section 740.13) for both object
-code and source code.
-
-The following provides more details on the included cryptographic software:
-
-This software 

[hadoop-thirdparty] branch branch-1.0 updated (16f6d2a -> 3df5b6c)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


from 16f6d2a  Preparing for 1.0.0 Release
 new 622b2dd  HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
 new 3df5b6c  HADOOP-16919. Handle release packaging issues (#7)

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 LICENSE-binary |  37 +-
 LICENSE.txt|  21 -
 NOTICE-binary  | 781 +
 NOTICE.txt |  33 +-
 dev-support/bin/create-release |   4 +-
 hadoop-shaded-jaeger/pom.xml   |  26 +-
 hadoop-shaded-protobuf_3_7/pom.xml |  24 +-
 licenses-binary/LICENSE-cddl-gplv2-ce.txt  | 759 
 .../LICENSE.jetbrains.txt  |  24 +-
 LICENSE.txt => licenses-binary/LICENSE.kotlin.txt  |  24 +-
 .../{LICENSE-protobuf.txt => LICENSE.protobuf.txt} |   0
 licenses-binary/LICENSE.slf4j.txt  |  21 +
 .../resources/assemblies/hadoop-thirdparty-src.xml |   2 +-
 src/site/markdown/index.md.vm  |  29 +-
 14 files changed, 894 insertions(+), 891 deletions(-)
 create mode 100644 licenses-binary/LICENSE-cddl-gplv2-ce.txt
 copy LICENSE.txt => licenses-binary/LICENSE.jetbrains.txt (95%)
 copy LICENSE.txt => licenses-binary/LICENSE.kotlin.txt (95%)
 rename licenses-binary/{LICENSE-protobuf.txt => LICENSE.protobuf.txt} (100%)
 create mode 100644 licenses-binary/LICENSE.slf4j.txt


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 02/02: HADOOP-16919. Handle release packaging issues (#7)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit 3df5b6cc0393e5543d6fb07b8283ef78435bc555
Author: Vinayakumar B 
AuthorDate: Sat Feb 29 01:32:14 2020 +0530

HADOOP-16919. Handle release packaging issues (#7)
---
 dev-support/bin/create-release |  4 ++-
 .../resources/assemblies/hadoop-thirdparty-src.xml |  2 +-
 src/site/markdown/index.md.vm  | 29 +-
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 75b80a1..7859081 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -576,10 +576,12 @@ function signartifacts
 
   big_console_header "Signing the release"
 
-  for i in ${ARTIFACTS_DIR}/*; do
+  run cd "${ARTIFACTS_DIR}"
+  for i in *; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 sha512sum --tag "${i}" > "${i}.sha512"
   done
+  run cd "${BASEDIR}"
 
   if [[ "${ASFRELEASE}" = true ]]; then
 echo "Fetching the Apache Hadoop KEYS file..."
diff --git a/src/main/resources/assemblies/hadoop-thirdparty-src.xml 
b/src/main/resources/assemblies/hadoop-thirdparty-src.xml
index ec3aceb..6faeff0 100644
--- a/src/main/resources/assemblies/hadoop-thirdparty-src.xml
+++ b/src/main/resources/assemblies/hadoop-thirdparty-src.xml
@@ -55,7 +55,7 @@
         <exclude>**/*.log</exclude>
         <exclude>**/build/**</exclude>
         <exclude>**/file:/**</exclude>
-        <exclude>**/SecurityAuth.audit*</exclude>
+        <exclude>patchprocess/**</exclude>
       </excludes>
     </fileSet>
diff --git a/src/site/markdown/index.md.vm b/src/site/markdown/index.md.vm
index f7acb74..280b1c6 100644
--- a/src/site/markdown/index.md.vm
+++ b/src/site/markdown/index.md.vm
@@ -43,4 +43,31 @@ This page provides an overview of the major changes.
 Protobuf-java
 -
 Google Protobuf's 3.7.1 jar is available as 
*org.apache.hadoop.thirdparty:hadoop-shaded-protobuf_3_7* artifact.
-*com.google.protobuf* package is shaded as 
*org.apache.hadoop.thirdparty.protobuf*.
+
+Following are relocations under *hadoop-shaded-protobuf_3_7* artifact:
+
+|Original package | Shaded package |
+|---|---|
+|com.google.protobuf|org.apache.hadoop.thirdparty.protobuf|
+
+
+io.jaegertracing:jaeger-client
+--
+jaeger-client: 0.34.2 jar is available as 
*org.apache.hadoop.thirdparty:hadoop-shaded-jaeger* artifact.
+
+Following are relocations under *hadoop-shaded-jaeger* artifact:
+
+|Original package | Shaded package |
+|---|---|
+|com.google.gson | 
org.apache.hadoop.thirdparty.io.jaegertracing.com.google.gson|
+|io.jaegertracing.thriftjava|org.apache.hadoop.thirdparty.io.jaegertracing.thriftjava|
+|io.jaegertracing.crossdock|org.apache.hadoop.thirdparty.io.jaegertracing.crossdock|
+|io.jaegertracing.thrift|org.apache.hadoop.thirdparty.io.jaegertracing.thrift|
+|io.jaegertracing.agent|org.apache.hadoop.thirdparty.io.jaegertracing.agent|
+|org.apache.thrift|org.apache.hadoop.thirdparty.io.jaegertracing.apache.thrift|
+|com.twitter.zipkin|org.apache.hadoop.thirdparty.io.jaegertracing.com.twitter.zipkin|
+|okhttp3|org.apache.hadoop.thirdparty.io.jaegertracing.okhttp3|
+|kotlin|org.apache.hadoop.thirdparty.io.jaegertracing.kotlin|
+|org.intellij|org.apache.hadoop.thirdparty.io.jaegertracing.org.intellij|
+|org.jetbrains|org.apache.hadoop.thirdparty.io.jaegertracing.org.jetbrains|
+|okio|org.apache.hadoop.thirdparty.io.jaegertracing.okio|
\ No newline at end of file
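
For reference, a minimal consumer-side sketch of the relocations tabled above,
assuming both shaded artifacts are on the classpath. ByteString and Gson are
illustrative picks (the tables list package moves, not individual classes);
any class from the original packages is addressed the same way under its
relocated prefix:

    // was com.google.protobuf.ByteString
    import org.apache.hadoop.thirdparty.protobuf.ByteString;
    // was com.google.gson.Gson
    import org.apache.hadoop.thirdparty.io.jaegertracing.com.google.gson.Gson;

    public class ShadedImports {
      public static void main(String[] args) {
        ByteString bs = ByteString.copyFromUtf8("shaded");
        System.out.println(new Gson().toJson(bs.size())); // prints 6
      }
    }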


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16919. Handle release packaging issues (#7)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 19948e6  HADOOP-16919. Handle release packaging issues (#7)
19948e6 is described below

commit 19948e6a98c562ce79be6e2783d51a8d7be110a5
Author: Vinayakumar B 
AuthorDate: Sat Feb 29 01:32:14 2020 +0530

HADOOP-16919. Handle release packaging issues (#7)
---
 dev-support/bin/create-release |  4 ++-
 .../resources/assemblies/hadoop-thirdparty-src.xml |  2 +-
 src/site/markdown/index.md.vm  | 29 +-
 3 files changed, 32 insertions(+), 3 deletions(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 75b80a1..7859081 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -576,10 +576,12 @@ function signartifacts
 
   big_console_header "Signing the release"
 
-  for i in ${ARTIFACTS_DIR}/*; do
+  run cd "${ARTIFACTS_DIR}"
+  for i in *; do
 ${GPG} --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
 sha512sum --tag "${i}" > "${i}.sha512"
   done
+  run cd "${BASEDIR}"
 
   if [[ "${ASFRELEASE}" = true ]]; then
 echo "Fetching the Apache Hadoop KEYS file..."
diff --git a/src/main/resources/assemblies/hadoop-thirdparty-src.xml 
b/src/main/resources/assemblies/hadoop-thirdparty-src.xml
index ec3aceb..6faeff0 100644
--- a/src/main/resources/assemblies/hadoop-thirdparty-src.xml
+++ b/src/main/resources/assemblies/hadoop-thirdparty-src.xml
@@ -55,7 +55,7 @@
 **/*.log
 **/build/**
 **/file:/**
-**/SecurityAuth.audit*
+patchprocess/**
   
 
   
diff --git a/src/site/markdown/index.md.vm b/src/site/markdown/index.md.vm
index f7acb74..280b1c6 100644
--- a/src/site/markdown/index.md.vm
+++ b/src/site/markdown/index.md.vm
@@ -43,4 +43,31 @@ This page provides an overview of the major changes.
 Protobuf-java
 -
 Google Protobuf's 3.7.1 jar is available as 
*org.apache.hadoop.thirdparty:hadoop-shaded-protobuf_3_7* artifact.
-*com.google.protobuf* package is shaded as 
*org.apache.hadoop.thirdparty.protobuf*.
+
+Following are relocations under *hadoop-shaded-protobuf_3_7* artifact:
+
+|Original package | Shaded package |
+|---|---|
+|com.google.protobuf|org.apache.hadoop.thirdparty.protobuf|
+
+
+io.jaegertracing:jaeger-client
+--
+jaeger-client: 0.34.2 jar is available as 
*org.apache.hadoop.thirdparty:hadoop-shaded-jaeger* artifact.
+
+Following are relocations under *hadoop-shaded-jaeger* artifact:
+
+|Original package | Shaded package |
+|---|---|
+|com.google.gson | 
org.apache.hadoop.thirdparty.io.jaegertracing.com.google.gson|
+|io.jaegertracing.thriftjava|org.apache.hadoop.thirdparty.io.jaegertracing.thriftjava|
+|io.jaegertracing.crossdock|org.apache.hadoop.thirdparty.io.jaegertracing.crossdock|
+|io.jaegertracing.thrift|org.apache.hadoop.thirdparty.io.jaegertracing.thrift|
+|io.jaegertracing.agent|org.apache.hadoop.thirdparty.io.jaegertracing.agent|
+|org.apache.thrift|org.apache.hadoop.thirdparty.io.jaegertracing.apache.thrift|
+|com.twitter.zipkin|org.apache.hadoop.thirdparty.io.jaegertracing.com.twitter.zipkin|
+|okhttp3|org.apache.hadoop.thirdparty.io.jaegertracing.okhttp3|
+|kotlin|org.apache.hadoop.thirdparty.io.jaegertracing.kotlin|
+|org.intellij|org.apache.hadoop.thirdparty.io.jaegertracing.org.intellij|
+|org.jetbrains|org.apache.hadoop.thirdparty.io.jaegertracing.org.jetbrains|
+|okio|org.apache.hadoop.thirdparty.io.jaegertracing.okio|
\ No newline at end of file


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)

2020-03-11 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a11c32c  HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
a11c32c is described below

commit a11c32cd2257139275a99cc779249861833be38a
Author: Vinayakumar B 
AuthorDate: Mon Mar 2 17:54:41 2020 +0530

HADOOP-16895. [thirdparty] Revisit LICENSEs and NOTICEs (#6)
---
 LICENSE-binary |  37 +-
 LICENSE.txt|  21 -
 NOTICE-binary  | 781 +
 NOTICE.txt |  33 +-
 hadoop-shaded-jaeger/pom.xml   |  26 +-
 hadoop-shaded-protobuf_3_7/pom.xml |  24 +-
 licenses-binary/LICENSE-cddl-gplv2-ce.txt  | 759 
 .../LICENSE.jetbrains.txt  |  24 +-
 LICENSE.txt => licenses-binary/LICENSE.kotlin.txt  |  24 +-
 .../{LICENSE-protobuf.txt => LICENSE.protobuf.txt} |   0
 licenses-binary/LICENSE.slf4j.txt  |  21 +
 11 files changed, 862 insertions(+), 888 deletions(-)

diff --git a/LICENSE-binary b/LICENSE-binary
index 6c668ef..3bf3ddc 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -205,6 +205,26 @@
 This project bundles some components that are also licensed under the Apache
 License Version 2.0:
 
+com.google.code.gson:gson:jar:2.8.6
+com.squareup.okhttp3:okhttp:jar:4.2.2
+com.squareup.okio:okio:jar:2.2.2
+
+io.jaegertracing:jaeger-client:jar:0.34.2
+io.jaegertracing:jaeger-core:jar:0.34.2
+io.jaegertracing:jaeger-thrift:jar:0.34.2
+io.jaegertracing:jaeger-tracerresolver:jar:0.34.2
+io.opentracing.contrib:opentracing-tracerresolver:jar:0.1.5
+
+org.apache.thrift:libthrift:jar:0.13.0
+
+org.jetbrains:annotations:jar:13.0
+
+org.jetbrains.kotlin:kotlin-stdlib:jar:1.3.50
+org.jetbrains.kotlin:kotlin-stdlib-common:jar:1.3.50
+
+io.opentracing:opentracing-api:jar:0.31.0
+io.opentracing:opentracing-noop:jar:0.31.0
+io.opentracing:opentracing-util:jar:0.31.0
 
 
 

@@ -213,29 +233,16 @@ licenses. This section summarizes those components and 
their licenses.
 See licenses-binary/ for text of these licenses.
 
 
-BSD 2-Clause
-
-
-
 BSD 3-Clause
 
-
 com.google.protobuf:protobuf-java:3.7.1
 
 
 MIT License
 ---
+org.slf4j:slf4j-api:1.7.28
 
 
 CDDL 1.1 + GPLv2 with classpath exception
 -
-
-Eclipse Public License 1.0
---
-
-junit:junit:4.12
-
-
-Public Domain
--
-
+javax.annotation:javax.annotation-api:1.3.2
diff --git a/LICENSE.txt b/LICENSE.txt
index 643e3e7..6b0b127 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -201,24 +201,3 @@
See the License for the specific language governing permissions and
limitations under the License.
 
-
-This product bundles various third-party components under other open source
-licenses. This section summarizes those components and their licenses.
-See licenses/ for text of these licenses.
-
-
-Apache Software Foundation License 2.0
---
-
-
-BSD 2-Clause
-
-
-
-BSD 3-Clause
-
-
-
-MIT License

-
diff --git a/NOTICE-binary b/NOTICE-binary
index 8268c52..1b1ed35 100644
--- a/NOTICE-binary
+++ b/NOTICE-binary
@@ -1,780 +1,41 @@
-Apache Hadoop
-Copyright 2006 and onwards The Apache Software Foundation.
+Apache Hadoop Third-party Libs
+Copyright 2020 and onwards The Apache Software Foundation.
 
 This product includes software developed at
 The Apache Software Foundation (http://www.apache.org/).
 
-Export Control Notice
--
-
-This distribution includes cryptographic software.  The country in
-which you currently reside may have restrictions on the import,
-possession, use, and/or re-export to another country, of
-encryption software.  BEFORE using any encryption software, please
-check your country's laws, regulations and policies concerning the
-import, possession, or use, and re-export of encryption software, to
-see if this is permitted.  See <http://www.wassenaar.org/> for more
-information.
-
-The U.S. Government Department of Commerce, Bureau of Industry and
-Security (BIS), has classified this software as Export Commodity
-Control Number (ECCN) 5D002.C.1, which includes information security
-software using or performing cryptographic functions with asymmetric
-algorithms.  The form and manner of this Apache Software Foundation
-distribution makes it eligible for export under the License Exception
-ENC Technology Software Unrestricted (TSU) exception (see the BIS
-Export Administration

[hadoop-thirdparty] branch branch-1.0 created (now 16f6d2a)

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 16f6d2a  Preparing for 1.0.0 Release

This branch includes the following new commits:

 new 16f6d2a  Preparing for 1.0.0 Release

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] tag release-1.0.0-RC0 created (now 16f6d2a)

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to tag release-1.0.0-RC0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at 16f6d2a  (commit)
This tag includes the following new commits:

 new 16f6d2a  Preparing for 1.0.0 Release

The 1 revision listed above as "new" is entirely new to this
repository and will be described in a separate email.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] 01/01: Preparing for 1.0.0 Release

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-1.0
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit 16f6d2ac9a8c7d3ad4ef63e20ed2e3e4dacc474e
Author: Vinayakumar B 
AuthorDate: Tue Feb 25 22:25:48 2020 +0530

Preparing for 1.0.0 Release
---
 hadoop-shaded-jaeger/pom.xml   | 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-jaeger/pom.xml b/hadoop-shaded-jaeger/pom.xml
index 90513f7..994e892 100644
--- a/hadoop-shaded-jaeger/pom.xml
+++ b/hadoop-shaded-jaeger/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hadoop-thirdparty</artifactId>
         <groupId>org.apache.hadoop.thirdparty</groupId>
-        <version>1.0.0-SNAPSHOT</version>
+        <version>1.0.0</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
 
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 5a622cd..87d9ab6 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.0.0</version>
     <relativePath>..</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
diff --git a/pom.xml b/pom.xml
index b663461..646577d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop.thirdparty</groupId>
   <artifactId>hadoop-thirdparty</artifactId>
-  <version>1.0.0-SNAPSHOT</version>
+  <version>1.0.0</version>
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: Preparing for 1.1.0-SNAPSHOT development

2020-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 85ae8d7  Preparing for 1.1.0-SNAPSHOT development
85ae8d7 is described below

commit 85ae8d7c582a696a6eef45d4e7a814e2e0222388
Author: Vinayakumar B 
AuthorDate: Tue Feb 25 22:40:30 2020 +0530

Preparing for 1.1.0-SNAPSHOT development
---
 hadoop-shaded-jaeger/pom.xml   | 2 +-
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/hadoop-shaded-jaeger/pom.xml b/hadoop-shaded-jaeger/pom.xml
index 90513f7..0b4db23 100644
--- a/hadoop-shaded-jaeger/pom.xml
+++ b/hadoop-shaded-jaeger/pom.xml
@@ -23,7 +23,7 @@
     <parent>
         <artifactId>hadoop-thirdparty</artifactId>
         <groupId>org.apache.hadoop.thirdparty</groupId>
-        <version>1.0.0-SNAPSHOT</version>
+        <version>1.1.0-SNAPSHOT</version>
     </parent>
     <modelVersion>4.0.0</modelVersion>
 
diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 5a622cd..90f9def 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hadoop-thirdparty</artifactId>
     <groupId>org.apache.hadoop.thirdparty</groupId>
-    <version>1.0.0-SNAPSHOT</version>
+    <version>1.1.0-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
diff --git a/pom.xml b/pom.xml
index b663461..a8d44a5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -23,7 +23,7 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hadoop.thirdparty</groupId>
   <artifactId>hadoop-thirdparty</artifactId>
-  <version>1.0.0-SNAPSHOT</version>
+  <version>1.1.0-SNAPSHOT</version>
   <parent>
     <groupId>org.apache</groupId>
     <artifactId>apache</artifactId>


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16867. [thirdparty] Add shaded JaegerTracer (#5)

2020-02-20 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ccb7eca  HADOOP-16867. [thirdparty] Add shaded JaegerTracer (#5)
ccb7eca is described below

commit ccb7ecae5f05765d410645fbdea9ff31698d647d
Author: Siyao Meng <50227127+smen...@users.noreply.github.com>
AuthorDate: Thu Feb 20 22:31:03 2020 -0800

HADOOP-16867. [thirdparty] Add shaded JaegerTracer (#5)

Co-authored-by: Wei-Chiu Chuang 
---
 hadoop-shaded-jaeger/pom.xml | 181 +++
 pom.xml  |   1 +
 2 files changed, 182 insertions(+)

diff --git a/hadoop-shaded-jaeger/pom.xml b/hadoop-shaded-jaeger/pom.xml
new file mode 100644
index 000..90513f7
--- /dev/null
+++ b/hadoop-shaded-jaeger/pom.xml
@@ -0,0 +1,181 @@
+
+
+http://maven.apache.org/POM/4.0.0;
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance;
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xsd/maven-4.0.0.xsd;>
+
+hadoop-thirdparty
+org.apache.hadoop.thirdparty
+1.0.0-SNAPSHOT
+
+4.0.0
+
+hadoop-shaded-jaeger
+
+Apache Hadoop shaded JaegerTracer
+jar
+
+
+0.34.2
+
+
+
+
+io.jaegertracing
+jaeger-client
+${jaegertracing.version}
+
+
+
+
+
+
+
+
+maven-assembly-plugin
+
+true
+
+
+
+org.apache.maven.plugins
+maven-shade-plugin
+
+
+
aggregate-into-a-jar-with-relocated-third-parties
+package
+
+shade
+
+
+false
+
false
+false
+
+
+
+
org.apache.httpcomponents:httpclient
+
commons-logging:commons-logging
+
commons-codec:commons-codec
+
org.apache.httpcomponents:httpcore
+
javax.annotation:javax.annotation-api
+org.slf4j:slf4j-api
+io.opentracing:*
+
+
+
+
+
+
+com.google.gson
+
${shaded.prefix}.io.jaegertracing.com.google.gson
+
+
+
+
+
io.jaegertracing.thriftjava
+
${shaded.prefix}.io.jaegertracing.thriftjava
+
+
+
io.jaegertracing.crossdock
+
${shaded.prefix}.io.jaegertracing.crossdock
+
+
+
io.jaegertracing.thrift
+
${shaded.prefix}.io.jaegertracing.thrift
+
+
+
io.jaegertracing.agent
+
${shaded.prefix}.io.jaegertracing.agent
+
+
+
+org.apache.thrift
+
${shaded.prefix}.io.jaegertracing.apache.thrift
+
+
+com.twitter.zipkin
+
${shaded.prefix}.io.jaegertracing.com.twitter.zipkin
+
+
+okhttp3
+
${shaded.prefix}.io.jaegertracing.okhttp3
+
+
+kotlin
+
${shaded.prefix}.io.j

[hadoop] branch trunk updated (5944d28 -> 7dac7e1)

2020-02-07 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 5944d28  HADOOP-16825: ITestAzureBlobFileSystemCheckAccess failing. 
Contributed by Bilahari T H.
 add 7dac7e1  HADOOP-16596. [pb-upgrade] Use shaded protobuf classes from 
hadoop-thirdparty dependency (#1635). Contributed by Vinayakumar B.

No new revisions were added by this update.

Summary of changes:
 hadoop-common-project/hadoop-common/pom.xml|  35 +++
 .../hadoop/fs/LocalFileSystemPathHandle.java   |   2 +-
 .../java/org/apache/hadoop/ha/ZKFCRpcServer.java   |   2 +-
 .../HAServiceProtocolClientSideTranslatorPB.java   |   4 +-
 .../HAServiceProtocolServerSideTranslatorPB.java   |   4 +-
 .../ZKFCProtocolClientSideTranslatorPB.java|   4 +-
 .../ZKFCProtocolServerSideTranslatorPB.java|   4 +-
 .../java/org/apache/hadoop/io/ObjectWritable.java  |   2 +-
 .../org/apache/hadoop/io/retry/RetryUtils.java |   2 +-
 .../java/org/apache/hadoop/ipc/ProtobufHelper.java |   4 +-
 .../org/apache/hadoop/ipc/ProtobufRpcEngine.java   |   4 +-
 .../hadoop/ipc/ProtobufRpcEngineCallback.java  |   2 +-
 .../ProtocolMetaInfoServerSideTranslatorPB.java|   4 +-
 .../src/main/java/org/apache/hadoop/ipc/RPC.java   |   2 +-
 .../java/org/apache/hadoop/ipc/RpcClientUtil.java  |   4 +-
 .../java/org/apache/hadoop/ipc/RpcWritable.java|   6 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|   6 +-
 ...nericRefreshProtocolClientSideTranslatorPB.java |   4 +-
 ...nericRefreshProtocolServerSideTranslatorPB.java |   4 +-
 ...eshCallQueueProtocolClientSideTranslatorPB.java |   4 +-
 ...eshCallQueueProtocolServerSideTranslatorPB.java |   4 +-
 .../org/apache/hadoop/security/Credentials.java|   2 +-
 .../org/apache/hadoop/security/SaslRpcClient.java  |   2 +-
 ...zationPolicyProtocolClientSideTranslatorPB.java |   4 +-
 ...zationPolicyProtocolServerSideTranslatorPB.java |   4 +-
 ...UserMappingsProtocolClientSideTranslatorPB.java |   4 +-
 ...UserMappingsProtocolServerSideTranslatorPB.java |   4 +-
 ...UserMappingsProtocolClientSideTranslatorPB.java |   4 +-
 ...UserMappingsProtocolServerSideTranslatorPB.java |   4 +-
 .../TraceAdminProtocolServerSideTranslatorPB.java  |   4 +-
 .../tracing/TraceAdminProtocolTranslatorPB.java|   2 +-
 .../java/org/apache/hadoop/util/ProtoUtil.java |   2 +-
 .../main/resources/common-version-info.properties  |   2 +-
 .../java/org/apache/hadoop/ha/DummyHAService.java  |   2 +-
 .../apache/hadoop/io/TestObjectWritableProtos.java |   4 +-
 .../org/apache/hadoop/ipc/RPCCallBenchmark.java|   2 +-
 .../hadoop/ipc/TestProtoBufRPCCompatibility.java   |   6 +-
 .../org/apache/hadoop/ipc/TestProtoBufRpc.java |   6 +-
 .../hadoop/ipc/TestProtoBufRpcServerHandoff.java   |   6 +-
 .../test/java/org/apache/hadoop/ipc/TestRPC.java   |   2 +-
 .../apache/hadoop/ipc/TestRPCServerShutdown.java   |   2 +-
 .../java/org/apache/hadoop/ipc/TestRpcBase.java|   6 +-
 .../org/apache/hadoop/ipc/TestRpcWritable.java |   2 +-
 .../java/org/apache/hadoop/ipc/TestSaslRPC.java|   2 +-
 .../hadoop/security/TestDoAsEffectiveUser.java |   2 +-
 .../java/org/apache/hadoop/util/TestProtoUtil.java |   2 +-
 hadoop-common-project/hadoop-kms/pom.xml   |   1 +
 hadoop-common-project/hadoop-registry/pom.xml  |   1 +
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml |  24 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java|   2 +-
 .../hadoop/hdfs/protocol/HdfsPathHandle.java   |   2 +-
 .../hdfs/protocol/datatransfer/PacketHeader.java   |   2 +-
 .../hdfs/protocol/datatransfer/PipelineAck.java|   2 +-
 .../hadoop/hdfs/protocol/datatransfer/Sender.java  |   2 +-
 .../datatransfer/sasl/DataTransferSaslUtil.java|   2 +-
 .../ClientDatanodeProtocolTranslatorPB.java|   4 +-
 .../ClientNamenodeProtocolTranslatorPB.java|   6 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java |   4 +-
 .../ReconfigurationProtocolTranslatorPB.java   |   4 +-
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml |   2 +
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml|  24 +
 .../RouterAdminProtocolServerSideTranslatorPB.java |   4 +-
 .../RouterAdminProtocolTranslatorPB.java   |   2 +-
 .../federation/router/RouterAdminServer.java   |   2 +-
 .../server/federation/router/RouterRpcServer.java  |   2 +-
 .../driver/impl/StateStoreSerializerPBImpl.java|   2 +-
 .../impl/pb/AddMountTableEntryRequestPBImpl.java   |   2 +-
 .../impl/pb/AddMountTableEntryResponsePBImpl.java  |   2 +-
 .../impl/pb/DisableNameserviceRequestPBImpl.java   |   2 +-
 .../impl/pb/DisableNameserviceResponsePBImpl.java  |   2 +-
 .../impl/pb/EnableNameserviceRequestPBImpl.java|   2 +-
 .../impl/pb/EnableNameserviceResponsePBImpl.java   |   2 +-
 .../impl/pb/EnterSafeModeRequestPBImpl.java|   2 +-
 .../impl/pb

[hadoop-thirdparty] branch trunk updated: HADOOP-16824. port HADOOP-16754 (Fix docker failed to build yetus/hadoop) to thirdparty Dockerfile (#4)

2020-01-22 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d0e9043  HADOOP-16824. port HADOOP-16754 (Fix docker failed to build 
yetus/hadoop) to thirdparty Dockerfile (#4)
d0e9043 is described below

commit d0e90430daafa9bd27d8fa71560a99fc473533f8
Author: Vinayakumar B 
AuthorDate: Wed Jan 22 15:40:11 2020 +0530

HADOOP-16824. port HADOOP-16754 (Fix docker failed to build yetus/hadoop) 
to thirdparty Dockerfile (#4)
---
 dev-support/docker/Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 657c223..5704bfc 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -147,9 +147,9 @@ ENV FINDBUGS_HOME /usr
 # Install shellcheck (0.4.6, the latest as of 2017-09-26)
 
 # hadolint ignore=DL3008
-RUN add-apt-repository -y ppa:jonathonf/ghc-8.0.2 \
+RUN add-apt-repository -y ppa:hvr/ghc \
 && apt-get -q update \
-&& apt-get -q install -y --no-install-recommends shellcheck \
+&& apt-get -q install -y --no-install-recommends shellcheck ghc-8.0.2 \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16820. ChangeLog and ReleaseNote are not packaged by createrelease script. (#2)

2020-01-21 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new efbab22  HADOOP-16820. ChangeLog and ReleaseNote are not packaged by 
createrelease script. (#2)
efbab22 is described below

commit efbab22845d4bbc15149b5b3122ac5439477ab07
Author: Vinayakumar B 
AuthorDate: Wed Jan 22 12:41:13 2020 +0530

HADOOP-16820. ChangeLog and ReleaseNote are not packaged by createrelease 
script. (#2)
---
 dev-support/bin/create-release | 4 ++--
 dev-support/bin/yetus-wrapper  | 9 +
 2 files changed, 11 insertions(+), 2 deletions(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index db767e2..75b80a1 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -551,12 +551,12 @@ function makearelease
 
   # Stage CHANGELOG and RELEASENOTES files
   for i in CHANGELOG RELEASENOTES; do
-if [[ $(ls -l 
"${BASEDIR}/src/site/markdown/release/${HADOOP_THIRDPARTY_VERSION}"/${i}*.md | 
wc -l) == 0 ]]; then
+if [[ $(ls -l 
"${BASEDIR}/src/site/markdown/release/thirdparty-${HADOOP_THIRDPARTY_VERSION}"/${i}*.md
 | wc -l) == 0 ]]; then
   echo "No ${i} found. Continuing..."
   continue;
 fi
 run cp -p \
-
"${BASEDIR}/src/site/markdown/release/${HADOOP_THIRDPARTY_VERSION}"/${i}*.md \
+
"${BASEDIR}/src/site/markdown/release/thirdparty-${HADOOP_THIRDPARTY_VERSION}"/${i}*.md
 \
 "${ARTIFACTS_DIR}/${i}.md"
   done
 
diff --git a/dev-support/bin/yetus-wrapper b/dev-support/bin/yetus-wrapper
index b0f71f1..ec6a02b 100755
--- a/dev-support/bin/yetus-wrapper
+++ b/dev-support/bin/yetus-wrapper
@@ -176,6 +176,15 @@ if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
   exit 1
 fi
 
+if [[ "${WANTED}" == "releasedocmaker" ]]; then
+  # releasedocmaker expects versions to be in form of x.y.z to generate index 
and readme files.
+  # But thirdparty version will be in form of 'thirdparty-x.y.z'
+  if [[ -x 
"${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/releasedocmaker/releasedocmaker/__init__.py"
 ]]; then
+sed -i 's@glob(\"@glob(\"thirdparty-@g' 
"${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/releasedocmaker/releasedocmaker/__init__.py"
+sed -i 's@%s v%s@%s %s@g' 
"${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/releasedocmaker/releasedocmaker/__init__.py"
+  fi
+fi
+
 if [[ -x 
"${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
]]; then
   popd >/dev/null
   exec 
"${HADOOP_PATCHPROCESS}/${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/bin/${WANTED}" 
"${ARGV[@]}"


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk updated: HADOOP-16821. [pb-upgrade] Use 'o.a.h.thirdparty.protobuf' shaded prefix instead of 'protobuf_3_7' (#3)

2020-01-21 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new eac5a3d  HADOOP-16821. [pb-upgrade] Use 'o.a.h.thirdparty.protobuf' 
shaded prefix instead of 'protobuf_3_7' (#3)
eac5a3d is described below

commit eac5a3df55fcc3b1fd4b50cf2fa129250d4c384b
Author: Vinayakumar B 
AuthorDate: Tue Jan 21 22:59:27 2020 +0530

HADOOP-16821. [pb-upgrade] Use 'o.a.h.thirdparty.protobuf' shaded prefix 
instead of 'protobuf_3_7' (#3)
---
 hadoop-shaded-protobuf_3_7/pom.xml | 2 +-
 pom.xml| 1 +
 src/site/markdown/index.md.vm  | 1 +
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-shaded-protobuf_3_7/pom.xml 
b/hadoop-shaded-protobuf_3_7/pom.xml
index 102625c..5a622cd 100644
--- a/hadoop-shaded-protobuf_3_7/pom.xml
+++ b/hadoop-shaded-protobuf_3_7/pom.xml
@@ -74,7 +74,7 @@
           <relocations>
             <relocation>
               <pattern>com/google/protobuf</pattern>
-              <shadedPattern>${shaded.prefix}.protobuf_3_7</shadedPattern>
+              <shadedPattern>${protobuf.shade.prefix}</shadedPattern>
             </relocation>
             <relocation>
               <pattern>google/</pattern>
diff --git a/pom.xml b/pom.xml
index 155a0a2..0754cb6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -93,6 +93,7 @@
 
   <properties>
     <shaded.prefix>org.apache.hadoop.thirdparty</shaded.prefix>
+    <protobuf.shade.prefix>${shaded.prefix}.protobuf</protobuf.shade.prefix>
     <protobuf.version>3.7.1</protobuf.version>
   </properties>
 
diff --git a/src/site/markdown/index.md.vm b/src/site/markdown/index.md.vm
index adafd02..f7acb74 100644
--- a/src/site/markdown/index.md.vm
+++ b/src/site/markdown/index.md.vm
@@ -43,3 +43,4 @@ This page provides an overview of the major changes.
 Protobuf-java
 -
 Google Protobuf's 3.7.1 jar is available as 
*org.apache.hadoop.thirdparty:hadoop-shaded-protobuf_3_7* artifact.
+*com.google.protobuf* package is shaded as 
*org.apache.hadoop.thirdparty.protobuf*.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16621. [pb-upgrade] Remove Protobuf classes from signatures of Public APIs. Contributed by Vinayakumar B. (#1803)

2020-01-16 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new edbbc03  HADOOP-16621. [pb-upgrade] Remove Protobuf classes from 
signatures of Public APIs. Contributed by Vinayakumar B. (#1803)
edbbc03 is described below

commit edbbc03ce7d479f1b84d9209021e9d2822909cfe
Author: Vinayakumar B 
AuthorDate: Thu Jan 16 23:27:50 2020 +0530

HADOOP-16621. [pb-upgrade] Remove Protobuf classes from signatures of 
Public APIs. Contributed by Vinayakumar B. (#1803)
---
 .../dev-support/findbugsExcludeFile.xml|  6 ++
 .../java/org/apache/hadoop/ipc/ProtobufHelper.java | 69 ++
 .../org/apache/hadoop/security/Credentials.java|  5 +-
 .../org/apache/hadoop/security/token/Token.java| 28 -
 .../dev-support/findbugsExcludeFile.xml|  5 --
 .../hadoop/hdfs/protocolPB/PBHelperClient.java | 45 ++
 6 files changed, 84 insertions(+), 74 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml 
b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
index 802197e..cf5c387 100644
--- a/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
@@ -460,4 +460,10 @@
 
 
   
+
+  
+
+
+
+  
 
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
index e30f28a..105628f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
@@ -18,9 +18,15 @@
 package org.apache.hadoop.ipc;
 
 import java.io.IOException;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 
+import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
 
 /**
@@ -46,4 +52,67 @@ public class ProtobufHelper {
 }
 return e instanceof IOException ? (IOException) e : new IOException(se);
   }
+
+
+  /**
+   * Map used to cache fixed strings to ByteStrings. Since there is no
+   * automatic expiration policy, only use this for strings from a fixed, small
+   * set.
+   * 
+   * This map should not be accessed directly. Use the getFixedByteString
+   * methods instead.
+   */
+  private final static ConcurrentHashMap<Object, ByteString>
+      FIXED_BYTESTRING_CACHE = new ConcurrentHashMap<>();
+
+  /**
+   * Get the ByteString for frequently used fixed and small set strings.
+   * @param key string
+   * @return the cached ByteString for the given key
+   */
+  public static ByteString getFixedByteString(Text key) {
+    ByteString value = FIXED_BYTESTRING_CACHE.get(key);
+    if (value == null) {
+      value = ByteString.copyFromUtf8(key.toString());
+      FIXED_BYTESTRING_CACHE.put(new Text(key.copyBytes()), value);
+    }
+    return value;
+  }
+
+  /**
+   * Get the ByteString for frequently used fixed and small set strings.
+   * @param key string
+   * @return the cached ByteString for the given key
+   */
+  public static ByteString getFixedByteString(String key) {
+    ByteString value = FIXED_BYTESTRING_CACHE.get(key);
+    if (value == null) {
+      value = ByteString.copyFromUtf8(key);
+      FIXED_BYTESTRING_CACHE.put(key, value);
+    }
+    return value;
+  }
+
+  public static ByteString getByteString(byte[] bytes) {
+    // return singleton to reduce object allocation
+    return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes);
+  }
+
+  public static Token<? extends TokenIdentifier> tokenFromProto(
+      TokenProto tokenProto) {
+    Token<? extends TokenIdentifier> token = new Token<>(
+        tokenProto.getIdentifier().toByteArray(),
+        tokenProto.getPassword().toByteArray(), new Text(tokenProto.getKind()),
+        new Text(tokenProto.getService()));
+    return token;
+  }
+
+  public static TokenProto protoFromToken(Token<?> tok) {
+    TokenProto.Builder builder = TokenProto.newBuilder().
+        setIdentifier(getByteString(tok.getIdentifier())).
+        setPassword(getByteString(tok.getPassword())).
+        setKindBytes(getFixedByteString(tok.getKind())).
+        setServiceBytes(getFixedByteString(tok.getService()));
+    return builder.build();
+  }
 }
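
A hedged usage sketch for the helpers added above, round-tripping a delegation
token through TokenProto (the kind and service values are invented for
illustration; getFixedByteString keeps such small, repeated strings cached):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.ipc.ProtobufHelper;
    import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class TokenRoundTrip {
      public static void main(String[] args) {
        Token<TokenIdentifier> token = new Token<>(new byte[] {1, 2},
            new byte[] {3, 4}, new Text("HDFS_DELEGATION_TOKEN"),
            new Text("nn1:8020"));
        // kind and service go through the fixed-ByteString cache
        TokenProto proto = ProtobufHelper.protoFromToken(token);
        Token<?> copy = ProtobufHelper.tokenFromProto(proto);
        System.out.println(copy.getKind() + " @ " + copy.getService());
      }
    }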
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index 37cf021..de30b18 100644
--- 
a/hadoop-common-project/hadoop-common/src/m

[hadoop-thirdparty] branch trunk updated: HADOOP-16595. [pb-upgrade] Create hadoop-thirdparty artifact to have shaded protobuf. Contributed by Vinayakumar B. (#1)

2020-01-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git


The following commit(s) were added to refs/heads/trunk by this push:
 new fd78dcf  HADOOP-16595. [pb-upgrade] Create hadoop-thirdparty artifact 
to have shaded protobuf. Contributed by Vinayakumar B. (#1)
fd78dcf is described below

commit fd78dcf990adde4d09dc1c9dfbf46a83f710027b
Author: Vinayakumar B 
AuthorDate: Mon Jan 13 10:56:24 2020 +0530

HADOOP-16595. [pb-upgrade] Create hadoop-thirdparty artifact to have shaded 
protobuf. Contributed by Vinayakumar B. (#1)
---
 .github/pull_request_template.md   |   6 +
 .gitignore |   9 +
 LICENSE-binary | 241 +++
 LICENSE.txt| 224 ++
 NOTICE-binary  | 780 +
 NOTICE.txt |  34 +
 dev-support/bin/create-release | 641 +
 dev-support/bin/releasedocmaker|  18 +
 dev-support/bin/yetus-wrapper  | 188 +
 dev-support/docker/Dockerfile  | 219 ++
 dev-support/docker/hadoop_env_checks.sh| 117 
 hadoop-shaded-protobuf_3_7/pom.xml | 115 +++
 licenses-binary/LICENSE-protobuf.txt   |  32 +
 pom.xml| 438 
 .../resources/assemblies/hadoop-thirdparty-src.xml |  62 ++
 src/site/markdown/index.md.vm  |  45 ++
 src/site/resources/css/site.css|  30 +
 src/site/site.xml  |  59 ++
 18 files changed, 3258 insertions(+)

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000..2b5014b
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,6 @@
+## NOTICE
+
+Please create an issue in ASF JIRA before opening a pull request,
+and you need to set the title of the pull request which starts with
+the corresponding JIRA issue number. (e.g. HADOOP-X. Fix a typo in YYY.)
+For more details, please see 
https://cwiki.apache.org/confluence/display/HADOOP/How+To+Contribute
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000..ed49e7c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+.idea
+**/target/*
+*.patch
+*.iml
+.project
+.classpath
+.settings
+patchprocess
+**/dependency-reduced-pom.xml
diff --git a/LICENSE-binary b/LICENSE-binary
new file mode 100644
index 000..6c668ef
--- /dev/null
+++ b/LICENSE-binary
@@ -0,0 +1,241 @@
+
+ Apache License
+   Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+  "License" shall mean the terms and conditions for use, reproduction,
+  and distribution as defined by Sections 1 through 9 of this document.
+
+  "Licensor" shall mean the copyright owner or entity authorized by
+  the copyright owner that is granting the License.
+
+  "Legal Entity" shall mean the union of the acting entity and all
+  other entities that control, are controlled by, or are under common
+  control with that entity. For the purposes of this definition,
+  "control" means (i) the power, direct or indirect, to cause the
+  direction or management of such entity, whether by contract or
+  otherwise, or (ii) ownership of fifty percent (50%) or more of the
+  outstanding shares, or (iii) beneficial ownership of such entity.
+
+  "You" (or "Your") shall mean an individual or Legal Entity
+  exercising permissions granted by this License.
+
+  "Source" form shall mean the preferred form for making modifications,
+  including but not limited to software source code, documentation
+  source, and configuration files.
+
+  "Object" form shall mean any form resulting from mechanical
+  transformation or translation of a Source form, including but
+  not limited to compiled object code, generated documentation,
+  and conversions to other media types.
+
+  "Work" shall mean the work of authorship, whether in Source or
+  Object form, made available under the License, as indicated by a
+  copyright notice that is included in or attached to the work
+  (an example is provided in the Appendix below).
+
+  "Derivative Works" shall mean any work, whether in Source or Object
+  form, that is based on (or derived from) the Work and for which the
+  editorial revisions, annotations, elaborations, or other modifications
+  represent, a

[hadoop] branch trunk updated: HADOOP-16797. Add Dockerfile for ARM builds. Contributed by Vinayakumar B. (#1801)

2020-01-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 52b360a  HADOOP-16797. Add Dockerfile for ARM builds. Contributed by 
Vinayakumar B. (#1801)
52b360a is described below

commit 52b360a92865d2c7cbd113a82b45c6b5a191ce24
Author: Vinayakumar B 
AuthorDate: Mon Jan 13 10:40:29 2020 +0530

HADOOP-16797. Add Dockerfile for ARM builds. Contributed by Vinayakumar B. 
(#1801)
---
 dev-support/bin/create-release  |  16 ++-
 dev-support/docker/Dockerfile_aarch64   | 235 
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |   6 +
 start-build-env.sh  |  11 +-
 4 files changed, 265 insertions(+), 3 deletions(-)

diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index d14c007..f4851d1 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -204,6 +204,11 @@ function set_defaults
   DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile"
   DOCKERRAN=false
 
+  CPU_ARCH=$(echo "$MACHTYPE" | cut -d- -f1)
+  if [ "$CPU_ARCH" = "aarch64" ]; then
+DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile_aarch64"
+  fi
+
   # Extract Java version from ${BASEDIR}/pom.xml
   # doing this outside of maven means we can do this before
   # the docker container comes up...
@@ -249,7 +254,9 @@ function startgpgagent
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \
 --log-file="${LOGDIR}/create-release-gpgagent.log")
-  GPGAGENTPID=$(echo "${GPG_AGENT_INFO}" | cut -f 2 -d:)
+  GPGAGENTPID=$(pgrep "${GPGAGENT}")
+  GPG_AGENT_INFO="$HOME/.gnupg/S.gpg-agent:$GPGAGENTPID:1"
+  export GPG_AGENT_INFO
 fi
 
 if [[ -n "${GPG_AGENT_INFO}" ]]; then
@@ -499,7 +506,12 @@ function dockermode
 
 # we always force build with the OpenJDK JDK
 # but with the correct version
-echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64"
+if [ "$CPU_ARCH" = "aarch64" ]; then
+  echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-arm64"
+else
+  echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-openjdk-amd64"
+fi
+
 echo "USER ${user_name}"
 printf "\n\n"
   ) | docker build -t "${imgname}" -
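
A hedged Java rendering of the same architecture dispatch, for orientation
(the JDK paths mirror the Dockerfile layout above; the concrete version number
is whatever ${JVM_VERSION} resolves to, with 8 used here only as a
placeholder):

    public class ArchProbe {
      public static void main(String[] args) {
        // "aarch64" on ARM64 Linux JVMs, typically "amd64" on x86_64
        String arch = System.getProperty("os.arch");
        String javaHome = "aarch64".equals(arch)
            ? "/usr/lib/jvm/java-8-openjdk-arm64"
            : "/usr/lib/jvm/java-8-openjdk-amd64";
        System.out.println(arch + " -> " + javaHome);
      }
    }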
diff --git a/dev-support/docker/Dockerfile_aarch64 
b/dev-support/docker/Dockerfile_aarch64
new file mode 100644
index 000..8d3c3ad
--- /dev/null
+++ b/dev-support/docker/Dockerfile_aarch64
@@ -0,0 +1,235 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Dockerfile for installing the necessary dependencies for building Hadoop.
+# See BUILDING.txt.
+
+FROM ubuntu:xenial
+
+WORKDIR /root
+
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+
+#
+# Disable suggests/recommends
+#
+RUN echo APT::Install-Recommends "0"\; > /etc/apt/apt.conf.d/10disableextras
+RUN echo APT::Install-Suggests "0"\; >>  /etc/apt/apt.conf.d/10disableextras
+
+ENV DEBIAN_FRONTEND noninteractive
+ENV DEBCONF_TERSE true
+
+##
+# Install common dependencies from packages. Versions here are either
+# sufficient or irrelevant.
+#
+# WARNING: DO NOT PUT JAVA APPS HERE! Otherwise they will install default
+# Ubuntu Java.  See Java section below!
+##
+# hadolint ignore=DL3008
+RUN apt-get -q update \
+&& apt-get -q install -y --no-install-recommends \
+apt-utils \
+build-essential \
+bzip2 \
+clang \
+curl \
+doxygen \
+fuse \
+g++ \
+gcc \
+git \
+gnupg-agent \
+libbz2-dev \
+libcurl4-openssl-dev \
+libfuse-dev \
+libprotobuf-dev \
+libprotoc-dev \
+libsasl2-dev \
+libsnappy-dev \
+libssl-dev \
+libtool \
+libzstd1-dev \
+locales \

[hadoop] branch trunk updated: YARN-10041. Should not use AbstractPath to create unix domain socket (#1771)

2019-12-27 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 0fed874  YARN-10041. Should not use AbstractPath to create unix domain 
socket (#1771)
0fed874 is described below

commit 0fed874adf4cabfa859141bd9d72cfea2824825f
Author: Liu sheng 
AuthorDate: Fri Dec 27 19:20:15 2019 +0800

YARN-10041. Should not use AbstractPath to create unix domain socket (#1771)
---
 .../test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
index 7eed98f..9e7ac19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.csi.client;
 
 import csi.v0.Csi;
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.test.GenericTestUtils;
+import com.google.common.io.Files;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -42,7 +42,7 @@ public class TestCsiClient {
 
   @BeforeClass
   public static void setUp() throws IOException {
-testRoot = GenericTestUtils.getTestDir("csi-test");
+testRoot = Files.createTempDir();
 File socketPath = new File(testRoot, "csi.sock");
 FileUtils.forceMkdirParent(socketPath);
 domainSocket = "unix://" + socketPath.getAbsolutePath();
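
One constraint the short temp directory sidesteps, offered as a hedged reading
since the change itself only swaps how the test root is created: AF_UNIX
socket paths are length-limited (sun_path is 108 bytes on Linux, less on some
other systems), so a socket file under a deep build tree can fail to bind. A
small sketch:

    import java.io.File;
    import com.google.common.io.Files;

    public class SocketPathCheck {
      public static void main(String[] args) {
        File root = Files.createTempDir(); // short path such as /tmp/1234...
        File sock = new File(root, "csi.sock");
        // stays comfortably under the 108-byte limit
        System.out.println(sock.getAbsolutePath().length()
            + " chars: " + sock.getAbsolutePath());
      }
    }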


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16774. TestDiskChecker and TestReadWriteDiskValidator fails when run with -Pparallel-tests (#1776). Contributed by Vinayakumar B.

2019-12-20 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e2a5448  HADOOP-16774. TestDiskChecker and TestReadWriteDiskValidator 
fails when run with -Pparallel-tests (#1776). Contributed by Vinayakumar B.
e2a5448 is described below

commit e2a5448d2b02b40ea7d5cc09787f995fe0d253d0
Author: Vinayakumar B 
AuthorDate: Sat Dec 21 00:08:15 2019 +0530

HADOOP-16774. TestDiskChecker and TestReadWriteDiskValidator fails when run 
with -Pparallel-tests (#1776). Contributed by Vinayakumar B.
---
 hadoop-common-project/hadoop-common/pom.xml | 4 
 1 file changed, 4 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 84d3ae5..370c68d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -842,9 +842,13 @@
 
   
 parallel-tests-createdir
+process-test-resources
 
   parallel-tests-createdir
 
+
+  ${test.build.data}
+
   
 
   


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.1 updated: HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.

2019-10-24 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new e640e80  HDFS-14921. Remove SuperUser Check in Setting Storage Policy 
in FileStatus During Listing. Contributed by Ayush Saxena.
e640e80 is described below

commit e640e809a9bcb4ba9d45bc2de6aff78b452ea2b8
Author: Vinayakumar B 
AuthorDate: Thu Oct 24 12:14:09 2019 +0530

HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus 
During Listing. Contributed by Ayush Saxena.
---
 .../server/namenode/FSDirStatAndListingOp.java | 18 +
 .../hadoop/hdfs/TestDistributedFileSystem.java | 30 ++
 2 files changed, 37 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index ae6a6dc..a4e2ddd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -73,14 +73,12 @@ class FSDirStatAndListingOp {
   }
 }
 
-boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
   if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
 fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
   }
-  isSuperUser = pc.isSuperUser();
 }
-return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
+return getListing(fsd, iip, startAfter, needLocation);
   }
 
   /**
@@ -205,11 +203,10 @@ class FSDirStatAndListingOp {
*path
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
-   * @param includeStoragePolicy if storage policy is returned
* @return a partial listing starting after startAfter
*/
   private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
-  byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
+  byte[] startAfter, boolean needLocation)
   throws IOException {
 if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
   return getReservedListing(fsd);
@@ -226,9 +223,7 @@ class FSDirStatAndListingOp {
 return null;
   }
 
-  byte parentStoragePolicy = includeStoragePolicy
-  ? targetNode.getStoragePolicyID()
-  : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+  byte parentStoragePolicy = targetNode.getStoragePolicyID();
 
   if (!targetNode.isDirectory()) {
 // return the file's status. note that the iip already includes the
@@ -250,9 +245,10 @@ class FSDirStatAndListingOp {
   HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
   for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
 INode child = contents.get(startChild+i);
-byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
-? getStoragePolicyID(child.getLocalStoragePolicyID(),
- parentStoragePolicy)
+byte childStoragePolicy =
+!child.isSymlink()
+? getStoragePolicyID(child.getLocalStoragePolicyID(),
+parentStoragePolicy)
 : parentStoragePolicy;
 listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
 needLocation, false);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 25cc817..14dc500 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import 
org.apache.hadoop.hdfs.DistributedFileSystem.HdfsDataOutputStreamBuilder;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.pr
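
The client-visible effect, as a hedged sketch: after this change a directory
listing carries each entry's storage policy id for any caller, not only the
superuser. The NameNode address and paths below are invented, and the cast
assumes the 3.x client, where the FileStatus objects returned by
DistributedFileSystem are HdfsFileStatus instances:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ListingPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem)
            new Path("hdfs://nn1:8020/").getFileSystem(conf);
        for (FileStatus st : dfs.listStatus(new Path("/user/alice"))) {
          byte policyId = ((HdfsFileStatus) st).getStoragePolicy();
          System.out.println(st.getPath() + " -> policy id " + policyId);
        }
      }
    }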

[hadoop] branch branch-3.2 updated: HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.

2019-10-24 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e1e3d9c  HDFS-14921. Remove SuperUser Check in Setting Storage Policy 
in FileStatus During Listing. Contributed by Ayush Saxena.
e1e3d9c is described below

commit e1e3d9c81de60c110f711933b41e1dab011c10ab
Author: Vinayakumar B 
AuthorDate: Thu Oct 24 12:14:09 2019 +0530

HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus 
During Listing. Contributed by Ayush Saxena.
---
 .../server/namenode/FSDirStatAndListingOp.java | 18 +
 .../hadoop/hdfs/TestDistributedFileSystem.java | 30 ++
 2 files changed, 37 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 38acfe9..c7f43e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -73,14 +73,12 @@ class FSDirStatAndListingOp {
   }
 }
 
-boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
   if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
 fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
   }
-  isSuperUser = pc.isSuperUser();
 }
-return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
+return getListing(fsd, iip, startAfter, needLocation);
   }
 
   /**
@@ -210,11 +208,10 @@ class FSDirStatAndListingOp {
*path
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
-   * @param includeStoragePolicy if storage policy is returned
* @return a partial listing starting after startAfter
*/
   private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
-  byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
+  byte[] startAfter, boolean needLocation)
   throws IOException {
 if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
   return getReservedListing(fsd);
@@ -231,9 +228,7 @@ class FSDirStatAndListingOp {
 return null;
   }
 
-  byte parentStoragePolicy = includeStoragePolicy
-  ? targetNode.getStoragePolicyID()
-  : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+  byte parentStoragePolicy = targetNode.getStoragePolicyID();
 
   if (!targetNode.isDirectory()) {
 // return the file's status. note that the iip already includes the
@@ -255,9 +250,10 @@ class FSDirStatAndListingOp {
   HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
   for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
 INode child = contents.get(startChild+i);
-byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
-? getStoragePolicyID(child.getLocalStoragePolicyID(),
- parentStoragePolicy)
+byte childStoragePolicy =
+!child.isSymlink()
+? getStoragePolicyID(child.getLocalStoragePolicyID(),
+parentStoragePolicy)
 : parentStoragePolicy;
 listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
 needLocation, false);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 57e65a5..8bef655 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import 
org.apache.hadoop.hdfs.DistributedFileSystem.HdfsDataOutputStreamBuilder;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.pr

[hadoop] branch trunk updated: HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus During Listing. Contributed by Ayush Saxena.

2019-10-24 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ee699dc  HDFS-14921. Remove SuperUser Check in Setting Storage Policy 
in FileStatus During Listing. Contributed by Ayush Saxena.
ee699dc is described below

commit ee699dc26c7b660a5222a30782f3bf5cb1e55085
Author: Vinayakumar B 
AuthorDate: Thu Oct 24 12:14:09 2019 +0530

HDFS-14921. Remove SuperUser Check in Setting Storage Policy in FileStatus 
During Listing. Contributed by Ayush Saxena.
---
 .../server/namenode/FSDirStatAndListingOp.java | 18 +
 .../hadoop/hdfs/TestDistributedFileSystem.java | 30 ++
 2 files changed, 37 insertions(+), 11 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 38acfe9..c7f43e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -73,14 +73,12 @@ class FSDirStatAndListingOp {
   }
 }
 
-boolean isSuperUser = true;
 if (fsd.isPermissionEnabled()) {
   if (iip.getLastINode() != null && iip.getLastINode().isDirectory()) {
 fsd.checkPathAccess(pc, iip, FsAction.READ_EXECUTE);
   }
-  isSuperUser = pc.isSuperUser();
 }
-return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
+return getListing(fsd, iip, startAfter, needLocation);
   }
 
   /**
@@ -210,11 +208,10 @@ class FSDirStatAndListingOp {
   *            path
* @param startAfter the name to start listing after
* @param needLocation if block locations are returned
-   * @param includeStoragePolicy if storage policy is returned
* @return a partial listing starting after startAfter
*/
   private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
-  byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
+  byte[] startAfter, boolean needLocation)
   throws IOException {
 if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
   return getReservedListing(fsd);
@@ -231,9 +228,7 @@ class FSDirStatAndListingOp {
 return null;
   }
 
-  byte parentStoragePolicy = includeStoragePolicy
-  ? targetNode.getStoragePolicyID()
-  : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+  byte parentStoragePolicy = targetNode.getStoragePolicyID();
 
   if (!targetNode.isDirectory()) {
 // return the file's status. note that the iip already includes the
@@ -255,9 +250,10 @@ class FSDirStatAndListingOp {
   HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
   for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
 INode child = contents.get(startChild+i);
-byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
-? getStoragePolicyID(child.getLocalStoragePolicyID(),
- parentStoragePolicy)
+byte childStoragePolicy =
+!child.isSymlink()
+? getStoragePolicyID(child.getLocalStoragePolicyID(),
+parentStoragePolicy)
 : parentStoragePolicy;
 listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
 needLocation, false);
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 3cb8c80..3f7a6c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageStatistics.LongStatistic;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import 
org.apache.hadoop.hdfs.DistributedFileSystem.HdfsDataOutputStreamBuilder;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -90,6 +91,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.pr
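
For context, a minimal client-side sketch of what this change means in
practice (a hedged illustration, not part of the commit; assumes a running
cluster, an existing DistributedFileSystem handle, and a hypothetical
"/data" directory):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    static byte[] listStoragePolicyIds(DistributedFileSystem dfs)
        throws IOException {
      DFSClient client = dfs.getClient();
      HdfsFileStatus[] entries = client
          .listPaths("/data", HdfsFileStatus.EMPTY_NAME).getPartialListing();
      byte[] ids = new byte[entries.length];
      for (int i = 0; i < entries.length; i++) {
        // With HDFS-14921 this is the real storage policy id even for
        // non-superusers; previously such callers only saw
        // BLOCK_STORAGE_POLICY_ID_UNSPECIFIED.
        ids[i] = entries[i].getStoragePolicy();
      }
      return ids;
    }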

[hadoop-thirdparty] 01/01: First commit

2019-09-26 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git

commit c90c42a7a5976578a088e79966d4592cb630157c
Author: Vinayakumar B 
AuthorDate: Thu Sep 26 11:55:07 2019 +0530

First commit
---
 README.md | 4 
 1 file changed, 4 insertions(+)

diff --git a/README.md b/README.md
new file mode 100644
index 000..b65b60a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,4 @@
+# Apache Hadoop Thirdparty
+
+
+


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop-thirdparty] branch trunk created (now c90c42a)

2019-09-26 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop-thirdparty.git.


  at c90c42a  First commit

This branch includes the following new commits:

 new c90c42a  First commit

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (4c0a7a9 -> 07c81e9)

2019-09-23 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from 4c0a7a9  Make upstream aware of 3.2.1 release.
 add 07c81e9  HADOOP-16558. [COMMON+HDFS] use protobuf-maven-plugin to 
generate protobuf classes (#1494). Contributed by Vinayakumar B.

No new revisions were added by this update.

Summary of changes:
 hadoop-common-project/hadoop-common/pom.xml| 66 +-
 .../hadoop-common/src/main/proto/FSProtos.proto|  2 +-
 .../src/main/proto/GenericRefreshProtocol.proto|  2 +-
 .../src/main/proto/GetUserMappingsProtocol.proto   |  2 +-
 .../src/main/proto/HAServiceProtocol.proto |  2 +-
 .../src/main/proto/IpcConnectionContext.proto  |  2 +-
 .../src/main/proto/ProtobufRpcEngine.proto |  2 +-
 .../src/main/proto/ProtocolInfo.proto  |  2 +-
 .../proto/RefreshAuthorizationPolicyProtocol.proto |  2 +-
 .../src/main/proto/RefreshCallQueueProtocol.proto  |  2 +-
 .../main/proto/RefreshUserMappingsProtocol.proto   |  2 +-
 .../hadoop-common/src/main/proto/RpcHeader.proto   |  2 +-
 .../hadoop-common/src/main/proto/Security.proto|  2 +-
 .../hadoop-common/src/main/proto/TraceAdmin.proto  |  2 +-
 .../src/main/proto/ZKFCProtocol.proto  |  2 +-
 .../hadoop-common/src/test/proto/test.proto|  2 +-
 .../src/test/proto/test_rpc_service.proto  |  1 +
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml | 36 +++-
 .../src/main/proto/ClientDatanodeProtocol.proto|  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto|  2 +-
 .../src/main/proto/ReconfigurationProtocol.proto   |  2 +-
 .../hadoop-hdfs-client/src/main/proto/acl.proto|  2 +-
 .../src/main/proto/datatransfer.proto  |  2 +-
 .../src/main/proto/encryption.proto|  2 +-
 .../src/main/proto/erasurecoding.proto |  2 +-
 .../hadoop-hdfs-client/src/main/proto/hdfs.proto   |  2 +-
 .../src/main/proto/inotify.proto   |  2 +-
 .../hadoop-hdfs-client/src/main/proto/xattr.proto  |  2 +-
 hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml| 32 ---
 .../src/main/proto/FederationProtocol.proto|  2 +-
 .../src/main/proto/RouterProtocol.proto|  2 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml| 48 ++--
 .../src/main/proto/AliasMapProtocol.proto  |  2 +-
 .../src/main/proto/DatanodeLifelineProtocol.proto  |  2 +-
 .../src/main/proto/DatanodeProtocol.proto  |  2 +-
 .../hadoop-hdfs/src/main/proto/HAZKInfo.proto  |  2 +-
 .../hadoop-hdfs/src/main/proto/HdfsServer.proto|  2 +-
 .../src/main/proto/InterDatanodeProtocol.proto |  2 +-
 .../src/main/proto/InterQJournalProtocol.proto |  2 +-
 .../src/main/proto/JournalProtocol.proto   |  2 +-
 .../src/main/proto/NamenodeProtocol.proto  |  2 +-
 .../src/main/proto/QJournalProtocol.proto  |  2 +-
 .../hadoop-hdfs/src/main/proto/editlog.proto   |  2 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto   |  2 +-
 hadoop-project/pom.xml | 49 +++-
 45 files changed, 141 insertions(+), 169 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (aa93866 -> efed445)

2019-09-20 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from aa93866  HDFS-14833. RBF: Router Update Doesn't Sync Quota. 
Contributed by Ayush Saxena.
 add efed445  HADOOP-16589. [pb-upgrade] Update docker image to make 3.7.1 
protoc as default (#1482). Contributed by Vinayakumar B.

No new revisions were added by this update.

Summary of changes:
 dev-support/docker/Dockerfile | 20 ++--
 hadoop-project/pom.xml|  3 ---
 2 files changed, 2 insertions(+), 21 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated (d072d33 -> 1654497)

2019-09-20 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


from d072d33  HDDS-2020. Remove mTLS from Ozone GRPC. Contributed by Xiaoyu 
Yao.
 add 1654497  HADOOP-16557. [pb-upgrade] Upgrade protobuf.version to 3.7.1 
(#1432)

No new revisions were added by this update.

Summary of changes:
 BUILDING.txt   | 25 ++
 .../hadoop-client-runtime/pom.xml  |  7 ++
 .../org/apache/hadoop/ipc/RemoteException.java |  2 +-
 .../java/org/apache/hadoop/ipc/RpcWritable.java|  2 +-
 .../main/java/org/apache/hadoop/ipc/Server.java|  6 +++---
 .../java/org/apache/hadoop/util/TestProtoUtil.java |  2 +-
 .../hdfs/protocol/datatransfer/PipelineAck.java|  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java | 17 ---
 .../impl/pb/FederationProtocolPBTranslator.java|  4 ++--
 .../hadoop/hdfs/protocol/BlockListAsLongs.java |  6 +++---
 .../server/namenode/FSImageFormatProtobuf.java |  5 +++--
 .../tools/offlineImageViewer/PBImageXmlWriter.java |  2 +-
 .../hadoop/hdfs/protocolPB/TestPBHelper.java   |  2 +-
 .../hdfs/server/datanode/TestLargeBlockReport.java |  3 +++
 hadoop-project/pom.xml |  5 -
 .../hadoop/hdfs/server/namenode/ImageWriter.java   |  4 ++--
 .../hadoop-yarn/hadoop-yarn-api/pom.xml|  1 +
 .../pb/PlacementConstraintToProtoConverter.java| 22 +--
 .../impl/pb/NodePublishVolumeRequestPBImpl.java|  4 ++--
 .../ValidateVolumeCapabilitiesRequestPBImpl.java   |  4 ++--
 20 files changed, 74 insertions(+), 51 deletions(-)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc executable (#1429).

2019-09-13 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 39e82ac  HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 
protoc executable (#1429).
39e82ac is described below

commit 39e82acc485db0e66cbb3dd26b59dfe111ce6a10
Author: Vinayakumar B 
AuthorDate: Fri Sep 13 15:30:24 2019 +0530

HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc 
executable (#1429).

Addendum patch. Moved protobuf-3.7.1 installation within YETUS marker.
---
 dev-support/docker/Dockerfile | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index fe38395..371bdde 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -200,6 +200,16 @@ RUN curl -L -s -S \
 ###
 ENV MAVEN_OPTS -Xms256m -Xmx1536m
 
+RUN mkdir -p /opt/protobuf-3.7-src \
+&& curl -L -s -S \
+  
https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
 \
+  -o /opt/protobuf-3.7.1.tar.gz \
+&& tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C 
/opt/protobuf-3.7-src \
+&& cd /opt/protobuf-3.7-src \
+&& ./configure --prefix=/opt/protobuf-3.7 \
+&& make install \
+&& cd /root \
+&& rm -rf /opt/protobuf-3.7-src
 
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.
@@ -217,16 +227,6 @@ RUN curl -L -o hugo.deb 
https://github.com/gohugoio/hugo/releases/download/v0.30
 # Keep 2.5.0 as well, until 3.7.1 upgrade is complete.
 ##
 # hadolint ignore=DL3003
-RUN mkdir -p /opt/protobuf-3.7-src \
-&& curl -L -s -S \
-  
https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
 \
-  -o /opt/protobuf-3.7.1.tar.gz \
-&& tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C 
/opt/protobuf-3.7-src \
-&& cd /opt/protobuf-3.7-src \
-&& ./configure --prefix=/opt/protobuf-3.7 \
-&& make install \
-&& cd /root \
-&& rm -rf /opt/protobuf-3.7-src
 
 # Add a welcome message and environment checks.
 COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc executable (#1429). Contributed by Vinayakumar B.

2019-09-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f4f9f0f  HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 
protoc executable (#1429). Contributed by Vinayakumar B.
f4f9f0f is described below

commit f4f9f0fe4f215e2e1b88b0607102f22388acfe45
Author: Vinayakumar B 
AuthorDate: Thu Sep 12 16:47:54 2019 +0530

HADOOP-16562. [pb-upgrade] Update docker image to have 3.7.1 protoc 
executable (#1429). Contributed by Vinayakumar B.
---
 dev-support/docker/Dockerfile | 16 
 1 file changed, 16 insertions(+)

diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index e71e51c..fe38395 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -212,6 +212,22 @@ RUN curl -L -o hugo.deb 
https://github.com/gohugoio/hugo/releases/download/v0.30
 && dpkg --install hugo.deb \
 && rm hugo.deb
 
+##
+# Install Google Protobuf 3.7.1 (2.6.0 ships with Xenial)
+# Keep 2.5.0 as well, until 3.7.1 upgrade is complete.
+##
+# hadolint ignore=DL3003
+RUN mkdir -p /opt/protobuf-3.7-src \
+&& curl -L -s -S \
+  
https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protobuf-java-3.7.1.tar.gz
 \
+  -o /opt/protobuf-3.7.1.tar.gz \
+&& tar xzf /opt/protobuf-3.7.1.tar.gz --strip-components 1 -C 
/opt/protobuf-3.7-src \
+&& cd /opt/protobuf-3.7-src \
+&& ./configure --prefix=/opt/protobuf-3.7 \
+&& make install \
+&& cd /root \
+&& rm -rf /opt/protobuf-3.7-src
+
 # Add a welcome message and environment checks.
 COPY hadoop_env_checks.sh /root/hadoop_env_checks.sh
 RUN chmod 755 /root/hadoop_env_checks.sh


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch trunk updated: HADOOP-16059. Use SASL Factories Cache to Improve Performance. Contributed by Ayush Saxena.

2019-05-02 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f1875b2  HADOOP-16059. Use SASL Factories Cache to Improve 
Performance. Contributed by Ayush Saxena.
f1875b2 is described below

commit f1875b205e492ef071e7ef78b147efee0e51263d
Author: Vinayakumar B 
AuthorDate: Fri May 3 11:22:14 2019 +0530

HADOOP-16059. Use SASL Factories Cache to Improve Performance. Contributed 
by Ayush Saxena.
---
 .../hadoop/security/FastSaslClientFactory.java | 80 ++
 .../hadoop/security/FastSaslServerFactory.java | 78 +
 .../org/apache/hadoop/security/SaslRpcClient.java  | 12 +++-
 .../org/apache/hadoop/security/SaslRpcServer.java  | 58 ++--
 .../datatransfer/sasl/SaslParticipant.java | 26 ++-
 5 files changed, 196 insertions(+), 58 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
new file mode 100644
index 000..d5259d3
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslClientFactory.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslClientFactory;
+import javax.security.sasl.SaslException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Class for dealing with caching SASL client factories.
+ */
+@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
+public class FastSaslClientFactory implements SaslClientFactory {
+  private final Map<String, List<SaslClientFactory>> factoryCache =
+  new HashMap<String, List<SaslClientFactory>>();
+
+  public FastSaslClientFactory(Map<String, ?> props) {
+final Enumeration<SaslClientFactory> factories =
+Sasl.getSaslClientFactories();
+while (factories.hasMoreElements()) {
+  SaslClientFactory factory = factories.nextElement();
+  for (String mech : factory.getMechanismNames(props)) {
+if (!factoryCache.containsKey(mech)) {
+  factoryCache.put(mech, new ArrayList<SaslClientFactory>());
+}
+factoryCache.get(mech).add(factory);
+  }
+}
+  }
+
+  @Override
+  public String[] getMechanismNames(Map<String, ?> props) {
+return factoryCache.keySet().toArray(new String[0]);
+  }
+
+  @Override
+  public SaslClient createSaslClient(String[] mechanisms,
+  String authorizationId, String protocol, String serverName,
+  Map<String, ?> props, CallbackHandler cbh) throws SaslException {
+for (String mechanism : mechanisms) {
+  List<SaslClientFactory> factories = factoryCache.get(mechanism);
+  if (factories != null) {
+for (SaslClientFactory factory : factories) {
+  SaslClient saslClient =
+  factory.createSaslClient(new String[] {mechanism},
+  authorizationId, protocol, serverName, props, cbh);
+  if (saslClient != null) {
+return saslClient;
+  }
+}
+  }
+}
+return null;
+  }
+}
\ No newline at end of file
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
new file mode 100644
index 000..79519d4
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/FastSaslServerFactory.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ 
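
As a usage illustration of the client-side cache added above (a hedged
sketch, not from the patch; the mechanism, protocol and server name are
placeholders, and "handler" stands for any CallbackHandler):

    import java.util.HashMap;
    import java.util.Map;
    import javax.security.auth.callback.CallbackHandler;
    import javax.security.sasl.SaslClient;
    import javax.security.sasl.SaslClientFactory;
    import org.apache.hadoop.security.FastSaslClientFactory;

    static SaslClient newClient(CallbackHandler handler) throws Exception {
      Map<String, String> props = new HashMap<>();
      // Provider factories are enumerated once, in the constructor, rather
      // than on every negotiation as with Sasl.createSaslClient().
      SaslClientFactory factory = new FastSaslClientFactory(props);
      return factory.createSaslClient(new String[] {"DIGEST-MD5"}, null,
          "hdfs", "nn.example.com", props, handler);
    }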

[hadoop] branch trunk updated: HDFS-7663. Erasure Coding: Append on striped file. Contributed by Ayush Saxena.

2019-03-05 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f940ab2  HDFS-7663. Erasure Coding: Append on striped file. 
Contributed by Ayush Saxena.
f940ab2 is described below

commit f940ab242da80a22bae95509d5c282d7e2f7ecdb
Author: Vinayakumar B 
AuthorDate: Tue Mar 5 19:26:42 2019 +0530

HDFS-7663. Erasure Coding: Append on striped file. Contributed by Ayush 
Saxena.
---
 .../org/apache/hadoop/hdfs/DFSOutputStream.java|  16 +--
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java |  20 +++-
 .../hadoop/hdfs/server/namenode/FSDirAppendOp.java |  10 +-
 .../apache/hadoop/hdfs/TestStripedFileAppend.java  | 114 +
 4 files changed, 145 insertions(+), 15 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index aaef8ad..a4e0742 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -119,7 +119,7 @@ public class DFSOutputStream extends FSOutputSummer
   protected int packetSize = 0; // write packet size, not including the header.
   protected int chunksPerPacket = 0;
   protected long lastFlushOffset = 0; // offset when flush was invoked
-  private long initialFileSize = 0; // at time of file open
+  protected long initialFileSize = 0; // at time of file open
   private final short blockReplication; // replication factor of file
   protected boolean shouldSyncBlock = false; // force blocks to disk upon close
   private final EnumSet<AddBlockFlag> addBlockFlags;
@@ -391,14 +391,16 @@ public class DFSOutputStream extends FSOutputSummer
   EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
   HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
   throws IOException {
-if(stat.getErasureCodingPolicy() != null) {
-  throw new IOException(
-  "Not support appending to a striping layout file yet.");
-}
 try (TraceScope ignored =
  dfsClient.newPathTraceScope("newStreamForAppend", src)) {
-  final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
-  progress, lastBlock, stat, checksum, favoredNodes);
+  DFSOutputStream out;
+  if (stat.isErasureCoded()) {
+out = new DFSStripedOutputStream(dfsClient, src, flags, progress,
+lastBlock, stat, checksum, favoredNodes);
+  } else {
+out = new DFSOutputStream(dfsClient, src, flags, progress, lastBlock,
+stat, checksum, favoredNodes);
+  }
   out.start();
   return out;
 }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 97310ee..ff81995 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -276,6 +276,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
   private final int numAllBlocks;
   private final int numDataBlocks;
   private ExtendedBlock currentBlockGroup;
+  private ExtendedBlock prevBlockGroup4Append;
   private final String[] favoredNodes;
   private final List<StripedDataStreamer> failedStreamers;
   private final Map<Long, Integer> corruptBlockCountMap;
@@ -324,6 +325,16 @@ public class DFSStripedOutputStream extends DFSOutputStream
 setCurrentStreamer(0);
   }
 
+  /** Construct a new output stream for appending to a file. */
+  DFSStripedOutputStream(DFSClient dfsClient, String src,
+  EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
+  HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
+  throws IOException {
+this(dfsClient, src, stat, flags, progress, checksum, favoredNodes);
+initialFileSize = stat.getLen(); // length of file when opened
+prevBlockGroup4Append = lastBlock != null ? lastBlock.getBlock() : null;
+  }
+
   private boolean useDirectBuffer() {
 return encoder.preferDirectBuffer();
   }
@@ -473,12 +484,17 @@ public class DFSStripedOutputStream extends DFSOutputStream
 + Arrays.asList(excludedNodes));
 
 // replace failed streamers
+ExtendedBlock prevBlockGroup = currentBlockGroup;
+if (prevBlockGroup4Append != null) {
+  prevBlockGroup = prevBlockGroup4Append;
+  prevBlockGroup4Append = null;
+}
 replaceFailedStreamers();
 
 LOG.debug("Allocating new block group.

[hadoop] branch trunk updated: HDFS-7133. Support clearing namespace quota on '/'. Contributed by Ayush Saxena."

2019-02-25 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new f4ae00c  HDFS-7133. Support clearing namespace quota on '/'. 
Contributed by Ayush Saxena."
f4ae00c is described below

commit f4ae00c5301c0dd8923783ac6dca2d296c289254
Author: Vinayakumar B 
AuthorDate: Tue Feb 26 00:36:00 2019 +0530

HDFS-7133. Support clearing namespace quota on '/'. Contributed by Ayush 
Saxena."
---
 .../hadoop/hdfs/server/namenode/FSDirAttrOp.java   | 51 ++
 .../java/org/apache/hadoop/hdfs/TestQuota.java | 21 +++--
 2 files changed, 41 insertions(+), 31 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 1dbee96..6da59ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -332,38 +332,35 @@ public class FSDirAttrOp {
 
 INodeDirectory dirNode =
 INodeDirectory.valueOf(iip.getLastINode(), iip.getPath());
+final QuotaCounts oldQuota = dirNode.getQuotaCounts();
+final long oldNsQuota = oldQuota.getNameSpace();
+final long oldSsQuota = oldQuota.getStorageSpace();
 if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
-  throw new IllegalArgumentException("Cannot clear namespace quota on 
root.");
-} else { // a directory inode
-  final QuotaCounts oldQuota = dirNode.getQuotaCounts();
-  final long oldNsQuota = oldQuota.getNameSpace();
-  final long oldSsQuota = oldQuota.getStorageSpace();
-
-  if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
-nsQuota = oldNsQuota;
-  }
-  if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
-ssQuota = oldSsQuota;
-  }
+  nsQuota = HdfsConstants.QUOTA_DONT_SET;
+} else if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
+  nsQuota = oldNsQuota;
+} // a directory inode
+if (ssQuota == HdfsConstants.QUOTA_DONT_SET) {
+  ssQuota = oldSsQuota;
+}
 
-  // unchanged space/namespace quota
-  if (type == null && oldNsQuota == nsQuota && oldSsQuota == ssQuota) {
-return null;
-  }
+// unchanged space/namespace quota
+if (type == null && oldNsQuota == nsQuota && oldSsQuota == ssQuota) {
+  return null;
+}
 
-  // unchanged type quota
-  if (type != null) {
+  EnumCounters<StorageType> oldTypeQuotas = oldQuota.getTypeSpaces();
-  if (oldTypeQuotas != null && oldTypeQuotas.get(type) == ssQuota) {
-  return null;
-  }
+// unchanged type quota
+if (type != null) {
+  EnumCounters<StorageType> oldTypeQuotas = oldQuota.getTypeSpaces();
+  if (oldTypeQuotas != null && oldTypeQuotas.get(type) == ssQuota) {
+return null;
   }
-
-  final int latest = iip.getLatestSnapshotId();
-  dirNode.recordModification(latest);
-  dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, ssQuota, 
type);
-  return dirNode;
 }
+
+final int latest = iip.getLatestSnapshotId();
+dirNode.recordModification(latest);
+dirNode.setQuota(fsd.getBlockStoragePolicySuite(), nsQuota, ssQuota, type);
+return dirNode;
   }
 
   static BlockInfo[] unprotectedSetReplication(
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 1c4855f..f5d232c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -413,13 +413,13 @@ public class TestQuota {
   }
 });
 
-// 19: clrQuota on the root directory ("/") should fail
-runCommand(admin, true, "-clrQuota", "/");
+// 19: clrQuota on the root directory ("/") should pass.
+runCommand(admin, false, "-clrQuota", "/");
 
 // 20: setQuota on the root directory ("/") should succeed
 runCommand(admin, false, "-setQuota", "100", "/");
 
-runCommand(admin, true, "-clrQuota", "/");
+runCommand(admin, false, "-clrQuota", "/");
 runCommand(admin, false, "-clrSpaceQuota", "/");
 runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
 runCommand(admin, false, "-clrSpaceQuota", parent.toString());
@@ -456,7 +456,7 @@ public cla
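
In client terms, the change amounts to the following now succeeding (a
hedged sketch; "dfs" is an assumed DistributedFileSystem handle, and
"hdfs dfsadmin -clrQuota /" is the equivalent shell form):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    static void clearRootNamespaceQuota(DistributedFileSystem dfs)
        throws IOException {
      // QUOTA_RESET on "/" used to throw IllegalArgumentException; with
      // HDFS-7133 it is mapped to QUOTA_DONT_SET, so the call becomes a
      // harmless no-op for the root namespace quota instead of an error.
      dfs.setQuota(new Path("/"), HdfsConstants.QUOTA_RESET,
          HdfsConstants.QUOTA_DONT_SET);
    }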

[hadoop] branch branch-3.2 updated: HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.

2019-02-13 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 49af36b  HADOOP-16108. Tail Follow Interval Should Allow To Specify 
The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
49af36b is described below

commit 49af36b25c88a095f65cf7bafd234d41661526ca
Author: Vinayakumar B 
AuthorDate: Wed Feb 13 09:29:37 2019 +0530

HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep 
Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
---
 .../main/java/org/apache/hadoop/fs/shell/Tail.java | 25 --
 .../java/org/apache/hadoop/fs/shell/TestTail.java  | 57 ++
 .../hadoop-common/src/test/resources/testConf.xml  |  6 ++-
 3 files changed, 84 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
index 1d49bf1..8a75a60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.io.IOUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Get a listing of all files in that match the file patterns.
  */
@@ -40,20 +42,37 @@ class Tail extends FsCommand {
   }
   
   public static final String NAME = "tail";
-  public static final String USAGE = "[-f] <file>";
+  public static final String USAGE = "[-f] [-s <sleep interval>] <file>";
   public static final String DESCRIPTION =
-"Show the last 1KB of the file.\n" +
-"-f: Shows appended data as the file grows.\n";
+  "Show the last 1KB of the file.\n"
+  + "-f: Shows appended data as the file grows.\n"
+  + "-s: With -f , "
+  + "defines the sleep interval between iterations in milliseconds.\n";
 
   private long startingOffset = -1024;
   private boolean follow = false;
   private long followDelay = 5000; // milliseconds
   
+  @VisibleForTesting
+  public long getFollowDelay() {
+return followDelay;
+  }
+
   @Override
  protected void processOptions(LinkedList<String> args) throws IOException {
 CommandFormat cf = new CommandFormat(1, 1, "f");
+cf.addOptionWithValue("s");
 cf.parse(args);
 follow = cf.getOpt("f");
+if (follow) {
+  String sleep = cf.getOptValue("s");
+  if (sleep != null && !sleep.isEmpty()) {
+long sleepInterval = Long.parseLong(sleep);
+if (sleepInterval > 0) {
+  followDelay = sleepInterval;
+}
+  }
+}
   }
 
   // TODO: HADOOP-7234 will add glob support; for now, be backwards compat
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
new file mode 100644
index 000..31a5a4e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.junit.Test;
+
+/**
+ * Test class to verify Tail shell command.
+ */
+public class TestTail {
+
+  // check follow delay with -s parameter.
+  @Test
+  public void testSleepParameter() throws IOException {
+Tail tail = new Tail();
+LinkedList<String> options = new LinkedList<String>();
+options.add("-f");
+options.add("-s");
+options.add("1");
+options.add("/path");
+tail.processOptions(options);
+assertEq

[hadoop] branch branch-3.1 updated: HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.

2019-02-13 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new c450f9f  HADOOP-16108. Tail Follow Interval Should Allow To Specify 
The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
c450f9f is described below

commit c450f9f2c427b1716e0d6a318be91f1b9dd41537
Author: Vinayakumar B 
AuthorDate: Wed Feb 13 09:29:37 2019 +0530

HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep 
Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
---
 .../main/java/org/apache/hadoop/fs/shell/Tail.java | 25 --
 .../java/org/apache/hadoop/fs/shell/TestTail.java  | 57 ++
 .../hadoop-common/src/test/resources/testConf.xml  |  6 ++-
 3 files changed, 84 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
index 1d49bf1..8a75a60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.io.IOUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Get a listing of all files in that match the file patterns.
  */
@@ -40,20 +42,37 @@ class Tail extends FsCommand {
   }
   
   public static final String NAME = "tail";
-  public static final String USAGE = "[-f] <file>";
+  public static final String USAGE = "[-f] [-s <sleep interval>] <file>";
   public static final String DESCRIPTION =
-"Show the last 1KB of the file.\n" +
-"-f: Shows appended data as the file grows.\n";
+  "Show the last 1KB of the file.\n"
+  + "-f: Shows appended data as the file grows.\n"
+  + "-s: With -f , "
+  + "defines the sleep interval between iterations in milliseconds.\n";
 
   private long startingOffset = -1024;
   private boolean follow = false;
   private long followDelay = 5000; // milliseconds
   
+  @VisibleForTesting
+  public long getFollowDelay() {
+return followDelay;
+  }
+
   @Override
  protected void processOptions(LinkedList<String> args) throws IOException {
 CommandFormat cf = new CommandFormat(1, 1, "f");
+cf.addOptionWithValue("s");
 cf.parse(args);
 follow = cf.getOpt("f");
+if (follow) {
+  String sleep = cf.getOptValue("s");
+  if (sleep != null && !sleep.isEmpty()) {
+long sleepInterval = Long.parseLong(sleep);
+if (sleepInterval > 0) {
+  followDelay = sleepInterval;
+}
+  }
+}
   }
 
   // TODO: HADOOP-7234 will add glob support; for now, be backwards compat
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
new file mode 100644
index 000..31a5a4e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.junit.Test;
+
+/**
+ * Test class to verify Tail shell command.
+ */
+public class TestTail {
+
+  // check follow delay with -s parameter.
+  @Test
+  public void testSleepParameter() throws IOException {
+Tail tail = new Tail();
+LinkedList<String> options = new LinkedList<String>();
+options.add("-f");
+options.add("-s");
+options.add("1");
+options.add("/path");
+tail.processOptions(options);
+assertEq

[hadoop] branch trunk updated: HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.

2019-02-13 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 00c5ffa  HADOOP-16108. Tail Follow Interval Should Allow To Specify 
The Sleep Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
00c5ffa is described below

commit 00c5ffaee2fb16eaef512a47054c7b9ee7ea2e50
Author: Vinayakumar B 
AuthorDate: Wed Feb 13 09:29:37 2019 +0530

HADOOP-16108. Tail Follow Interval Should Allow To Specify The Sleep 
Interval To Save Unnecessary RPC's. Contributed by Ayush Saxena.
---
 .../main/java/org/apache/hadoop/fs/shell/Tail.java | 25 --
 .../java/org/apache/hadoop/fs/shell/TestTail.java  | 57 ++
 .../hadoop-common/src/test/resources/testConf.xml  |  6 ++-
 3 files changed, 84 insertions(+), 4 deletions(-)

diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
index 1d49bf1..8a75a60 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.io.IOUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Get a listing of all files in that match the file patterns.
  */
@@ -40,20 +42,37 @@ class Tail extends FsCommand {
   }
   
   public static final String NAME = "tail";
-  public static final String USAGE = "[-f] <file>";
+  public static final String USAGE = "[-f] [-s <sleep interval>] <file>";
   public static final String DESCRIPTION =
-"Show the last 1KB of the file.\n" +
-"-f: Shows appended data as the file grows.\n";
+  "Show the last 1KB of the file.\n"
+  + "-f: Shows appended data as the file grows.\n"
+  + "-s: With -f , "
+  + "defines the sleep interval between iterations in milliseconds.\n";
 
   private long startingOffset = -1024;
   private boolean follow = false;
   private long followDelay = 5000; // milliseconds
   
+  @VisibleForTesting
+  public long getFollowDelay() {
+return followDelay;
+  }
+
   @Override
  protected void processOptions(LinkedList<String> args) throws IOException {
 CommandFormat cf = new CommandFormat(1, 1, "f");
+cf.addOptionWithValue("s");
 cf.parse(args);
 follow = cf.getOpt("f");
+if (follow) {
+  String sleep = cf.getOptValue("s");
+  if (sleep != null && !sleep.isEmpty()) {
+long sleepInterval = Long.parseLong(sleep);
+if (sleepInterval > 0) {
+  followDelay = sleepInterval;
+}
+  }
+}
   }
 
   // TODO: HADOOP-7234 will add glob support; for now, be backwards compat
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
new file mode 100644
index 000..31a5a4e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestTail.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.LinkedList;
+
+import org.junit.Test;
+
+/**
+ * Test class to verify Tail shell command.
+ */
+public class TestTail {
+
+  // check follow delay with -s parameter.
+  @Test
+  public void testSleepParameter() throws IOException {
+Tail tail = new Tail();
+LinkedList<String> options = new LinkedList<String>();
+options.add("-f");
+options.add("-s");
+options.add("1");
+options.add("/path");
+tail.processOptions(options);
+assertEquals(100
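
For the record, a hedged usage example of the new option (the path is a
placeholder):

    hadoop fs -tail -f -s 10000 /app/logs/current

follows the file but polls only every 10 seconds instead of the fixed
5-second default, halving the follow-mode RPCs when a longer interval is
acceptable.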

[hadoop] branch trunk updated: HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.

2019-02-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7806403  HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if 
One Block Is Not Available. Contributed by Ayush Saxena.
7806403 is described below

commit 7806403842ddd0f5b339e3dca42688b970cae267
Author: Vinayakumar B 
AuthorDate: Tue Feb 12 21:57:57 2019 +0530

HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not 
Available. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java  | 56 ++
 .../blockmanagement/TestBlockInfoStriped.java  | 45 +
 2 files changed, 80 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 9c39d86..ad8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -314,29 +314,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   if (blockManager.getCorruptReplicas(block) != null) {
 corruptionRecord = blockManager.getCorruptReplicas(block);
   }
-
-  //report block replicas status on datanodes
-  for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-out.print("Block replica on datanode/rack: " + dn.getHostName() +
-dn.getNetworkLocation() + " ");
-if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-  blockManager.getCorruptReason(block, dn));
-} else if (dn.isDecommissioned() ){
-  out.print(DECOMMISSIONED_STATUS);
-} else if (dn.isDecommissionInProgress()) {
-  out.print(DECOMMISSIONING_STATUS);
-} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-  out.print(ENTERING_MAINTENANCE_STATUS);
-} else if (this.showMaintenanceState && dn.isInMaintenance()) {
-  out.print(IN_MAINTENANCE_STATUS);
-} else {
-  out.print(HEALTHY_STATUS);
+  // report block replicas status on datanodes
+  if (blockInfo.isStriped()) {
+for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  if (dn == null) {
+continue;
+  }
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
+}
+  } else {
+for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
 }
-out.print("\n");
   }
-} catch (Exception e){
+} catch (Exception e) {
   String errMsg = "Fsck on blockId '" + blockId;
   LOG.warn(errMsg, e);
   out.println(e.getMessage());
@@ -347,6 +340,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+  Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+out.print("Block replica on datanode/rack: " + dn.getHostName() +
+dn.getNetworkLocation() + " ");
+if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+  blockManager.getCorruptReason(block, dn));
+} else if (dn.isDecommissioned()){
+  out.print(DECOMMISSIONED_STATUS);
+} else if (dn.isDecommissionInProgress()) {
+  out.print(DECOMMISSIONING_STATUS);
+} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+  out.print(ENTERING_MAINTENANCE_STATUS);
+} else if (this.showMaintenanceState && dn.isInMaintenance()) {
+  out.print(IN_MAINTENANCE_STATUS);
+} else {
+  out.print(HEALTHY_STATUS);
+}
+out.print("\n");
+  }
+
   /**
* Check files on DFS, starting from the indicated path.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index d20d2fd..878edf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hado
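
For completeness, the shell entry point this affects (the block id below is
a placeholder):

    hdfs fsck -blockId blk_-9223372036854775792

For a striped block group, fsck now walks every internal-block slot and
skips slots whose datanode is unknown, instead of hitting a
NullPointerException that surfaced as a bare "null" in the output.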

[hadoop] branch branch-3.1 updated: HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.

2019-02-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
 new 1f3e737  HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if 
One Block Is Not Available. Contributed by Ayush Saxena.
1f3e737 is described below

commit 1f3e7374078f087d025ee252399b5b09383b0cd7
Author: Vinayakumar B 
AuthorDate: Tue Feb 12 21:57:57 2019 +0530

HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not 
Available. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java  | 56 ++
 .../blockmanagement/TestBlockInfoStriped.java  | 45 +
 2 files changed, 80 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0201ca1..7e4709c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -302,29 +302,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   if (blockManager.getCorruptReplicas(block) != null) {
 corruptionRecord = blockManager.getCorruptReplicas(block);
   }
-
-  //report block replicas status on datanodes
-  for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-out.print("Block replica on datanode/rack: " + dn.getHostName() +
-dn.getNetworkLocation() + " ");
-if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-  blockManager.getCorruptReason(block, dn));
-} else if (dn.isDecommissioned() ){
-  out.print(DECOMMISSIONED_STATUS);
-} else if (dn.isDecommissionInProgress()) {
-  out.print(DECOMMISSIONING_STATUS);
-} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-  out.print(ENTERING_MAINTENANCE_STATUS);
-} else if (this.showMaintenanceState && dn.isInMaintenance()) {
-  out.print(IN_MAINTENANCE_STATUS);
-} else {
-  out.print(HEALTHY_STATUS);
+  // report block replicas status on datanodes
+  if (blockInfo.isStriped()) {
+for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  if (dn == null) {
+continue;
+  }
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
+}
+  } else {
+for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
 }
-out.print("\n");
   }
-} catch (Exception e){
+} catch (Exception e) {
   String errMsg = "Fsck on blockId '" + blockId;
   LOG.warn(errMsg, e);
   out.println(e.getMessage());
@@ -335,6 +328,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+  Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+out.print("Block replica on datanode/rack: " + dn.getHostName() +
+dn.getNetworkLocation() + " ");
+if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+  blockManager.getCorruptReason(block, dn));
+} else if (dn.isDecommissioned()){
+  out.print(DECOMMISSIONED_STATUS);
+} else if (dn.isDecommissionInProgress()) {
+  out.print(DECOMMISSIONING_STATUS);
+} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+  out.print(ENTERING_MAINTENANCE_STATUS);
+} else if (this.showMaintenanceState && dn.isInMaintenance()) {
+  out.print(IN_MAINTENANCE_STATUS);
+} else {
+  out.print(HEALTHY_STATUS);
+}
+out.print("\n");
+  }
+
   /**
* Check files on DFS, starting from the indicated path.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index becf868..0982c80 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hado

[hadoop] branch branch-3.2 updated: HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.

2019-02-12 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new 8907368  HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if 
One Block Is Not Available. Contributed by Ayush Saxena.
8907368 is described below

commit 890736828b409aa0d82991ba3cce2374aea964d4
Author: Vinayakumar B 
AuthorDate: Tue Feb 12 21:57:57 2019 +0530

HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not 
Available. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java  | 56 ++
 .../blockmanagement/TestBlockInfoStriped.java  | 45 +
 2 files changed, 80 insertions(+), 21 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 5d664cb..e59216d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -303,29 +303,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   if (blockManager.getCorruptReplicas(block) != null) {
 corruptionRecord = blockManager.getCorruptReplicas(block);
   }
-
-  //report block replicas status on datanodes
-  for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-out.print("Block replica on datanode/rack: " + dn.getHostName() +
-dn.getNetworkLocation() + " ");
-if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-  blockManager.getCorruptReason(block, dn));
-} else if (dn.isDecommissioned() ){
-  out.print(DECOMMISSIONED_STATUS);
-} else if (dn.isDecommissionInProgress()) {
-  out.print(DECOMMISSIONING_STATUS);
-} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-  out.print(ENTERING_MAINTENANCE_STATUS);
-} else if (this.showMaintenanceState && dn.isInMaintenance()) {
-  out.print(IN_MAINTENANCE_STATUS);
-} else {
-  out.print(HEALTHY_STATUS);
+  // report block replicas status on datanodes
+  if (blockInfo.isStriped()) {
+for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  if (dn == null) {
+continue;
+  }
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
+}
+  } else {
+for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+  DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+  printDatanodeReplicaStatus(block, corruptionRecord, dn);
 }
-out.print("\n");
   }
-} catch (Exception e){
+} catch (Exception e) {
   String errMsg = "Fsck on blockId '" + blockId;
   LOG.warn(errMsg, e);
   out.println(e.getMessage());
@@ -336,6 +329,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+  Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+out.print("Block replica on datanode/rack: " + dn.getHostName() +
+dn.getNetworkLocation() + " ");
+if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+  out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+  blockManager.getCorruptReason(block, dn));
+} else if (dn.isDecommissioned()){
+  out.print(DECOMMISSIONED_STATUS);
+} else if (dn.isDecommissionInProgress()) {
+  out.print(DECOMMISSIONING_STATUS);
+} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+  out.print(ENTERING_MAINTENANCE_STATUS);
+} else if (this.showMaintenanceState && dn.isInMaintenance()) {
+  out.print(IN_MAINTENANCE_STATUS);
+} else {
+  out.print(HEALTHY_STATUS);
+}
+out.print("\n");
+  }
+
   /**
* Check files on DFS, starting from the indicated path.
*/
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index d20d2fd..878edf2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ 
b/hado
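
For illustration, a hedged Java sketch (not part of the commit) of the
distinction the fix draws. A striped BlockInfo reserves getCapacity()
storage slots, one per internal block (data plus parity), and a slot is
null whenever that internal block is unavailable, which is exactly the
case where "fsck -blockId" used to print null; replicated blocks keep a
compact list that numNodes() indexes without gaps.

    // Sketch only: blockInfo, block and corruptionRecord are assumed to
    // be in scope as in the diff above.
    if (blockInfo.isStriped()) {
      // Capacity-based walk: slots may be empty, so guard against null.
      for (int idx = blockInfo.getCapacity() - 1; idx >= 0; idx--) {
        DatanodeDescriptor dn = blockInfo.getDatanode(idx);
        if (dn == null) {
          continue; // internal block unavailable; nothing to report
        }
        printDatanodeReplicaStatus(block, corruptionRecord, dn);
      }
    } else {
      // Compact walk: all numNodes() entries are non-null.
      for (int idx = blockInfo.numNodes() - 1; idx >= 0; idx--) {
        printDatanodeReplicaStatus(block, corruptionRecord,
            blockInfo.getDatanode(idx));
      }
    }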

[hadoop] branch trunk updated: YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on Ubuntu 18.04. Contributed by Ayush Saxena.

2019-02-06 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c96a03  YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on 
Ubuntu 18.04. Contributed by Ayush Saxena.
3c96a03 is described below

commit 3c96a03deadc419524b5fe2b8c85acc894286935
Author: Vinayakumar B 
AuthorDate: Thu Feb 7 13:02:35 2019 +0530

YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on Ubuntu 18.04. 
Contributed by Ayush Saxena.
---
 .../src/main/native/oom-listener/impl/oom_listener_main.c  |  4 +++-
 .../native/oom-listener/test/oom_listener_test_main.cc | 14 +++---
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
index eb7fc3e..2f4bac8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
@@ -25,6 +25,8 @@
 
 #include "oom_listener.h"
 
+extern inline void cleanup(_oom_listener_descriptors *descriptors);
+
 void print_usage(void) {
   fprintf(stderr, "oom-listener");
   fprintf(stderr, "Listen to OOM events in a cgroup");
@@ -101,4 +103,4 @@ int main() {
   return 1;
 }
 
-#endif
\ No newline at end of file
+#endif
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
index 421c21e..b79d501 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
@@ -159,7 +159,7 @@ TEST_F(OOMListenerTest, test_oom) {
   const int simulate_cgroups =
   mock_oom_event_as_user != -1;
 
-  __pid_t mem_hog_pid = fork();
+  pid_t mem_hog_pid = fork();
   if (!mem_hog_pid) {
 // Child process to consume too much memory
 if (simulate_cgroups) {
@@ -171,7 +171,7 @@ TEST_F(OOMListenerTest, test_oom) {
   // Wait until we are added to the cgroup
   // so that it is accounted for our mem
   // usage
-  __pid_t cgroupPid;
+  pid_t cgroupPid;
   do {
 std::ifstream tasks;
 tasks.open(tasks_file.c_str(), tasks.in);
@@ -210,7 +210,7 @@ TEST_F(OOMListenerTest, test_oom) {
 // There is no race condition with the process
 // running out of memory. If oom is 1 at startup
 // oom_listener will send an initial notification
-__pid_t listener = fork();
+pid_t listener = fork();
 if (listener == 0) {
   // child listener forwarding cgroup events
   _oom_listener_descriptors descriptors = {
@@ -253,8 +253,8 @@ TEST_F(OOMListenerTest, test_oom) {
   ASSERT_EQ(0, kill(mem_hog_pid, SIGKILL));
 
   // Verify that process was killed
-  __WAIT_STATUS mem_hog_status = {};
-  __pid_t exited0 = wait(mem_hog_status);
+  int* mem_hog_status = {};
+  pid_t exited0 = wait(mem_hog_status);
   ASSERT_EQ(mem_hog_pid, exited0)
 << "Wrong process exited";
   ASSERT_EQ(NULL, mem_hog_status)
@@ -272,8 +272,8 @@ TEST_F(OOMListenerTest, test_oom) {
 << "Could not delete cgroup " << GetCGroup();
 
   // Check that oom_listener exited on the deletion of the cgroup
-  __WAIT_STATUS oom_listener_status = {};
-  __pid_t exited1 = wait(oom_listener_status);
+  int* oom_listener_status = {};
+  pid_t exited1 = wait(oom_listener_status);
   ASSERT_EQ(listener, exited1)
 << "Wrong process exited";
   ASSERT_EQ(NULL, oom_listener_status)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch branch-3.2 updated: YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on Ubuntu 18.04. Contributed by Ayush Saxena.

2019-02-06 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
 new e2b91b2  YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on 
Ubuntu 18.04. Contributed by Ayush Saxena.
e2b91b2 is described below

commit e2b91b2ccb1eb43ecc4786af974635ba9744befc
Author: Vinayakumar B 
AuthorDate: Thu Feb 7 13:02:35 2019 +0530

YARN-8498. Yarn NodeManager OOM Listener Fails Compilation on Ubuntu 18.04. 
Contributed by Ayush Saxena.
---
 .../src/main/native/oom-listener/impl/oom_listener_main.c  |  4 +++-
 .../native/oom-listener/test/oom_listener_test_main.cc | 14 +++---
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
index eb7fc3e..2f4bac8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/impl/oom_listener_main.c
@@ -25,6 +25,8 @@
 
 #include "oom_listener.h"
 
+extern inline void cleanup(_oom_listener_descriptors *descriptors);
+
 void print_usage(void) {
   fprintf(stderr, "oom-listener");
   fprintf(stderr, "Listen to OOM events in a cgroup");
@@ -101,4 +103,4 @@ int main() {
   return 1;
 }
 
-#endif
\ No newline at end of file
+#endif
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
index 421c21e..b79d501 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/oom-listener/test/oom_listener_test_main.cc
@@ -159,7 +159,7 @@ TEST_F(OOMListenerTest, test_oom) {
   const int simulate_cgroups =
   mock_oom_event_as_user != -1;
 
-  __pid_t mem_hog_pid = fork();
+  pid_t mem_hog_pid = fork();
   if (!mem_hog_pid) {
 // Child process to consume too much memory
 if (simulate_cgroups) {
@@ -171,7 +171,7 @@ TEST_F(OOMListenerTest, test_oom) {
   // Wait until we are added to the cgroup
   // so that it is accounted for our mem
   // usage
-  __pid_t cgroupPid;
+  pid_t cgroupPid;
   do {
 std::ifstream tasks;
 tasks.open(tasks_file.c_str(), tasks.in);
@@ -210,7 +210,7 @@ TEST_F(OOMListenerTest, test_oom) {
 // There is no race condition with the process
 // running out of memory. If oom is 1 at startup
 // oom_listener will send an initial notification
-__pid_t listener = fork();
+pid_t listener = fork();
 if (listener == 0) {
   // child listener forwarding cgroup events
   _oom_listener_descriptors descriptors = {
@@ -253,8 +253,8 @@ TEST_F(OOMListenerTest, test_oom) {
   ASSERT_EQ(0, kill(mem_hog_pid, SIGKILL));
 
   // Verify that process was killed
-  __WAIT_STATUS mem_hog_status = {};
-  __pid_t exited0 = wait(mem_hog_status);
+  int* mem_hog_status = {};
+  pid_t exited0 = wait(mem_hog_status);
   ASSERT_EQ(mem_hog_pid, exited0)
 << "Wrong process exited";
   ASSERT_EQ(NULL, mem_hog_status)
@@ -272,8 +272,8 @@ TEST_F(OOMListenerTest, test_oom) {
 << "Could not delete cgroup " << GetCGroup();
 
   // Check that oom_listener exited on the deletion of the cgroup
-  __WAIT_STATUS oom_listener_status = {};
-  __pid_t exited1 = wait(oom_listener_status);
+  int* oom_listener_status = {};
+  pid_t exited1 = wait(oom_listener_status);
   ASSERT_EQ(listener, exited1)
 << "Wrong process exited";
   ASSERT_EQ(NULL, oom_listener_status)


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[hadoop] branch HDFS-13891 updated: HDFS-14193. RBF: Inconsistency with the Default Namespace. Contributed by Ayush Saxena.

2019-01-16 Thread vinayakumarb
This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch HDFS-13891
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/HDFS-13891 by this push:
 new 12911fa  HDFS-14193. RBF: Inconsistency with the Default Namespace. 
Contributed by Ayush Saxena.
12911fa is described below

commit 12911fa80e9056ddb22d85462e85e58a72681e06
Author: Vinayakumar B 
AuthorDate: Wed Jan 16 18:06:17 2019 +0530

HDFS-14193. RBF: Inconsistency with the Default Namespace. Contributed by 
Ayush Saxena.
---
 .../federation/resolver/MountTableResolver.java| 27 --
 .../resolver/TestInitializeMountTableResolver.java | 32 +++---
 2 files changed, 16 insertions(+), 43 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index 9e69840..da58551 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;
 
-import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
-import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;
 import static 
org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT;
@@ -50,8 +48,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
@@ -163,33 +159,22 @@ public class MountTableResolver
* @param conf Configuration for this resolver.
*/
   private void initDefaultNameService(Configuration conf) {
-this.defaultNameService = conf.get(
-DFS_ROUTER_DEFAULT_NAMESERVICE,
-DFSUtil.getNamenodeNameServiceId(conf));
-
 this.defaultNSEnable = conf.getBoolean(
 DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE,
 DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE_DEFAULT);
 
-if (defaultNameService == null) {
-  LOG.warn(
-  "{} and {} is not set. Fallback to {} as the default name service.",
-  DFS_ROUTER_DEFAULT_NAMESERVICE, DFS_NAMESERVICE_ID, 
DFS_NAMESERVICES);
-  Collection<String> nsIds = DFSUtilClient.getNameServiceIds(conf);
-  if (nsIds.isEmpty()) {
-this.defaultNameService = "";
-  } else {
-this.defaultNameService = nsIds.iterator().next();
-  }
+if (!this.defaultNSEnable) {
+  LOG.warn("Default name service is disabled.");
+  return;
 }
+this.defaultNameService = conf.get(DFS_ROUTER_DEFAULT_NAMESERVICE, "");
 
 if (this.defaultNameService.equals("")) {
   this.defaultNSEnable = false;
   LOG.warn("Default name service is not set.");
 } else {
-  String enable = this.defaultNSEnable ? "enabled" : "disabled";
-  LOG.info("Default name service: {}, {} to read or write",
-  this.defaultNameService, enable);
+  LOG.info("Default name service: {}, enabled to read or write",
+  this.defaultNameService);
 }
   }
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
index 5db7531..8a22ade 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestInitializeMountTableResolver.java
@@ -23,7 +23,9 @@ import org.junit.Test;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 import static 
org.apac
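
For context, a hedged sketch (not part of the commit) of how a Router
deployment pins the default namespace after this change; "ns0" is a
placeholder nameservice id, and the constants are the RBFConfigKeys
already imported in the diff above:

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
    import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE;

    Configuration conf = new Configuration();
    // No more fallback to dfs.nameservice.id / dfs.nameservices: the
    // default nameservice must be set explicitly, and it is ignored
    // entirely when the enable flag is off.
    conf.setBoolean(DFS_ROUTER_DEFAULT_NAMESERVICE_ENABLE, true);
    conf.set(DFS_ROUTER_DEFAULT_NAMESERVICE, "ns0");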

[2/2] hadoop git commit: HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. Contributed by Ayush Saxena.

2018-12-11 Thread vinayakumarb
HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. 
Contributed by Ayush Saxena.

(cherry picked from commit 39dc7345b80e27ba8bd1ff4c19ca241aef5ac0fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/abe14d32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/abe14d32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/abe14d32

Branch: refs/heads/branch-3.2
Commit: abe14d32d41b4a2bf630e8a89d794d729cee119b
Parents: 2b3c3d2
Author: Vinayakumar B 
Authored: Tue Dec 11 17:59:04 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Dec 11 18:10:51 2018 +0530

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 11 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 33 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  2 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  2 +
 .../hadoop/hdfs/web/resources/PutOpParam.java   |  1 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 15 +++-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 89 +++-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 18 +++-
 8 files changed, 164 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/abe14d32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 95ccb4b..3889326 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -675,6 +675,17 @@ public class JsonUtilClient {
 replicationFallbacks, copyOnCreateFile.booleanValue());
   }
 
+  public static ErasureCodingPolicy toECPolicy(Map<?, ?> m) {
+byte id = ((Number) m.get("id")).byteValue();
+String name = (String) m.get("name");
+String codec = (String) m.get("codecName");
+int cellsize = ((Number) m.get("cellSize")).intValue();
+int dataunits = ((Number) m.get("numDataUnits")).intValue();
+int parityunits = ((Number) m.get("numParityUnits")).intValue();
+ECSchema ecs = new ECSchema(codec, dataunits, parityunits);
+return new ErasureCodingPolicy(name, ecs, cellsize, id);
+  }
+
   private static StorageType[] toStorageTypes(List<?> list) {
 if (list == null) {
   return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/abe14d32/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 51d4442..4f72613 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.HdfsKMSUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1311,15 +1312,47 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   public void enableECPolicy(String policyName) throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.ENABLE_EC_POLICY);
 final HttpOpParam.Op op = PutOpParam.Op.ENABLEECPOLICY;
 new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
   public void disableECPolicy(String policyName) throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.DISABLE_EC_POLICY);
 final HttpOpParam.Op op = PutOpParam.Op.DISABLEECPOLICY;
 new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
+  public void setErasureCodingPolicy(Path p, String policyName)
+  throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.SET_EC_POLICY);
+final HttpOpParam.Op op = PutOpParam.Op.SETECPOLICY;
+new FsPathRunner(op, p, new 
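
For illustration, a hedged sketch (not part of the commit) of the map
shape the new JsonUtilClient.toECPolicy() consumes; the field names come
from the diff above, while the concrete values are placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    Map<String, Object> m = new HashMap<>();
    m.put("id", 1);                  // placeholder policy id
    m.put("name", "RS-6-3-1024k");   // placeholder policy name
    m.put("codecName", "rs");
    m.put("cellSize", 1024 * 1024);
    m.put("numDataUnits", 6);
    m.put("numParityUnits", 3);
    ErasureCodingPolicy policy = JsonUtilClient.toECPolicy(m);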

[1/2] hadoop git commit: HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. Contributed by Ayush Saxena.

2018-12-11 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 2b3c3d2a3 -> abe14d32d
  refs/heads/trunk 3ff8580f2 -> 39dc7345b


HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy) via WebHdfs. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39dc7345
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39dc7345
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39dc7345

Branch: refs/heads/trunk
Commit: 39dc7345b80e27ba8bd1ff4c19ca241aef5ac0fc
Parents: 3ff8580
Author: Vinayakumar B 
Authored: Tue Dec 11 17:59:04 2018 +0530
Committer: Vinayakumar B 
Committed: Tue Dec 11 17:59:04 2018 +0530

--
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 11 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 33 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  2 +
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  2 +
 .../hadoop/hdfs/web/resources/PutOpParam.java   |  1 +
 .../web/resources/NamenodeWebHdfsMethods.java   | 15 +++-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 89 +++-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 18 +++-
 8 files changed, 164 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39dc7345/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 95ccb4b..3889326 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -675,6 +675,17 @@ public class JsonUtilClient {
 replicationFallbacks, copyOnCreateFile.booleanValue());
   }
 
+  public static ErasureCodingPolicy toECPolicy(Map<?, ?> m) {
+byte id = ((Number) m.get("id")).byteValue();
+String name = (String) m.get("name");
+String codec = (String) m.get("codecName");
+int cellsize = ((Number) m.get("cellSize")).intValue();
+int dataunits = ((Number) m.get("numDataUnits")).intValue();
+int parityunits = ((Number) m.get("numParityUnits")).intValue();
+ECSchema ecs = new ECSchema(codec, dataunits, parityunits);
+return new ErasureCodingPolicy(name, ecs, cellsize, id);
+  }
+
   private static StorageType[] toStorageTypes(List<?> list) {
 if (list == null) {
   return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39dc7345/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 51d4442..4f72613 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.HdfsKMSUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1311,15 +1312,47 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   public void enableECPolicy(String policyName) throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.ENABLE_EC_POLICY);
 final HttpOpParam.Op op = PutOpParam.Op.ENABLEECPOLICY;
 new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
   public void disableECPolicy(String policyName) throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.DISABLE_EC_POLICY);
 final HttpOpParam.Op op = PutOpParam.Op.DISABLEECPOLICY;
 new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
+  public void setErasureCodingPolicy(Path p, String policyName)
+  throws IOException {
+statistics.incrementWriteOps(1);
+storageStatistics.incrementOpCounter(OpType.SET_EC_POLICY);
+final HttpOpParam.Op op = PutOpParam.Op.SETECPOLICY;
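
For illustration, a hedged client-side sketch (not part of the commit)
exercising the methods added above; the namenode address, path and policy
name are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    Configuration conf = new Configuration();
    WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) FileSystem.get(
        URI.create("webhdfs://namenode:9870"), conf);
    webHdfs.enableECPolicy("RS-6-3-1024k");
    webHdfs.setErasureCodingPolicy(new Path("/ecdir"), "RS-6-3-1024k");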

hadoop git commit: HDFS-14113. EC : Add Configuration to restrict UserDefined Policies. Contributed by Ayush Saxena.

2018-12-06 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5d4a43266 -> c03024a53


HDFS-14113. EC : Add Configuration to restrict UserDefined Policies. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c03024a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c03024a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c03024a5

Branch: refs/heads/trunk
Commit: c03024a5305bea1a40c87a4abc0793802bea5c75
Parents: 5d4a432
Author: Vinayakumar B 
Authored: Thu Dec 6 18:20:28 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Dec 6 18:20:28 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +
 .../namenode/ErasureCodingPolicyManager.java| 13 
 .../src/main/resources/hdfs-default.xml |  8 ++
 .../hadoop/hdfs/TestErasureCodingAddConfig.java | 79 
 4 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c03024a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 5899c92..f6ce0d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -698,6 +698,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
 
   public static final String  DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY = 
"dfs.namenode.ec.policies.max.cellsize";
   public static final int DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT = 
4 * 1024 * 1024;
+  public static final String DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY 
=
+  "dfs.namenode.ec.userdefined.policy.allowed";
+  public static final boolean
+  DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY_DEFAULT = true;
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY =
   "dfs.namenode.ec.system.default.policy";
   public static final String  DFS_NAMENODE_EC_SYSTEM_DEFAULT_POLICY_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c03024a5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index fc673f7..ebb60d7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -58,6 +58,9 @@ public final class ErasureCodingPolicyManager {
   private int maxCellSize =
   DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT;
 
+  private boolean userDefinedAllowed =
+  DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY_DEFAULT;
+
   // Supported storage policies for striped EC files
   private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE =
   new byte[]{
@@ -143,6 +146,11 @@ public final class ErasureCodingPolicyManager {
 maxCellSize = conf.getInt(
 DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY,
 DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT);
+
+userDefinedAllowed = conf.getBoolean(
+DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY,
+DFSConfigKeys.
+DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY_DEFAULT);
   }
 
   /**
@@ -270,6 +278,11 @@ public final class ErasureCodingPolicyManager {
*/
   public synchronized ErasureCodingPolicy addPolicy(
   ErasureCodingPolicy policy) {
+if (!userDefinedAllowed) {
+  throw new HadoopIllegalArgumentException(
+  "Addition of user defined erasure coding policy is disabled.");
+}
+
 if (!CodecUtil.hasCodec(policy.getCodecName())) {
   throw new HadoopIllegalArgumentException("Codec name "
   + policy.getCodecName() + " is not supported");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c03024a5/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
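
For illustration, a hedged sketch (not part of the commit) of disabling
the new switch and the failure mode that follows, using the DFSConfigKeys
constant introduced above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    Configuration conf = new Configuration();
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_USERPOLICIES_ALLOWED_KEY,
        false);
    // With the flag off, ErasureCodingPolicyManager.addPolicy() throws
    // HadoopIllegalArgumentException("Addition of user defined erasure
    // coding policy is disabled.") before any codec or cell-size checks.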

[3/4] hadoop git commit: HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException. Contributed by Vinayakumar B.

2018-11-26 Thread vinayakumarb
HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of 
FileNotFoundException. Contributed by Vinayakumar B.

(cherry picked from commit b09828145432c8d986ac8f05ec33608d8e611328)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/538757ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/538757ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/538757ed

Branch: refs/heads/branch-3.2
Commit: 538757ed49b084a8dc73d4843833bd2ad58fbee5
Parents: 1b937d7
Author: Vinayakumar B 
Authored: Mon Nov 26 18:22:32 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:23:09 2018 +0530

--
 .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java   | 4 
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java  | 8 
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/538757ed/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 7e22ae1..01de236 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -565,6 +565,10 @@ class FSDirStatAndListingOp {
 fsd.readLock();
 try {
   INode targetNode = iip.getLastINode();
+  if (targetNode == null) {
+throw new FileNotFoundException(
+"File/Directory does not exist: " + iip.getPath());
+  }
   QuotaUsage usage = null;
   if (targetNode.isDirectory()) {
 DirectoryWithQuotaFeature feature =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/538757ed/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index b16782c..1c4855f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -332,6 +333,13 @@ public class TestQuota {
 // 14a: set quota on a non-existent directory
 Path nonExistentPath = new Path(dir, "test1");
 assertFalse(dfs.exists(nonExistentPath));
+try {
+  compareQuotaUsage(null, dfs, nonExistentPath);
+  fail("Expected FileNotFoundException");
+} catch (FileNotFoundException fnfe) {
+  GenericTestUtils.assertExceptionContains(
+  "File/Directory does not exist: " + nonExistentPath, fnfe);
+}
 args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
 runCommand(admin, args, true);
 runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
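
For illustration, a hedged sketch (not part of the commit) of the change
from a client's point of view; the cluster handle and path are assumed:

    import java.io.FileNotFoundException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    DistributedFileSystem dfs = cluster.getFileSystem(); // assumed setup
    Path missing = new Path("/no/such/dir");
    try {
      dfs.getQuotaUsage(missing); // previously failed with an NPE
    } catch (FileNotFoundException e) {
      // Now: "File/Directory does not exist: /no/such/dir"
    }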



[1/4] hadoop git commit: HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException. Contributed by Vinayakumar B.

2018-11-26 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c2e65cac5 -> 8569555fd
  refs/heads/branch-3.1 07529a209 -> 025410007
  refs/heads/branch-3.2 1b937d701 -> 538757ed4
  refs/heads/trunk 23b441c22 -> b09828145


HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of 
FileNotFoundException. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0982814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0982814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0982814

Branch: refs/heads/trunk
Commit: b09828145432c8d986ac8f05ec33608d8e611328
Parents: 23b441c
Author: Vinayakumar B 
Authored: Mon Nov 26 18:22:32 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:22:32 2018 +0530

--
 .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java   | 4 
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java  | 8 
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0982814/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 7e22ae1..01de236 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -565,6 +565,10 @@ class FSDirStatAndListingOp {
 fsd.readLock();
 try {
   INode targetNode = iip.getLastINode();
+  if (targetNode == null) {
+throw new FileNotFoundException(
+"File/Directory does not exist: " + iip.getPath());
+  }
   QuotaUsage usage = null;
   if (targetNode.isDirectory()) {
 DirectoryWithQuotaFeature feature =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0982814/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index b16782c..1c4855f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -332,6 +333,13 @@ public class TestQuota {
 // 14a: set quota on a non-existent directory
 Path nonExistentPath = new Path(dir, "test1");
 assertFalse(dfs.exists(nonExistentPath));
+try {
+  compareQuotaUsage(null, dfs, nonExistentPath);
+  fail("Expected FileNotFoundException");
+} catch (FileNotFoundException fnfe) {
+  GenericTestUtils.assertExceptionContains(
+  "File/Directory does not exist: " + nonExistentPath, fnfe);
+}
 args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
 runCommand(admin, args, true);
 runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/4] hadoop git commit: HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException. Contributed by Vinayakumar B.

2018-11-26 Thread vinayakumarb
HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of 
FileNotFoundException. Contributed by Vinayakumar B.

(cherry picked from commit b09828145432c8d986ac8f05ec33608d8e611328)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8569555f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8569555f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8569555f

Branch: refs/heads/branch-3.0
Commit: 8569555fd1f244b298c7d025d2fc7d2ac6b181f7
Parents: c2e65ca
Author: Vinayakumar B 
Authored: Mon Nov 26 18:22:32 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:23:20 2018 +0530

--
 .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java   | 4 
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java  | 8 
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8569555f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index c5fa4c7..4baf8b5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -563,6 +563,10 @@ class FSDirStatAndListingOp {
 fsd.readLock();
 try {
   INode targetNode = iip.getLastINode();
+  if (targetNode == null) {
+throw new FileNotFoundException(
+"File/Directory does not exist: " + iip.getPath());
+  }
   QuotaUsage usage = null;
   if (targetNode.isDirectory()) {
 DirectoryWithQuotaFeature feature =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8569555f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 2e99aa3..5398aea 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -332,6 +333,13 @@ public class TestQuota {
 // 14a: set quota on a non-existent directory
 Path nonExistentPath = new Path(dir, "test1");
 assertFalse(dfs.exists(nonExistentPath));
+try {
+  compareQuotaUsage(null, dfs, nonExistentPath);
+  fail("Expected FileNotFoundException");
+} catch (FileNotFoundException fnfe) {
+  GenericTestUtils.assertExceptionContains(
+  "File/Directory does not exist: " + nonExistentPath, fnfe);
+}
 args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
 runCommand(admin, args, true);
 runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/4] hadoop git commit: HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of FileNotFoundException. Contributed by Vinayakumar B.

2018-11-26 Thread vinayakumarb
HDFS-13816. dfs.getQuotaUsage() throws NPE on non-existent dir instead of 
FileNotFoundException. Contributed by Vinayakumar B.

(cherry picked from commit b09828145432c8d986ac8f05ec33608d8e611328)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02541000
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02541000
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02541000

Branch: refs/heads/branch-3.1
Commit: 02541000747267163c2ba0275099a1a6a3d706a7
Parents: 07529a2
Author: Vinayakumar B 
Authored: Mon Nov 26 18:22:32 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:22:58 2018 +0530

--
 .../hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java   | 4 
 .../src/test/java/org/apache/hadoop/hdfs/TestQuota.java  | 8 
 2 files changed, 12 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02541000/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 7e22ae1..01de236 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -565,6 +565,10 @@ class FSDirStatAndListingOp {
 fsd.readLock();
 try {
   INode targetNode = iip.getLastINode();
+  if (targetNode == null) {
+throw new FileNotFoundException(
+"File/Directory does not exist: " + iip.getPath());
+  }
   QuotaUsage usage = null;
   if (targetNode.isDirectory()) {
 DirectoryWithQuotaFeature feature =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/02541000/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index b16782c..1c4855f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
@@ -332,6 +333,13 @@ public class TestQuota {
 // 14a: set quota on a non-existent directory
 Path nonExistentPath = new Path(dir, "test1");
 assertFalse(dfs.exists(nonExistentPath));
+try {
+  compareQuotaUsage(null, dfs, nonExistentPath);
+  fail("Expected FileNotFoundException");
+} catch (FileNotFoundException fnfe) {
+  GenericTestUtils.assertExceptionContains(
+  "File/Directory does not exist: " + nonExistentPath, fnfe);
+}
 args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
 runCommand(admin, args, true);
 runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/4] hadoop git commit: HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains space. Contributed by Ayush Saxena.

2018-11-26 Thread vinayakumarb
HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains 
space. Contributed by Ayush Saxena.

(cherry picked from commit 23b441c2253bcb3be5229d3c5eb1e165369070c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07529a20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07529a20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07529a20

Branch: refs/heads/branch-3.1
Commit: 07529a209d417bd8311dcfcb50298c3311f45f92
Parents: f94d403
Author: Vinayakumar B 
Authored: Mon Nov 26 18:11:01 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:11:55 2018 +0530

--
 .../java/org/apache/hadoop/fs/shell/CopyCommands.java |  7 +--
 .../org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java  | 10 ++
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07529a20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 11cb3d6..c408a4f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -268,12 +268,7 @@ class CopyCommands {
   try {
 items.add(new PathData(new URI(arg), getConf()));
   } catch (URISyntaxException e) {
-if (Path.WINDOWS) {
-  // Unlike URI, PathData knows how to parse Windows drive-letter 
paths.
-  items.add(new PathData(arg, getConf()));
-} else {
-  throw new IOException("unexpected URISyntaxException", e);
-}
+items.add(new PathData(arg, getConf()));
   }
   return items;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/07529a20/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 8dd09e5..8d2e160 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -43,9 +43,11 @@ public class TestCopyPreserveFlag {
   private static final int MODIFICATION_TIME = 12345000;
   private static final int ACCESS_TIME = 23456000;
   private static final Path DIR_FROM = new Path("d0");
+  private static final Path DIR_FROM_SPL = new Path("d0 space");
   private static final Path DIR_TO1 = new Path("d1");
   private static final Path DIR_TO2 = new Path("d2");
   private static final Path FROM = new Path(DIR_FROM, "f0");
+  private static final Path FROM_SPL = new Path(DIR_FROM_SPL, "f0");
   private static final Path TO = new Path(DIR_TO1, "f1");
   private static final FsPermission PERMISSIONS = new FsPermission(
 FsAction.ALL,
@@ -121,6 +123,14 @@ public class TestCopyPreserveFlag {
   }
 
   @Test(timeout = 10000)
+  public void testPutWithSplCharacter() throws Exception {
+fs.mkdirs(DIR_FROM_SPL);
+fs.createNewFile(FROM_SPL);
+run(new Put(), FROM_SPL.toString(), TO.toString());
+assertAttributesChanged(TO);
+  }
+
+  @Test(timeout = 10000)
   public void testCopyFromLocal() throws Exception {
 run(new CopyFromLocal(), FROM.toString(), TO.toString());
 assertAttributesChanged(TO);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
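
For illustration, a hedged sketch (not part of the commit) mirroring the
new test: copy-command arguments were parsed strictly as URIs, so a space
in a file or directory name failed with "unexpected URISyntaxException"
on non-Windows platforms; the change falls back to plain path parsing:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path src = new Path("d0 space", "f0"); // directory name with a space
    fs.mkdirs(src.getParent());
    fs.createNewFile(src);
    // After the fix the shell Put / CopyFromLocal commands (as in the
    // test above) accept src.toString() even though it is not a valid URI.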



[4/4] hadoop git commit: HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains space. Contributed by Ayush Saxena.

2018-11-26 Thread vinayakumarb
HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains 
space. Contributed by Ayush Saxena.

(cherry picked from commit 23b441c2253bcb3be5229d3c5eb1e165369070c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2e65cac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2e65cac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2e65cac

Branch: refs/heads/branch-3.0
Commit: c2e65cac5ea5a190c3890c7035445a9cd95f2f52
Parents: 8d930ae
Author: Vinayakumar B 
Authored: Mon Nov 26 18:11:01 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:14:30 2018 +0530

--
 .../java/org/apache/hadoop/fs/shell/CopyCommands.java |  7 +--
 .../org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java  | 10 ++
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2e65cac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index 11cb3d6..c408a4f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -268,12 +268,7 @@ class CopyCommands {
   try {
 items.add(new PathData(new URI(arg), getConf()));
   } catch (URISyntaxException e) {
-if (Path.WINDOWS) {
-  // Unlike URI, PathData knows how to parse Windows drive-letter 
paths.
-  items.add(new PathData(arg, getConf()));
-} else {
-  throw new IOException("unexpected URISyntaxException", e);
-}
+items.add(new PathData(arg, getConf()));
   }
   return items;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2e65cac/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 8dd09e5..8d2e160 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -43,9 +43,11 @@ public class TestCopyPreserveFlag {
   private static final int MODIFICATION_TIME = 12345000;
   private static final int ACCESS_TIME = 23456000;
   private static final Path DIR_FROM = new Path("d0");
+  private static final Path DIR_FROM_SPL = new Path("d0 space");
   private static final Path DIR_TO1 = new Path("d1");
   private static final Path DIR_TO2 = new Path("d2");
   private static final Path FROM = new Path(DIR_FROM, "f0");
+  private static final Path FROM_SPL = new Path(DIR_FROM_SPL, "f0");
   private static final Path TO = new Path(DIR_TO1, "f1");
   private static final FsPermission PERMISSIONS = new FsPermission(
 FsAction.ALL,
@@ -121,6 +123,14 @@ public class TestCopyPreserveFlag {
   }
 
   @Test(timeout = 10000)
+  public void testPutWithSplCharacter() throws Exception {
+fs.mkdirs(DIR_FROM_SPL);
+fs.createNewFile(FROM_SPL);
+run(new Put(), FROM_SPL.toString(), TO.toString());
+assertAttributesChanged(TO);
+  }
+
+  @Test(timeout = 10000)
   public void testCopyFromLocal() throws Exception {
 run(new CopyFromLocal(), FROM.toString(), TO.toString());
 assertAttributesChanged(TO);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/4] hadoop git commit: HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains space. Contributed by Ayush Saxena.

2018-11-26 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 8d930ae34 -> c2e65cac5
  refs/heads/branch-3.1 f94d40341 -> 07529a209
  refs/heads/branch-3.2 f5d2806c8 -> 1b937d701
  refs/heads/trunk a8302e398 -> 23b441c22


HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains 
space. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b441c2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b441c2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b441c2

Branch: refs/heads/trunk
Commit: 23b441c2253bcb3be5229d3c5eb1e165369070c0
Parents: a8302e3
Author: Vinayakumar B 
Authored: Mon Nov 26 18:11:01 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:11:01 2018 +0530

--
 .../java/org/apache/hadoop/fs/shell/CopyCommands.java |  7 +--
 .../org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java  | 10 ++
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b441c2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index da7a2b2..f59718f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -274,12 +274,7 @@ class CopyCommands {
   try {
 items.add(new PathData(new URI(arg), getConf()));
   } catch (URISyntaxException e) {
-if (Path.WINDOWS) {
-  // Unlike URI, PathData knows how to parse Windows drive-letter 
paths.
-  items.add(new PathData(arg, getConf()));
-} else {
-  throw new IOException("unexpected URISyntaxException", e);
-}
+items.add(new PathData(arg, getConf()));
   }
   return items;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b441c2/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 8dd09e5..8d2e160 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -43,9 +43,11 @@ public class TestCopyPreserveFlag {
   private static final int MODIFICATION_TIME = 12345000;
   private static final int ACCESS_TIME = 23456000;
   private static final Path DIR_FROM = new Path("d0");
+  private static final Path DIR_FROM_SPL = new Path("d0 space");
   private static final Path DIR_TO1 = new Path("d1");
   private static final Path DIR_TO2 = new Path("d2");
   private static final Path FROM = new Path(DIR_FROM, "f0");
+  private static final Path FROM_SPL = new Path(DIR_FROM_SPL, "f0");
   private static final Path TO = new Path(DIR_TO1, "f1");
   private static final FsPermission PERMISSIONS = new FsPermission(
 FsAction.ALL,
@@ -121,6 +123,14 @@ public class TestCopyPreserveFlag {
   }
 
   @Test(timeout = 10000)
+  public void testPutWithSplCharacter() throws Exception {
+fs.mkdirs(DIR_FROM_SPL);
+fs.createNewFile(FROM_SPL);
+run(new Put(), FROM_SPL.toString(), TO.toString());
+assertAttributesChanged(TO);
+  }
+
+  @Test(timeout = 10000)
   public void testCopyFromLocal() throws Exception {
 run(new CopyFromLocal(), FROM.toString(), TO.toString());
 assertAttributesChanged(TO);


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/4] hadoop git commit: HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains space. Contributed by Ayush Saxena.

2018-11-26 Thread vinayakumarb
HADOOP-15948. Inconsistency in get and put syntax if filename/dirname contains 
space. Contributed by Ayush Saxena.

(cherry picked from commit 23b441c2253bcb3be5229d3c5eb1e165369070c0)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b937d70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b937d70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b937d70

Branch: refs/heads/branch-3.2
Commit: 1b937d701dfe5b603b338397b171d14f52895dbf
Parents: f5d2806
Author: Vinayakumar B 
Authored: Mon Nov 26 18:11:01 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Nov 26 18:13:09 2018 +0530

--
 .../java/org/apache/hadoop/fs/shell/CopyCommands.java |  7 +--
 .../org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java  | 10 ++
 2 files changed, 11 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b937d70/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index da7a2b2..f59718f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -274,12 +274,7 @@ class CopyCommands {
       try {
         items.add(new PathData(new URI(arg), getConf()));
       } catch (URISyntaxException e) {
-        if (Path.WINDOWS) {
-          // Unlike URI, PathData knows how to parse Windows drive-letter paths.
-          items.add(new PathData(arg, getConf()));
-        } else {
-          throw new IOException("unexpected URISyntaxException", e);
-        }
+        items.add(new PathData(arg, getConf()));
       }
       return items;
     }
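The hunk above removes the Windows-only fallback: every argument is still parsed as a URI first, but on failure the plain-string PathData constructor is now used on all platforms, so put/get handle names with spaces the same way everywhere. A minimal standalone sketch of the underlying behavior (not part of the patch; the class name is illustrative):

import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.fs.Path;

public class SpaceInPathDemo {
  public static void main(String[] args) {
    String arg = "d0 space/f0";          // a local name containing a space
    try {
      new URI(arg);                      // the shell's first parse attempt
    } catch (URISyntaxException e) {
      // Always thrown here: an unescaped space is illegal in a URI.
      System.out.println("URI parse failed: " + e.getReason());
    }
    // The fallback taken by the patched code, on every platform:
    System.out.println("Path accepts it: " + new Path(arg));
  }
}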

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b937d70/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
index 8dd09e5..8d2e160 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCopyPreserveFlag.java
@@ -43,9 +43,11 @@ public class TestCopyPreserveFlag {
   private static final int MODIFICATION_TIME = 12345000;
   private static final int ACCESS_TIME = 23456000;
   private static final Path DIR_FROM = new Path("d0");
+  private static final Path DIR_FROM_SPL = new Path("d0 space");
   private static final Path DIR_TO1 = new Path("d1");
   private static final Path DIR_TO2 = new Path("d2");
   private static final Path FROM = new Path(DIR_FROM, "f0");
+  private static final Path FROM_SPL = new Path(DIR_FROM_SPL, "f0");
   private static final Path TO = new Path(DIR_TO1, "f1");
   private static final FsPermission PERMISSIONS = new FsPermission(
       FsAction.ALL,
@@ -121,6 +123,14 @@ public class TestCopyPreserveFlag {
   }
 
   @Test(timeout = 10000)
+  public void testPutWithSplCharacter() throws Exception {
+    fs.mkdirs(DIR_FROM_SPL);
+    fs.createNewFile(FROM_SPL);
+    run(new Put(), FROM_SPL.toString(), TO.toString());
+    assertAttributesChanged(TO);
+  }
+
+  @Test(timeout = 10000)
   public void testCopyFromLocal() throws Exception {
 run(new CopyFromLocal(), FROM.toString(), TO.toString());
 assertAttributesChanged(TO);
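For reference, the pre-patch symptom on non-Windows systems was that a put of a name containing a space failed with the "unexpected URISyntaxException" seen in the removed branch; the new test drives the same code path through the shell command classes. A hedged equivalent of the command-line invocation via the Java API (class name and local paths are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class PutWithSpace {
  public static void main(String[] args) throws Exception {
    // Equivalent of: hadoop fs -put 'd0 space/f0' d1/f1
    int rc = ToolRunner.run(new FsShell(new Configuration()),
        new String[] {"-put", "d0 space/f0", "d1/f1"});
    System.exit(rc);
  }
}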


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk ee3355be3 -> 68d5dfdc7


HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68d5dfdc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68d5dfdc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68d5dfdc

Branch: refs/heads/trunk
Commit: 68d5dfdc78d121e89eeae4e577d670028a14a955
Parents: ee3355b
Author: Vinayakumar B 
Authored: Fri Nov 16 18:01:13 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 18:05:10 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java   | 5 +++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68d5dfdc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 99c0a87..7c9e9cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -294,7 +294,8 @@ public class DataNode extends ReconfigurableBase
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
   public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
-  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1";
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG =
+      "should be greater than or equal to -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
@@ -1405,7 +1406,7 @@ public class DataNode extends ReconfigurableBase
         || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either greater than -1 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
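Spelling out the guard the corrected message describes: -1 is the sentinel (MAX_VOLUME_FAILURE_TOLERATED_LIMIT) meaning "tolerate failures as long as one volume remains valid", so any value below -1, or at or above the number of configured volumes, is rejected. A simplified, hedged restatement of the check (the real code sits inside DataNode; the first half of the condition is assumed from the constant above, since the hunk starts mid-expression):

import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class VolumeFailureCheck {
  static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;

  static void validate(int volFailuresTolerated, int volsConfigured)
      throws DiskErrorException {
    // Reject anything below the -1 sentinel or >= the configured volume count.
    if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT
        || volFailuresTolerated >= volsConfigured) {
      throw new DiskErrorException("Invalid value configured for "
          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
          + ". Value configured is either less than -1 or >= "
          + "to the number of configured volumes (" + volsConfigured + ").");
    }
  }
}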
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68d5dfdc/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 7c72af0..c187a7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1281,8 +1281,8 @@
   The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
-  The range of the value is -1 now, -1 represents the minimum
-  of volume valids is 1.
+  The value should be greater than or equal to -1 , -1 represents minimum
+  1 valid volume.
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/5] hadoop git commit: HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

(cherry picked from commit 886b935630bf2d46f4c6773de013c093d2e44c38)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a2a8062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a2a8062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a2a8062

Branch: refs/heads/branch-3.2
Commit: 7a2a806213184429eab2e1a79436e937c4e8540b
Parents: 71a6ec6
Author: Vinayakumar B 
Authored: Fri Nov 16 18:01:13 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 18:01:43 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java   | 5 +++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a2a8062/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 270e30b..e480044 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -294,7 +294,8 @@ public class DataNode extends ReconfigurableBase
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
   public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
-  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1";
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG =
+      "should be greater than or equal to -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
@@ -1405,7 +1406,7 @@ public class DataNode extends ReconfigurableBase
         || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either greater than -1 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a2a8062/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 2ee8399..79bf245 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1276,8 +1276,8 @@
   The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
-  The range of the value is -1 now, -1 represents the minimum
-  of volume valids is 1.
+  The value should be greater than or equal to -1 , -1 represents minimum
+  1 valid volume.
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/5] hadoop git commit: HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

(cherry picked from commit 886b935630bf2d46f4c6773de013c093d2e44c38)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a19c72d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a19c72d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a19c72d7

Branch: refs/heads/branch-3.0
Commit: a19c72d7e8d2e2329fd59178f49460f3a9e900f8
Parents: 1b6fa47
Author: Vinayakumar B 
Authored: Fri Nov 16 18:01:13 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 18:02:01 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java   | 5 +++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a19c72d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 695f96b..cac6f55 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -294,7 +294,8 @@ public class DataNode extends ReconfigurableBase
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
   public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
-  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1";
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG =
+      "should be greater than or equal to -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
@@ -1405,7 +1406,7 @@ public class DataNode extends ReconfigurableBase
         || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either greater than -1 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a19c72d7/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 95203a2..a360c9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1273,8 +1273,8 @@
   The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
-  The range of the value is -1 now, -1 represents the minimum
-  of volume valids is 1.
+  The value should be greater than or equal to -1 , -1 represents minimum
+  1 valid volume.
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[5/5] hadoop git commit: HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

(cherry picked from commit 886b935630bf2d46f4c6773de013c093d2e44c38)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3f0681f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3f0681f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3f0681f

Branch: refs/heads/branch-2
Commit: c3f0681f9989116aac3c021b8cc31875dc74ef2b
Parents: bc4098e
Author: Vinayakumar B 
Authored: Fri Nov 16 18:01:13 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 18:02:10 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java   | 5 +++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3f0681f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 80a7ca2..1fab84a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -294,7 +294,8 @@ public class DataNode extends ReconfigurableBase
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
   public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
-  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1";
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG =
+      "should be greater than or equal to -1";
 
 
   /** A list of property that are reconfigurable at runtime. */
@@ -1362,7 +1363,7 @@ public class DataNode extends ReconfigurableBase
         || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either greater than -1 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3f0681f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 5802fce..0ce3bd9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1222,8 +1222,8 @@
   The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
-  The range of the value is -1 now, -1 represents the minimum
-  of volume valids is 1.
+  The value should be greater than or equal to -1 , -1 represents minimum
+  1 valid volume.
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/5] hadoop git commit: HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c39e1ee2b -> c3f0681f9
  refs/heads/branch-3.0 1b6fa4733 -> a19c72d7e
  refs/heads/branch-3.1 53b629094 -> e5e4dc42a
  refs/heads/branch-3.2 71a6ec605 -> 7a2a80621


HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

(cherry picked from commit 8571507efa3af3e4a5213a7e24c65e26162eac4d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc4098eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc4098eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc4098eb

Branch: refs/heads/branch-2
Commit: bc4098eb07502b2661dabe47daac655154f5e7a5
Parents: c39e1ee
Author: Vinayakumar B 
Authored: Fri Nov 16 17:26:36 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 17:43:49 2018 +0530

--
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index cf19c94..553eba3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index 1bb37fb..08fd354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 DataNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 6ae3960..4bc10c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3700a5e..1370097 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -18,7 +18,7 @@
   -->
 <html xmlns="http://www.w3.org/1999/xhtml">
   
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 72ab2cc..f1fd4fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 JournalNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc4098eb/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html 

[3/5] hadoop git commit: HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-14056. Fix error messages in HDFS-12716. Contributed by Ayush Saxena.

(cherry picked from commit 886b935630bf2d46f4c6773de013c093d2e44c38)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5e4dc42
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5e4dc42
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5e4dc42

Branch: refs/heads/branch-3.1
Commit: e5e4dc42a3a896ec9df22459e71a2f23595ff949
Parents: 53b6290
Author: Vinayakumar B 
Authored: Fri Nov 16 18:01:13 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 18:01:54 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java   | 5 +++--
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e4dc42/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 787f42c..139b3d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -294,7 +294,8 @@ public class DataNode extends ReconfigurableBase
 
   static final int CURRENT_BLOCK_FORMAT_VERSION = 1;
   public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
-  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1";
+  public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG =
+      "should be greater than or equal to -1";
 
   /** A list of property that are reconfigurable at runtime. */
   private static final List<String> RECONFIGURABLE_PROPERTIES =
@@ -1405,7 +1406,7 @@ public class DataNode extends ReconfigurableBase
         || volFailuresTolerated >= volsConfigured) {
       throw new DiskErrorException("Invalid value configured for "
           + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
-          + ". Value configured is either greater than -1 or >= "
+          + ". Value configured is either less than -1 or >= "
           + "to the number of configured volumes (" + volsConfigured + ").");
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5e4dc42/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 50e6e76..3aeb4d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1273,8 +1273,8 @@
   The number of volumes that are allowed to
   fail before a datanode stops offering service. By default
   any volume failure will cause a datanode to shutdown.
-  The range of the value is -1 now, -1 represents the minimum
-  of volume valids is 1.
+  The value should be greater than or equal to -1 , -1 represents minimum
+  1 valid volume.
   
 
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[4/4] hadoop git commit: HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

(cherry picked from commit 8571507efa3af3e4a5213a7e24c65e26162eac4d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b6fa473
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b6fa473
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b6fa473

Branch: refs/heads/branch-3.0
Commit: 1b6fa47334d6c46b7756599782de88021fcd625e
Parents: 6dc6978
Author: Vinayakumar B 
Authored: Fri Nov 16 17:26:36 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 17:36:50 2018 +0530

--
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index 11c4644..e2837ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index 1bb37fb..08fd354 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 DataNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index bbe6082..34e49c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 29f114b..6ab50a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -18,7 +18,7 @@
   -->
 <html xmlns="http://www.w3.org/1999/xhtml">
   
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 72ab2cc..f1fd4fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 JournalNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b6fa473/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
index 3db97df..4e488bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
@@ -18,7 +18,7 @@
 -->

[2/4] hadoop git commit: HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

(cherry picked from commit 8571507efa3af3e4a5213a7e24c65e26162eac4d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71a6ec60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71a6ec60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71a6ec60

Branch: refs/heads/branch-3.2
Commit: 71a6ec6058883cfa50f40c8a11e66cb38496155c
Parents: a69423d
Author: Vinayakumar B 
Authored: Fri Nov 16 17:26:36 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 17:29:25 2018 +0530

--
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index 1564ce3..068988c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index 174a9dc..527b11e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 DataNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 1caa4e7..3b920dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index d30ec5a..9ddb597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -18,7 +18,7 @@
   -->
 <html xmlns="http://www.w3.org/1999/xhtml">
   
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 28a254e..8ed572a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 JournalNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71a6ec60/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
index b31108f..ff2f7ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
@@ -18,7 +18,7 @@
 -->

[1/4] hadoop git commit: HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 6dc6978ee -> 1b6fa4733
  refs/heads/branch-3.1 b151b5004 -> 53b629094
  refs/heads/branch-3.2 a69423d28 -> 71a6ec605
  refs/heads/trunk af5958f7b -> 8571507ef


HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8571507e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8571507e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8571507e

Branch: refs/heads/trunk
Commit: 8571507efa3af3e4a5213a7e24c65e26162eac4d
Parents: af5958f
Author: Vinayakumar B 
Authored: Fri Nov 16 17:26:36 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 17:26:36 2018 +0530

--
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index 1564ce3..068988c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index 0fdf552..6e5956b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 DataNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 1caa4e7..3b920dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index d30ec5a..9ddb597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -18,7 +18,7 @@
   -->
 <html xmlns="http://www.w3.org/1999/xhtml">
   
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 28a254e..8ed572a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 JournalNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8571507e/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
index b31108f..ff2f7ce 100644
--- 

[3/4] hadoop git commit: HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

2018-11-16 Thread vinayakumarb
HDFS-13963. NN UI is broken with IE11. Contributed by Ayush Saxena.

(cherry picked from commit 8571507efa3af3e4a5213a7e24c65e26162eac4d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53b62909
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53b62909
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53b62909

Branch: refs/heads/branch-3.1
Commit: 53b6290949d7b8b15f2fd823afe4c3bdf28a638e
Parents: b151b50
Author: Vinayakumar B 
Authored: Fri Nov 16 17:26:36 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Nov 16 17:31:25 2018 +0530

--
 .../hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html  | 2 +-
 .../hadoop-hdfs/src/main/webapps/datanode/datanode.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html   | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/journal/index.html| 2 +-
 .../hadoop-hdfs/src/main/webapps/secondary/status.html | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
index 1564ce3..068988c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
index 9229dd2..0fdf1c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/datanode.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 DataNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 36a9578..358ac71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index d30ec5a..9ddb597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -18,7 +18,7 @@
   -->
 <html xmlns="http://www.w3.org/1999/xhtml">
   
-
+
 
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
index 28a254e..8ed572a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/journal/index.html
@@ -18,7 +18,7 @@
 -->
 <html xmlns="http://www.w3.org/1999/xhtml">
 
-
+
 
 
 JournalNode Information

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53b62909/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
index b31108f..ff2f7ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/status.html
@@ -18,7 +18,7 @@
 -->

hadoop git commit: HADOOP-15856. Trunk build fails to compile native on Windows. Contributed by Vinayakumar B.

2018-10-24 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8f97d6f2c -> 0ca50648c


HADOOP-15856. Trunk build fails to compile native on Windows. Contributed by 
Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ca50648
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ca50648
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ca50648

Branch: refs/heads/trunk
Commit: 0ca50648c2b1a05356ce4b0d5c3a3da5ab3a7d02
Parents: 8f97d6f
Author: Vinayakumar B 
Authored: Wed Oct 24 11:32:53 2018 +0530
Committer: Vinayakumar B 
Committed: Wed Oct 24 11:32:53 2018 +0530

--
 hadoop-project/pom.xml | 26 +-
 1 file changed, 5 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ca50648/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3a44e8c..d5aee4b 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1654,6 +1654,11 @@
 ${javac.version}
 ${javac.version}
 false
+
+
+  -h
+  ${project.build.directory}/native/javah/
+
   
 
 
@@ -2120,27 +2125,6 @@
 
   
 
-
-  native
-  
-false
-  
-  
-
-  
-org.apache.maven.plugins
-maven-compiler-plugin
-
-  
-  
--h
-${project.build.directory}/native/javah/
-  
-
-  
-
-  
-
   
 
   
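Background for the change (hedged; the demo class and paths below are invented): the standalone javah tool has been removed from recent JDKs, and JNI headers are instead generated by javac itself via the -h flag. Moving the argument from the optional native profile into the default maven-compiler-plugin configuration, as the diff above does, presumably ensures the headers are always produced, since a build where the profile did not activate left the native compile on Windows without them.

// Compile with:  javac -h target/native/javah/ NativeCrc32Demo.java
// javac writes NativeCrc32Demo.h into the given directory, declaring a
// JNIEXPORT function named Java_NativeCrc32Demo_crc32 for the method below.
public class NativeCrc32Demo {
  public static native int crc32(byte[] data, int off, int len);
}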


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-15 Thread vinayakumarb
Fix potential FSImage corruption. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1d7

Branch: refs/heads/branch-2.9
Commit: a1d75d69d4619955e6b5ab5a7db18ee252b3
Parents: 81d3208
Author: Vinayakumar B 
Authored: Mon Oct 15 16:04:34 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 16:16:13 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +
 .../server/namenode/FSImageFormatPBINode.java   |  99 +++--
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++-
 .../namenode/INodeWithAdditionalFields.java |  36 +++-
 .../server/namenode/SerialNumberManager.java| 200 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +++-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +
 .../hdfs/server/namenode/XAttrStorage.java  |  11 -
 .../tools/offlineImageViewer/FSImageLoader.java |  18 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 16 files changed, 436 insertions(+), 249 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d7/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+    int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
     return MIN;
   }
+
+  public int getLength() {
+    return LENGTH;
+  }
 }
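To make the LongBitFormat additions concrete: the class packs fixed-width fields into a single long (each format is built from the previous one, a bit length, and a minimum value), and the new Enum interface plus getLength() let callers discover field widths, presumably for the serial-number managers changed alongside it. A hedged usage sketch; the two field names are invented for the example:

import org.apache.hadoop.hdfs.util.LongBitFormat;

public class BitPackingDemo {
  // Hypothetical layout: low 16 bits hold an id, the next 8 bits hold flags.
  private static final LongBitFormat ID = new LongBitFormat("ID", null, 16, 0);
  private static final LongBitFormat FLAGS = new LongBitFormat("FLAGS", ID, 8, 0);

  public static void main(String[] args) {
    long record = 0L;
    record = ID.combine(42L, record);            // write the id field
    record = FLAGS.combine(3L, record);          // write the flags field
    System.out.println(ID.retrieve(record));     // 42
    System.out.println(FLAGS.retrieve(record));  // 3
    System.out.println(FLAGS.getLength());       // 8, via the new accessor
  }
}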

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9232274..5331df8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -676,6 +676,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT =
       0;  //no throttling
 
+  // String table in the fsimage utilizes an expanded bit range.
+  public static final String DFS_IMAGE_EXPANDED_STRING_TABLES_KEY =
+      "dfs.image.string-tables.expanded";
+  public static final boolean DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT =
+      false;
+
   // Image transfer timeout
   public static final String DFS_IMAGE_TRANSFER_TIMEOUT_KEY = "dfs.image.transfer.timeout";
   public static final int DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT = 60 * 1000;
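The new pair of constants is an ordinary opt-in boolean switch; a minimal hedged sketch of reading it (the actual consumer is the fsimage load/save code touched by this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ExpandedTablesFlag {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults to false; set to true to write the expanded string tables.
    boolean expanded = conf.getBoolean(
        DFSConfigKeys.DFS_IMAGE_EXPANDED_STRING_TABLES_KEY,
        DFSConfigKeys.DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT);
    System.out.println("expanded string tables: " + expanded);
  }
}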

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -31,25 +31,23 @@ import com.google.common.collect.ImmutableList;
 /**
 

[3/3] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-15 Thread vinayakumarb
Fix potential FSImage corruption. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f697f3c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f697f3c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f697f3c4

Branch: refs/heads/branch-2.8
Commit: f697f3c4fc0067bb82494e445900d86942685b09
Parents: 7d685c6
Author: Vinayakumar B 
Authored: Mon Oct 15 16:04:34 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 16:44:24 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +
 .../server/namenode/FSImageFormatPBINode.java   |  99 +++--
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++-
 .../namenode/INodeWithAdditionalFields.java |  36 +++-
 .../server/namenode/SerialNumberManager.java| 200 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +++-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +
 .../hdfs/server/namenode/XAttrStorage.java  |  11 -
 .../tools/offlineImageViewer/FSImageLoader.java |  18 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 16 files changed, 436 insertions(+), 249 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f697f3c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+    int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
     return MIN;
   }
+
+  public int getLength() {
+    return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f697f3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 990767a..ac4edf3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -623,6 +623,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT =
       0;  //no throttling
 
+  // String table in the fsimage utilizes an expanded bit range.
+  public static final String DFS_IMAGE_EXPANDED_STRING_TABLES_KEY =
+      "dfs.image.string-tables.expanded";
+  public static final boolean DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT =
+      false;
+
   // Image transfer timeout
   public static final String DFS_IMAGE_TRANSFER_TIMEOUT_KEY = "dfs.image.transfer.timeout";
   public static final int DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT = 60 * 1000;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f697f3c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -31,25 +31,23 @@ import com.google.common.collect.ImmutableList;
 /**
 

[1/3] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-15 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0ba621029 -> e212d7d81
  refs/heads/branch-2.8 7d685c6a5 -> f697f3c4f
  refs/heads/branch-2.9 81d3208a8 -> a1d75


Fix potential FSImage corruption. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e212d7d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e212d7d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e212d7d8

Branch: refs/heads/branch-2
Commit: e212d7d81fbccaceddfff6d204aaabe50df8221c
Parents: 0ba6210
Author: Vinayakumar B 
Authored: Mon Oct 15 16:04:34 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 16:04:34 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +
 .../server/namenode/FSImageFormatPBINode.java   |  99 +++--
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++-
 .../namenode/INodeWithAdditionalFields.java |  36 +++-
 .../server/namenode/SerialNumberManager.java| 200 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +++-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +
 .../hdfs/server/namenode/XAttrStorage.java  |  11 -
 .../tools/offlineImageViewer/FSImageLoader.java |  18 +-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 16 files changed, 436 insertions(+), 249 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e212d7d8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+    int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
     return MIN;
   }
+
+  public int getLength() {
+    return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e212d7d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index edd99f2..81614e5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -685,6 +685,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_IMAGE_TRANSFER_BOOTSTRAP_STANDBY_RATE_DEFAULT =
       0;  //no throttling
 
+  // String table in the fsimage utilizes an expanded bit range.
+  public static final String DFS_IMAGE_EXPANDED_STRING_TABLES_KEY =
+      "dfs.image.string-tables.expanded";
+  public static final boolean DFS_IMAGE_EXPANDED_STRING_TABLES_DEFAULT =
+      false;
+
   // Image transfer timeout
   public static final String DFS_IMAGE_TRANSFER_TIMEOUT_KEY = "dfs.image.transfer.timeout";
   public static final int DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT = 60 * 1000;
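
The new key defaults to false, presumably so images stay readable by software that predates the expanded bit ranges. A cluster opting in would set it in hdfs-site.xml (illustrative snippet):

    <property>
      <name>dfs.image.string-tables.expanded</name>
      <value>true</value>
    </property>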

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e212d7d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 

[2/2] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-15 Thread vinayakumarb
Fix potential FSImage corruption. Contributed by Daryn Sharp.

(cherry picked from commit b60ca37914b22550e3630fa02742d40697decb31)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a41edb0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a41edb0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a41edb0

Branch: refs/heads/branch-3.2
Commit: 8a41edb089fbdedc5e7d9a2aeec63d126afea49f
Parents: 9cb0654
Author: Vinayakumar B 
Authored: Mon Oct 15 15:48:26 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 15:49:56 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 ++---
 .../server/namenode/FSImageFormatPBINode.java   | 101 
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++--
 .../namenode/INodeWithAdditionalFields.java |  36 -
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../server/namenode/SerialNumberManager.java| 152 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +++-
 .../hdfs/server/namenode/XAttrStorage.java  |  11 --
 .../tools/offlineImageViewer/FSImageLoader.java |  18 ++-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 15 files changed, 383 insertions(+), 251 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a41edb0/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
 return MIN;
   }
+
+  public int getLength() {
+return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a41edb0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -31,25 +31,23 @@ import com.google.common.collect.ImmutableList;
 /**
  * Class to pack an AclEntry into an integer. 
  * An ACL entry is represented by a 32-bit integer in Big Endian format. 
- * The bits can be divided in four segments: 
- * [0:1) || [1:3) || [3:6) || [6:7) || [7:32) 
- * 
- * [0:1) -- the scope of the entry (AclEntryScope) 
- * [1:3) -- the type of the entry (AclEntryType) 
- * [3:6) -- the permission of the entry (FsAction) 
- * [6:7) -- A flag to indicate whether Named entry or not 
- * [7:8) -- Reserved 
- * [8:32) -- the name of the entry, which is an ID that points to a 
- * string in the StringTableSection. 
+ *
+ * Note:  this format is used both in-memory and on-disk.  Changes will be
+ * incompatible.
+ *
  */
-public enum AclEntryStatusFormat {
+public enum AclEntryStatusFormat implements LongBitFormat.Enum {
+
+  PERMISSION(null, 3),
+  TYPE(PERMISSION.BITS, 2),
+  SCOPE(TYPE.BITS, 1),
+  NAME(SCOPE.BITS, 24);
 
-  SCOPE(null, 1),
-  TYPE(SCOPE.BITS, 2),
-  PERMISSION(TYPE.BITS, 3),
-  NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
-  RESERVED(NAMED_ENTRY_CHECK.BITS, 1),
-  NAME(RESERVED.BITS, 24);
+  private static final FsAction[] FSACTION_VALUES = FsAction.values();
+  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
+  AclEntryScope.values();
+  private static final 
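
The reordered enum packs each entry as [PERMISSION:3][TYPE:2][SCOPE:1][NAME:24], low bits first, 30 bits in total. A self-contained sketch of encoding and decoding under that layout (the int-valued fields are an assumption for illustration; the real class maps to FsAction, AclEntryType and AclEntryScope):

    /**
     * Encode an ACL entry as [perm:3][type:2][scope:1][nameId:24],
     * low bits first. Illustrative layout demo only.
     */
    final class AclEntryCodec {
      static int encode(int perm, int type, int scope, int nameId) {
        return (perm & 0x7)
            | ((type & 0x3) << 3)
            | ((scope & 0x1) << 5)
            | ((nameId & 0xFFFFFF) << 6);
      }

      static int perm(int encoded)   { return encoded & 0x7; }
      static int type(int encoded)   { return (encoded >>> 3) & 0x3; }
      static int scope(int encoded)  { return (encoded >>> 5) & 0x1; }
      static int nameId(int encoded) { return (encoded >>> 6) & 0xFFFFFF; }
    }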

[1/2] hadoop git commit: Fix potential FSImage corruption. Contributed by Daryn Sharp.

2018-10-15 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 9cb0654fb -> 8a41edb08
  refs/heads/trunk 8e5365e27 -> b60ca3791


Fix potential FSImage corruption. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b60ca379
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b60ca379
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b60ca379

Branch: refs/heads/trunk
Commit: b60ca37914b22550e3630fa02742d40697decb31
Parents: 8e5365e
Author: Vinayakumar B 
Authored: Mon Oct 15 15:48:26 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Oct 15 15:48:26 2018 +0530

--
 .../apache/hadoop/hdfs/util/LongBitFormat.java  |   8 +
 .../server/namenode/AclEntryStatusFormat.java   | 109 ++---
 .../server/namenode/FSImageFormatPBINode.java   | 101 
 .../server/namenode/FSImageFormatProtobuf.java  |  26 ++--
 .../namenode/INodeWithAdditionalFields.java |  36 -
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../server/namenode/SerialNumberManager.java| 152 +--
 .../hdfs/server/namenode/SerialNumberMap.java   |  43 +-
 .../hdfs/server/namenode/XAttrFormat.java   |  95 +++-
 .../hdfs/server/namenode/XAttrStorage.java  |  11 --
 .../tools/offlineImageViewer/FSImageLoader.java |  18 ++-
 .../offlineImageViewer/PBImageTextWriter.java   |   3 +-
 .../offlineImageViewer/PBImageXmlWriter.java|  22 +--
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../TestCommitBlockSynchronization.java |   6 +-
 15 files changed, 383 insertions(+), 251 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60ca379/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
index 51ad08f..195fd8c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
@@ -26,6 +26,10 @@ import java.io.Serializable;
 public class LongBitFormat implements Serializable {
   private static final long serialVersionUID = 1L;
 
+  public interface Enum {
+int getLength();
+  }
+
   private final String NAME;
   /** Bit offset */
   private final int OFFSET;
@@ -69,4 +73,8 @@ public class LongBitFormat implements Serializable {
   public long getMin() {
 return MIN;
   }
+
+  public int getLength() {
+return LENGTH;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b60ca379/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
index 2c5b23b..e9e3e59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclEntryStatusFormat.java
@@ -31,25 +31,23 @@ import com.google.common.collect.ImmutableList;
 /**
  * Class to pack an AclEntry into an integer. 
  * An ACL entry is represented by a 32-bit integer in Big Endian format. 
- * The bits can be divided in four segments: 
- * [0:1) || [1:3) || [3:6) || [6:7) || [7:32) 
- * 
- * [0:1) -- the scope of the entry (AclEntryScope) 
- * [1:3) -- the type of the entry (AclEntryType) 
- * [3:6) -- the permission of the entry (FsAction) 
- * [6:7) -- A flag to indicate whether Named entry or not 
- * [7:8) -- Reserved 
- * [8:32) -- the name of the entry, which is an ID that points to a 
- * string in the StringTableSection. 
+ *
+ * Note:  this format is used both in-memory and on-disk.  Changes will be
+ * incompatible.
+ *
  */
-public enum AclEntryStatusFormat {
+public enum AclEntryStatusFormat implements LongBitFormat.Enum {
+
+  PERMISSION(null, 3),
+  TYPE(PERMISSION.BITS, 2),
+  SCOPE(TYPE.BITS, 1),
+  NAME(SCOPE.BITS, 24);
 
-  SCOPE(null, 1),
-  TYPE(SCOPE.BITS, 2),
-  PERMISSION(TYPE.BITS, 3),
-  NAMED_ENTRY_CHECK(PERMISSION.BITS, 1),
-  RESERVED(NAMED_ENTRY_CHECK.BITS, 1),
-  NAME(RESERVED.BITS, 24);
+  private static final FsAction[] FSACTION_VALUES = FsAction.values();
+  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
+   

hadoop git commit: HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 f1fbc5637 -> bed2f407a


HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.

(cherry picked from commit 6e0e6daaf3215213c32cdffa79f3730d40e981ea)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bed2f407
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bed2f407
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bed2f407

Branch: refs/heads/branch-3.1
Commit: bed2f407a91e6be70aaed6f06f575c19d94c
Parents: f1fbc56
Author: Vinayakumar B 
Authored: Fri Oct 12 17:34:10 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:36:03 2018 +0530

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bed2f407/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 16c0cfa..7d04942 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -424,10 +424,8 @@ public class TestDataNodeVolumeFailure {
   @Override
   public Boolean get() {
 // underReplicatedBlocks are due to failed volumes
-int underReplicatedBlocks = BlockManagerTestUtil
-.checkHeartbeatAndGetUnderReplicatedBlocksCount(
-cluster.getNamesystem(), bm);
-
+long underReplicatedBlocks = bm.getLowRedundancyBlocksCount()
++ bm.getPendingReconstructionBlocksCount();
 if (underReplicatedBlocks > 0) {
   return true;
 }
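
The fix swaps an active heartbeat recomputation for a passive read of the block manager counters inside the test's polling supplier. A minimal sketch of that polling pattern, as used with GenericTestUtils.waitFor in Hadoop tests (counter names are taken from the diff; the Supplier flavor, Guava vs java.util.function, varies by branch):

    // Wait until under-replication from the failed volumes is observed,
    // checking every 100 ms and timing out after 30 s.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        long underReplicated = bm.getLowRedundancyBlocksCount()
            + bm.getPendingReconstructionBlocksCount();
        return underReplicated > 0;
      }
    }, 100, 30000);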


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.

(cherry picked from commit 6e0e6daaf3215213c32cdffa79f3730d40e981ea)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b185964b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b185964b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b185964b

Branch: refs/heads/branch-3.2
Commit: b185964b15329996e17ae43107afe3470e0b4231
Parents: d838d39
Author: Vinayakumar B 
Authored: Fri Oct 12 17:34:10 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:34:28 2018 +0530

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b185964b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 16c0cfa..7d04942 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -424,10 +424,8 @@ public class TestDataNodeVolumeFailure {
   @Override
   public Boolean get() {
 // underReplicatedBlocks are due to failed volumes
-int underReplicatedBlocks = BlockManagerTestUtil
-.checkHeartbeatAndGetUnderReplicatedBlocksCount(
-cluster.getNamesystem(), bm);
-
+long underReplicatedBlocks = bm.getLowRedundancyBlocksCount()
++ bm.getPendingReconstructionBlocksCount();
 if (underReplicatedBlocks > 0) {
   return true;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 d838d39a2 -> b185964b1
  refs/heads/trunk de44e1064 -> 6e0e6daaf


HDFS-13945. TestDataNodeVolumeFailure is Flaky. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e0e6daa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e0e6daa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e0e6daa

Branch: refs/heads/trunk
Commit: 6e0e6daaf3215213c32cdffa79f3730d40e981ea
Parents: de44e10
Author: Vinayakumar B 
Authored: Fri Oct 12 17:34:10 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:34:10 2018 +0530

--
 .../hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e0e6daa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 16c0cfa..7d04942 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -424,10 +424,8 @@ public class TestDataNodeVolumeFailure {
   @Override
   public Boolean get() {
 // underReplicatedBlocks are due to failed volumes
-int underReplicatedBlocks = BlockManagerTestUtil
-.checkHeartbeatAndGetUnderReplicatedBlocksCount(
-cluster.getNamesystem(), bm);
-
+long underReplicatedBlocks = bm.getLowRedundancyBlocksCount()
++ bm.getPendingReconstructionBlocksCount();
 if (underReplicatedBlocks > 0) {
   return true;
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: HDFS-13156. HDFS Block Placement Policy - Client Local Rack. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.2 62d329cac -> d838d39a2
  refs/heads/trunk e36ae9639 -> de44e1064


HDFS-13156. HDFS Block Placement Policy - Client Local Rack. Contributed by 
Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de44e106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de44e106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de44e106

Branch: refs/heads/trunk
Commit: de44e1064f051248934ceffdd98a3cc13653d886
Parents: e36ae96
Author: Vinayakumar B 
Authored: Fri Oct 12 17:27:23 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:27:23 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de44e106/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index 471a27f..a0121e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -102,7 +102,7 @@ Large HDFS instances run on a cluster of computers that 
commonly spread across m
 The NameNode determines the rack id each DataNode belongs to via the process 
outlined in [Hadoop Rack Awareness](../hadoop-common/RackAwareness.html).
 A simple but non-optimal policy is to place replicas on unique racks. This 
prevents losing data when an entire rack fails and allows use of bandwidth from 
multiple racks when reading data. This policy evenly distributes replicas in 
the cluster which makes it easy to balance load on component failure. However, 
this policy increases the cost of writes because a write needs to transfer 
blocks to multiple racks.
 
-For the common case, when the replication factor is three, HDFS’s placement 
policy is to put one replica on the local machine if the writer is on a 
datanode, otherwise on a random datanode, another replica on a node in a 
different (remote) rack, and the last on a different node in the same remote 
rack. This policy cuts the inter-rack write traffic which generally improves 
write performance. The chance of rack failure is far less than that of node 
failure; this policy does not impact data reliability and availability 
guarantees. However, it does reduce the aggregate network bandwidth used when 
reading data since a block is placed in only two unique racks rather than 
three. With this policy, the replicas of a file do not evenly distribute across 
the racks. One third of replicas are on one node, two thirds of replicas are on 
one rack, and the other third are evenly distributed across the remaining 
racks. This policy improves write performance without compromising data 
reliability or 
 read performance.
+For the common case, when the replication factor is three, HDFS’s placement 
policy is to put one replica on the local machine if the writer is on a 
datanode, otherwise on a random datanode in the same rack as that of the 
writer, another replica on a node in a different (remote) rack, and the last on 
a different node in the same remote rack. This policy cuts the inter-rack write 
traffic which generally improves write performance. The chance of rack failure 
is far less than that of node failure; this policy does not impact data 
reliability and availability guarantees. However, it does reduce the aggregate 
network bandwidth used when reading data since a block is placed in only two 
unique racks rather than three. With this policy, the replicas of a file do not 
evenly distribute across the racks. One third of replicas are on one node, two 
thirds of replicas are on one rack, and the other third are evenly distributed 
across the remaining racks. This policy improves write performance without compromising data reliability or read performance.
 
 If the replication factor is greater than 3,
 the placement of the 4th and following replicas are determined randomly
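
Read as code, the corrected three-replica policy amounts to the sketch below; the topology helpers are assumptions standing in for cluster state, and the real BlockPlacementPolicyDefault additionally weighs load, free space and exclusions:

    /** Illustrative sketch of the default 3-replica placement. */
    class PlacementSketch {
      interface Topology {
        String localNodeOfWriter();   // null if the writer is not a datanode
        String writerRack();
        String rackOf(String node);
        String randomNodeInRack(String rack, String exclude);
        String randomRemoteRack(String notThisRack);
      }

      static String[] placeThreeReplicas(Topology t) {
        String local = t.localNodeOfWriter();
        String first = (local != null)
            ? local                                      // writer's own node
            : t.randomNodeInRack(t.writerRack(), null);  // random node in writer's rack
        String remoteRack = t.randomRemoteRack(t.rackOf(first));
        String second = t.randomNodeInRack(remoteRack, null);    // different (remote) rack
        String third = t.randomNodeInRack(remoteRack, second);   // same remote rack, other node
        return new String[] {first, second, third};
      }
    }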


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HDFS-13156. HDFS Block Placement Policy - Client Local Rack. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
HDFS-13156. HDFS Block Placement Policy - Client Local Rack. Contributed by 
Ayush Saxena.

(cherry picked from commit de44e1064f051248934ceffdd98a3cc13653d886)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d838d39a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d838d39a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d838d39a

Branch: refs/heads/branch-3.2
Commit: d838d39a2cb13bfd1ded50fb91f44168b66ed99e
Parents: 62d329c
Author: Vinayakumar B 
Authored: Fri Oct 12 17:27:23 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:28:48 2018 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d838d39a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
index 471a27f..a0121e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsDesign.md
@@ -102,7 +102,7 @@ Large HDFS instances run on a cluster of computers that 
commonly spread across m
 The NameNode determines the rack id each DataNode belongs to via the process 
outlined in [Hadoop Rack Awareness](../hadoop-common/RackAwareness.html).
 A simple but non-optimal policy is to place replicas on unique racks. This 
prevents losing data when an entire rack fails and allows use of bandwidth from 
multiple racks when reading data. This policy evenly distributes replicas in 
the cluster which makes it easy to balance load on component failure. However, 
this policy increases the cost of writes because a write needs to transfer 
blocks to multiple racks.
 
-For the common case, when the replication factor is three, HDFS’s placement 
policy is to put one replica on the local machine if the writer is on a 
datanode, otherwise on a random datanode, another replica on a node in a 
different (remote) rack, and the last on a different node in the same remote 
rack. This policy cuts the inter-rack write traffic which generally improves 
write performance. The chance of rack failure is far less than that of node 
failure; this policy does not impact data reliability and availability 
guarantees. However, it does reduce the aggregate network bandwidth used when 
reading data since a block is placed in only two unique racks rather than 
three. With this policy, the replicas of a file do not evenly distribute across 
the racks. One third of replicas are on one node, two thirds of replicas are on 
one rack, and the other third are evenly distributed across the remaining 
racks. This policy improves write performance without compromising data 
reliability or 
 read performance.
+For the common case, when the replication factor is three, HDFS’s placement 
policy is to put one replica on the local machine if the writer is on a 
datanode, otherwise on a random datanode in the same rack as that of the 
writer, another replica on a node in a different (remote) rack, and the last on 
a different node in the same remote rack. This policy cuts the inter-rack write 
traffic which generally improves write performance. The chance of rack failure 
is far less than that of node failure; this policy does not impact data 
reliability and availability guarantees. However, it does reduce the aggregate 
network bandwidth used when reading data since a block is placed in only two 
unique racks rather than three. With this policy, the replicas of a file do not 
evenly distribute across the racks. One third of replicas are on one node, two 
thirds of replicas are on one rack, and the other third are evenly distributed 
across the remaining racks. This policy improves write performance without compromising data reliability or read performance.
 
 If the replication factor is greater than 3,
 the placement of the 4th and following replicas are determined randomly


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' commands. Contributed by Ayush Saxena.

2018-10-12 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-13891 e4fca6aae -> 4875eea2a


HDFS-13906. RBF: Add multiple paths for dfsrouteradmin 'rm' and 'clrquota' 
commands. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4875eea2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4875eea2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4875eea2

Branch: refs/heads/HDFS-13891
Commit: 4875eea2ad9cb83a4eeba6dbed7f59d5cea7e294
Parents: e4fca6a
Author: Vinayakumar B 
Authored: Fri Oct 12 17:19:55 2018 +0530
Committer: Vinayakumar B 
Committed: Fri Oct 12 17:19:55 2018 +0530

--
 .../hdfs/tools/federation/RouterAdmin.java  | 102 ++-
 .../federation/router/TestRouterAdminCLI.java   |  82 ---
 2 files changed, 122 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4875eea2/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index 0a681e9..8d2d51d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -151,17 +151,7 @@ public class RouterAdmin extends Configured implements 
Tool {
* @param arg List of of command line parameters.
*/
   private void validateMax(String[] arg) {
-if (arg[0].equals("-rm")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-ls")) {
-  if (arg.length > 2) {
-throw new IllegalArgumentException(
-"Too many arguments, Max=1 argument allowed");
-  }
-} else if (arg[0].equals("-clrQuota")) {
+if (arg[0].equals("-ls")) {
   if (arg.length > 2) {
 throw new IllegalArgumentException(
 "Too many arguments, Max=1 argument allowed");
@@ -183,63 +173,63 @@ public class RouterAdmin extends Configured implements 
Tool {
 }
   }
 
-  @Override
-  public int run(String[] argv) throws Exception {
-if (argv.length < 1) {
-  System.err.println("Not enough parameters specified");
-  printUsage();
-  return -1;
-}
-
-int exitCode = -1;
-int i = 0;
-String cmd = argv[i++];
-
-// Verify that we have enough command line parameters
+  /**
+   * Usage: validates the minimum number of arguments for a command.
+   * @param argv List of of command line parameters.
+   * @return true if number of arguments are valid for the command else false.
+   */
+  private boolean validateMin(String[] argv) {
+String cmd = argv[0];
 if ("-add".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-update".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-rm".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-setQuota".equals(cmd)) {
   if (argv.length < 4) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-clrQuota".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-safemode".equals(cmd)) {
   if (argv.length < 2) {
-System.err.println("Not enough parameters specified for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 } else if ("-nameservice".equals(cmd)) {
   if (argv.length < 3) {
-System.err.println("Not enough parameters specificed for cmd " + cmd);
-printUsage(cmd);
-return exitCode;
+return false;
   }
 }
+return true;
+  }
+
+  @Override
+  public int run(String[] argv) throws Exception {
+if 
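
The refactor moves the per-command minimum-argument checks out of run() into a single validateMin(). The same checks could also be expressed as a lookup table; an illustrative alternative, not the committed code (needs java.util.Map; Map.of is Java 9+):

    private static final Map<String, Integer> MIN_ARGS = Map.of(
        "-add", 4, "-update", 4, "-rm", 2, "-setQuota", 4,
        "-clrQuota", 2, "-safemode", 2, "-nameservice", 3);

    private boolean validateMin(String[] argv) {
      Integer min = MIN_ARGS.get(argv[0]);
      return min == null || argv.length >= min;
    }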

[1/3] hadoop git commit: HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.

2018-09-19 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 78e0c0a1b -> d2655ea60
  refs/heads/branch-3.1 3fb678729 -> 2aa385463
  refs/heads/trunk 6fc293fec -> 7ad27e97f


HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal 
Behaviour. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad27e97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad27e97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad27e97

Branch: refs/heads/trunk
Commit: 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5
Parents: 6fc293f
Author: Vinayakumar B 
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Sep 20 09:31:35 2018 +0530

--
 .../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java   | 8 +++-
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java| 6 ++
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 6e101a2..39d5e73 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
 this.emptierInterval = (long)(conf.getFloat(
 FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
 * MSECS_PER_MINUTE);
+if (deletionInterval < 0) {
+  LOG.warn("Invalid value {} for deletion interval,"
+  + " deletion interaval can not be negative."
+  + "Changing to default value 0", deletionInterval);
+  this.deletionInterval = 0;
+}
   }
 
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-return deletionInterval != 0;
+return deletionInterval > 0;
   }
 
   @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 
@@ -526,6 +529,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
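
After this change a zero and a negative fs.trash.interval both leave trash disabled, with negative values normalized to 0 and a warning logged. For reference, the interval is configured in minutes in core-site.xml (illustrative snippet):

    <property>
      <name>fs.trash.interval</name>
      <!-- minutes; 0 or a negative value disables trash -->
      <value>10</value>
    </property>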
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.

2018-09-19 Thread vinayakumarb
HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal 
Behaviour. Contributed by Ayush Saxena.

(cherry picked from commit 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2aa38546
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2aa38546
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2aa38546

Branch: refs/heads/branch-3.1
Commit: 2aa385463305beaae8770c8deb22faed6943ef00
Parents: 3fb6787
Author: Vinayakumar B 
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Sep 20 09:33:22 2018 +0530

--
 .../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java   | 8 +++-
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java| 6 ++
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa38546/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 6e101a2..39d5e73 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
 this.emptierInterval = (long)(conf.getFloat(
 FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
 * MSECS_PER_MINUTE);
+if (deletionInterval < 0) {
+  LOG.warn("Invalid value {} for deletion interval,"
+  + " deletion interaval can not be negative."
+  + "Changing to default value 0", deletionInterval);
+  this.deletionInterval = 0;
+}
   }
 
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-return deletionInterval != 0;
+return deletionInterval > 0;
   }
 
   @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2aa38546/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 
@@ -526,6 +529,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[3/3] hadoop git commit: HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.

2018-09-19 Thread vinayakumarb
HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal 
Behaviour. Contributed by Ayush Saxena.

(cherry picked from commit 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5)
(cherry picked from commit 2aa385463305beaae8770c8deb22faed6943ef00)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2655ea6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2655ea6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2655ea6

Branch: refs/heads/branch-3.0
Commit: d2655ea60ccc2129105b9e2b76e4606b46ff283b
Parents: 78e0c0a
Author: Vinayakumar B 
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Sep 20 09:34:37 2018 +0530

--
 .../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java   | 8 +++-
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java| 6 ++
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2655ea6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 9c6a685..f2c37e3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
 this.emptierInterval = (long)(conf.getFloat(
 FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
 * MSECS_PER_MINUTE);
+if (deletionInterval < 0) {
+  LOG.warn("Invalid value {} for deletion interval,"
+  + " deletion interaval can not be negative."
+  + "Changing to default value 0", deletionInterval);
+  this.deletionInterval = 0;
+}
   }
 
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-return deletionInterval != 0;
+return deletionInterval > 0;
   }
 
   @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2655ea6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 
@@ -526,6 +529,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/2] hadoop git commit: HDFS-13895. EC: Fix Intermittent Failure in TestDFSStripedOutputStreamWithFailureWithRandomECPolicy. Contributed by Ayush Saxena.

2018-09-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 cbcdaefa3 -> b8eaeca7e
  refs/heads/trunk 30eceec34 -> eef3bafae


HDFS-13895. EC: Fix Intermittent Failure in 
TestDFSStripedOutputStreamWithFailureWithRandomECPolicy. Contributed by Ayush 
Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eef3bafa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eef3bafa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eef3bafa

Branch: refs/heads/trunk
Commit: eef3bafae8bc0fd31506637cd131599ec97f362f
Parents: 30eceec
Author: Vinayakumar B 
Authored: Mon Sep 10 09:22:59 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 10 09:22:59 2018 +0530

--
 .../TestDFSStripedOutputStreamWithFailure.java  | 41 +++-
 1 file changed, 22 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eef3bafa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 800fac1..ff52146 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -244,26 +244,29 @@ public class TestDFSStripedOutputStreamWithFailure extends
 // Full stripe and a partial on non-cell boundary
 (cellSize * dataBlocks) + 123,
 };
-try {
-  for (int length: fileLengths) {
-// select the two DNs with partial block to kill
-final int[] dnIndex = {dataBlocks - 2, dataBlocks - 1};
-final int[] killPos = getKillPositions(length, dnIndex.length);
-try {
-  LOG.info("runTestWithMultipleFailure2: length==" + length
-  + ", killPos=" + Arrays.toString(killPos)
-  + ", dnIndex=" + Arrays.toString(dnIndex));
-  setup(conf);
-  runTest(length, killPos, dnIndex, false);
-} catch (Throwable e) {
-  final String err = "failed, killPos=" + Arrays.toString(killPos)
-  + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
-  LOG.error(err);
-  throw e;
-}
+// select the two DNs with partial block to kill
+int[] dnIndex = null;
+if (parityBlocks > 1) {
+  dnIndex = new int[] {dataBlocks - 2, dataBlocks - 1};
+} else {
+  dnIndex = new int[] {dataBlocks - 1};
+}
+for (int length : fileLengths) {
+  final int[] killPos = getKillPositions(length, dnIndex.length);
+  try {
+LOG.info("runTestWithMultipleFailure2: length==" + length + ", 
killPos="
++ Arrays.toString(killPos) + ", dnIndex="
++ Arrays.toString(dnIndex));
+setup(conf);
+runTest(length, killPos, dnIndex, false);
+  } catch (Throwable e) {
+final String err = "failed, killPos=" + Arrays.toString(killPos)
++ ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
+LOG.error(err);
+throw e;
+  } finally {
+tearDown();
   }
-} finally {
-  tearDown();
 }
   }
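
The restructured loop gives each file length a fresh cluster and always tears it down, so one failing case cannot poison the next. The general shape of that per-case lifecycle, as a minimal sketch with hypothetical names:

    for (TestCase c : cases) {
      try {
        setUpCluster();          // fresh fixture for this case
        run(c);
      } catch (Throwable t) {
        LOG.error("case failed: " + c, t);
        throw t;                 // rethrow after logging context
      } finally {
        tearDownCluster();       // always release, pass or fail
      }
    }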
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/2] hadoop git commit: HDFS-13895. EC: Fix Intermittent Failure in TestDFSStripedOutputStreamWithFailureWithRandomECPolicy. Contributed by Ayush Saxena.

2018-09-09 Thread vinayakumarb
HDFS-13895. EC: Fix Intermittent Failure in 
TestDFSStripedOutputStreamWithFailureWithRandomECPolicy. Contributed by Ayush 
Saxena.

(cherry picked from commit eef3bafae8bc0fd31506637cd131599ec97f362f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8eaeca7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8eaeca7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8eaeca7

Branch: refs/heads/branch-3.1
Commit: b8eaeca7e6afc312c59d71e6de4049bb5bb7444d
Parents: cbcdaef
Author: Vinayakumar B 
Authored: Mon Sep 10 09:22:59 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 10 09:24:08 2018 +0530

--
 .../TestDFSStripedOutputStreamWithFailure.java  | 41 +++-
 1 file changed, 22 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8eaeca7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 800fac1..ff52146 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -244,26 +244,29 @@ public class TestDFSStripedOutputStreamWithFailure extends
 // Full stripe and a partial on non-cell boundary
 (cellSize * dataBlocks) + 123,
 };
-try {
-  for (int length: fileLengths) {
-// select the two DNs with partial block to kill
-final int[] dnIndex = {dataBlocks - 2, dataBlocks - 1};
-final int[] killPos = getKillPositions(length, dnIndex.length);
-try {
-  LOG.info("runTestWithMultipleFailure2: length==" + length
-  + ", killPos=" + Arrays.toString(killPos)
-  + ", dnIndex=" + Arrays.toString(dnIndex));
-  setup(conf);
-  runTest(length, killPos, dnIndex, false);
-} catch (Throwable e) {
-  final String err = "failed, killPos=" + Arrays.toString(killPos)
-  + ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
-  LOG.error(err);
-  throw e;
-}
+// select the two DNs with partial block to kill
+int[] dnIndex = null;
+if (parityBlocks > 1) {
+  dnIndex = new int[] {dataBlocks - 2, dataBlocks - 1};
+} else {
+  dnIndex = new int[] {dataBlocks - 1};
+}
+for (int length : fileLengths) {
+  final int[] killPos = getKillPositions(length, dnIndex.length);
+  try {
+LOG.info("runTestWithMultipleFailure2: length==" + length + ", 
killPos="
++ Arrays.toString(killPos) + ", dnIndex="
++ Arrays.toString(dnIndex));
+setup(conf);
+runTest(length, killPos, dnIndex, false);
+  } catch (Throwable e) {
+final String err = "failed, killPos=" + Arrays.toString(killPos)
++ ", dnIndex=" + Arrays.toString(dnIndex) + ", length=" + length;
+LOG.error(err);
+throw e;
+  } finally {
+tearDown();
   }
-} finally {
-  tearDown();
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13806. EC: No error message for unsetting EC policy of the directory inherits the erasure coding policy from an ancestor directory. Contributed by Ayush Saxena.

2018-09-09 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk de85f841d -> 30eceec34


HDFS-13806. EC: No error message for unsetting EC policy of the directory 
inherits the erasure coding policy from an ancestor directory. Contributed by 
Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30eceec3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30eceec3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30eceec3

Branch: refs/heads/trunk
Commit: 30eceec3420fc6be00d3878ba787bd9518d3ca0e
Parents: de85f84
Author: Vinayakumar B 
Authored: Mon Sep 10 09:10:51 2018 +0530
Committer: Vinayakumar B 
Committed: Mon Sep 10 09:10:51 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  3 +-
 .../hdfs/protocol/NoECPolicySetException.java   | 37 
 .../server/namenode/FSDirErasureCodingOp.java   |  4 +++
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  7 
 .../TestUnsetAndChangeDirectoryEcPolicy.java| 23 +---
 .../test/resources/testErasureCodingConf.xml| 24 +
 6 files changed, 92 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30eceec3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 61568d5..f4d11b9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -131,6 +131,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.NoECPolicySetException;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
@@ -2750,7 +2751,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   throw re.unwrapRemoteException(AccessControlException.class,
   SafeModeException.class,
   UnresolvedPathException.class,
-  FileNotFoundException.class);
+  FileNotFoundException.class, NoECPolicySetException.class);
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30eceec3/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java
new file mode 100644
index 000..de3054a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/NoECPolicySetException.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ *Thrown when no EC policy is set explicitly on the directory.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class NoECPolicySetException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public NoECPolicySetException(String msg) {
+super(msg);
+  }
+}
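
Because DFSClient now unwraps NoECPolicySetException from the RemoteException, callers can tell "nothing to unset" apart from other failures. A hedged usage sketch (unsetErasureCodingPolicy is the HDFS API; the handling around it is illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.NoECPolicySetException;

    void unsetPolicy(DistributedFileSystem dfs, Path dir) throws Exception {
      try {
        dfs.unsetErasureCodingPolicy(dir);
      } catch (NoECPolicySetException e) {
        // The directory has no explicitly set EC policy (it may only
        // inherit one from an ancestor); report that instead of a
        // generic IOException.
        System.err.println(e.getMessage());
      }
    }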

