[3/3] hadoop git commit: Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."
Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu." This reverts commit 172b23af33554b7d58fd41b022d983bcc2433da7. (cherry picked from commit d0d2d4c51e9534e08893ae14cf3fff7b2ee70b1d) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26540a64 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26540a64 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26540a64 Branch: refs/heads/branch-3.1 Commit: 26540a64d4ed7ea9ea25c809ffc6f9d2dbd9a29d Parents: 60c6ded Author: Owen O'MalleyAuthored: Thu Mar 1 09:59:08 2018 -0800 Committer: Wangda Tan Committed: Fri Mar 9 23:01:11 2018 -0800 -- .../AuthenticationWithProxyUserFilter.java | 43 --- .../hadoop/http/TestHttpServerWithSpengo.java | 15 +-- .../mapreduce/v2/app/webapp/AppController.java | 7 +- .../hadoop/yarn/server/webapp/AppBlock.java | 113 ++- 4 files changed, 85 insertions(+), 93 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/26540a64/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java index c97f8ad..ea9b282 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java @@ -20,10 +20,9 @@ package org.apache.hadoop.security; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; import org.apache.http.NameValuePair; import org.apache.http.client.utils.URLEncodedUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import javax.servlet.FilterChain; import javax.servlet.ServletException; @@ -42,9 +41,6 @@ import java.util.List; */ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter { - public static final Logger LOG = - LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class); - /** * Constant used in URL's query string to perform a proxy user request, the * value of the DO_AS parameter is the user the request will be @@ -70,30 +66,29 @@ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter { protected void doFilter(FilterChain filterChain, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException { -final String proxyUser = getDoAs(request); +// authorize proxy user before calling next filter. +String proxyUser = getDoAs(request); if (proxyUser != null) { + UserGroupInformation realUser = + UserGroupInformation.createRemoteUser(request.getRemoteUser()); + UserGroupInformation proxyUserInfo = + UserGroupInformation.createProxyUser(proxyUser, realUser); - // Change the remote user after proxy user is authorized. 
- final HttpServletRequest finalReq = request; - request = new HttpServletRequestWrapper(finalReq) { - -private String getRemoteOrProxyUser() throws AuthorizationException { - UserGroupInformation realUser = - UserGroupInformation.createRemoteUser(finalReq.getRemoteUser()); - UserGroupInformation proxyUserInfo = - UserGroupInformation.createProxyUser(proxyUser, realUser); - ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr()); - return proxyUserInfo.getUserName(); -} + try { +ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr()); + } catch (AuthorizationException ex) { +HttpExceptionUtils.createServletExceptionResponse(response, +HttpServletResponse.SC_FORBIDDEN, ex); +// stop filter chain if there is an Authorization Exception. +return; + } + final UserGroupInformation finalProxyUser = proxyUserInfo; + // Change the remote user after proxy user is authorized. + request = new HttpServletRequestWrapper(request) { @Override public String getRemoteUser() { - try { -return getRemoteOrProxyUser(); - } catch (AuthorizationException ex) { -LOG.error("Unable to verify proxy user: " + ex.getMessage(), ex); -
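Unrolled, the restored doFilter flow looks roughly like this. A compilable sketch, not the verbatim file: the static helper and its proxyUser parameter are illustrative, and it assumes an upstream SPNEGO filter has already populated getRemoteUser().

import java.io.IOException;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.util.HttpExceptionUtils;

public final class ProxyUserAuthorizationSketch {
  /** Authorize the doAs user eagerly, then rewrite getRemoteUser(). */
  static void doFilter(FilterChain chain, HttpServletRequest request,
      HttpServletResponse response, String proxyUser)
      throws IOException, ServletException {
    if (proxyUser != null) {
      UserGroupInformation realUser =
          UserGroupInformation.createRemoteUser(request.getRemoteUser());
      UserGroupInformation proxyUgi =
          UserGroupInformation.createProxyUser(proxyUser, realUser);
      try {
        // Checks the hadoop.proxyuser.<realUser>.hosts/users/groups rules.
        ProxyUsers.authorize(proxyUgi, request.getRemoteAddr());
      } catch (AuthorizationException ex) {
        // Send a 403 and stop the chain on failure.
        HttpExceptionUtils.createServletExceptionResponse(response,
            HttpServletResponse.SC_FORBIDDEN, ex);
        return;
      }
      final String effectiveUser = proxyUgi.getUserName();
      request = new HttpServletRequestWrapper(request) {
        @Override
        public String getRemoteUser() {
          return effectiveUser;  // downstream servlets see the proxied user
        }
      };
    }
    chain.doFilter(request, response);
  }
}

Authorizing once, up front, is what lets the filter return 403 and stop the chain, instead of re-running the proxy check on every getRemoteUser() call as the removed variant did.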
[1/3] hadoop git commit: Updated timeline reader to use AuthenticationFilter
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 60c6ded48 -> 4b9eb2188

Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b9eb218
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b9eb218
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b9eb218

Branch: refs/heads/branch-3.1
Commit: 4b9eb2188bfe0075577e937e08d0016a957561dc
Parents: 4cd4219
Author: Wangda Tan
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Wangda Tan
Committed: Fri Mar 9 23:01:11 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b9eb218/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.yarn.server.timelineservice.reader.security;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;

 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer extends
     TimelineAuthenticationFilterInitializer{

   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
    *
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
    * YARN configuration properties prefixed with
    * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
    *
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer extends
   public void initFilter(FilterContainer container, Configuration conf) {
     setAuthFilterConfig(conf);

     container.addGlobalFilter("Timeline Reader Authentication Filter",
-        AuthenticationWithProxyUserFilter.class.getName(),
+        AuthenticationFilter.class.getName(),
         getFilterConfig());
   }
 }
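Assembling the hunks, the initializer ends up roughly as below. A sketch of the post-change end state; setAuthFilterConfig and getFilterConfig are the inherited helpers referenced in the diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;

public class TimelineReaderAuthFilterInitializerSketch
    extends TimelineAuthenticationFilterInitializer {

  @Override
  public void initFilter(FilterContainer container, Configuration conf) {
    // Collect the timeline-service http-authentication properties.
    setAuthFilterConfig(conf);
    // Register the stock hadoop-auth filter globally (all URLs), in place of
    // the proxy-user variant removed by the companion reverts.
    container.addGlobalFilter("Timeline Reader Authentication Filter",
        AuthenticationFilter.class.getName(),
        getFilterConfig());
  }
}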
[2/3] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu."
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu." This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3. Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae (cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cd42192 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cd42192 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cd42192 Branch: refs/heads/branch-3.1 Commit: 4cd4219206e973a07fd454c141268f95365e293c Parents: 26540a6 Author: Owen O'MalleyAuthored: Thu Mar 1 10:15:22 2018 -0800 Committer: Wangda Tan Committed: Fri Mar 9 23:01:11 2018 -0800 -- .../AuthenticationFilterInitializer.java| 9 +- .../AuthenticationWithProxyUserFilter.java | 119 --- .../security/TestAuthenticationFilter.java | 13 +- .../TestAuthenticationWithProxyUserFilter.java | 79 4 files changed, 13 insertions(+), 207 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cd42192/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java index 65d2211..ca221f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java @@ -29,9 +29,8 @@ import java.util.HashMap; import java.util.Map; /** - * Initializes {@link AuthenticationWithProxyUserFilter} - * which provides support for Kerberos HTTP SPNEGO authentication - * and proxy user authentication. + * Initializes hadoop-auth AuthenticationFilter which provides support for + * Kerberos HTTP SPNEGO authentication. * * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO * authentication for Hadoop JobTracker, NameNode, DataNodes and @@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends FilterInitializer { public void initFilter(FilterContainer container, Configuration conf) { Map filterConfig = getFilterConfigMap(conf, PREFIX); -// extend AuthenticationFilter's feature to -// support proxy user operation. container.addFilter("authentication", -AuthenticationWithProxyUserFilter.class.getName(), +AuthenticationFilter.class.getName(), filterConfig); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cd42192/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java deleted file mode 100644 index ea9b282..000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security; - -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.http.NameValuePair; -import
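The deleted filter's doAs lookup (cut off above) parsed the query string with the httpclient utilities it imports. A sketch of that helper under the assumption that the parameter match is case-insensitive:

import java.nio.charset.StandardCharsets;
import java.util.List;
import javax.servlet.http.HttpServletRequest;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;

final class DoAsParamSketch {
  static final String DO_AS = "doAs";

  /** Extract the doAs query parameter, or null when it is absent. */
  static String getDoAs(HttpServletRequest request) {
    String queryString = request.getQueryString();
    if (queryString == null) {
      return null;
    }
    List<NameValuePair> pairs =
        URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8);
    for (NameValuePair pair : pairs) {
      if (DO_AS.equalsIgnoreCase(pair.getName())) {
        return pair.getValue();
      }
    }
    return null;
  }
}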
[2/3] hadoop git commit: Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contributed by Yuanbo Liu."
Revert "HADOOP-13119. Add ability to secure log servlet using proxy users. Contribute by Yuanbo Liu." This reverts commit a847903b6e64c6edb11d852b91f2c816b1253eb3. Change-Id: I3122a2142f5bdf8507dece930e447556a43cd9ae (cherry picked from commit 8fad3ec76070ccfcd3ed80feaba4355077bc6f5c) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa6a8b78 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa6a8b78 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa6a8b78 Branch: refs/heads/trunk Commit: fa6a8b78d481d3b4d355e1bf078f30dd5e09850d Parents: 3a8dade Author: Owen O'MalleyAuthored: Thu Mar 1 10:15:22 2018 -0800 Committer: Wangda Tan Committed: Fri Mar 9 22:46:41 2018 -0800 -- .../AuthenticationFilterInitializer.java| 9 +- .../AuthenticationWithProxyUserFilter.java | 119 - .../hadoop/http/TestHttpServerWithSpengo.java | 481 --- .../security/TestAuthenticationFilter.java | 13 +- .../TestAuthenticationWithProxyUserFilter.java | 79 --- 5 files changed, 13 insertions(+), 688 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java index 65d2211..ca221f5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java @@ -29,9 +29,8 @@ import java.util.HashMap; import java.util.Map; /** - * Initializes {@link AuthenticationWithProxyUserFilter} - * which provides support for Kerberos HTTP SPNEGO authentication - * and proxy user authentication. + * Initializes hadoop-auth AuthenticationFilter which provides support for + * Kerberos HTTP SPNEGO authentication. * * It enables anonymous access, simple/speudo and Kerberos HTTP SPNEGO * authentication for Hadoop JobTracker, NameNode, DataNodes and @@ -59,10 +58,8 @@ public class AuthenticationFilterInitializer extends FilterInitializer { public void initFilter(FilterContainer container, Configuration conf) { Map filterConfig = getFilterConfigMap(conf, PREFIX); -// extend AuthenticationFilter's feature to -// support proxy user operation. container.addFilter("authentication", -AuthenticationWithProxyUserFilter.class.getName(), +AuthenticationFilter.class.getName(), filterConfig); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa6a8b78/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java deleted file mode 100644 index ea9b282..000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.security; - -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.http.NameValuePair;
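The getFilterConfigMap(conf, PREFIX) call above follows a common Hadoop pattern: copy every property under a prefix into a plain map with the prefix stripped. A standalone sketch of that pattern; the class and method names here are illustrative:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public final class PrefixedFilterConfigSketch {
  /** Collect properties under the prefix into a filter-config map. */
  public static Map<String, String> get(Configuration conf, String prefix) {
    Map<String, String> filterConfig = new HashMap<>();
    // Configuration is Iterable over its raw entries; re-read each value
    // through conf.get() so variable substitution is applied.
    for (Map.Entry<String, String> entry : conf) {
      String name = entry.getKey();
      if (name.startsWith(prefix)) {
        filterConfig.put(name.substring(prefix.length()), conf.get(name));
      }
    }
    return filterConfig;
  }
}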
[1/3] hadoop git commit: Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4743d4a2c -> ea18e70a7

Revert "HADOOP-14077. Add ability to access jmx via proxy. Contributed by Yuanbo Liu."

This reverts commit 172b23af33554b7d58fd41b022d983bcc2433da7.
(cherry picked from commit d0d2d4c51e9534e08893ae14cf3fff7b2ee70b1d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a8dade9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a8dade9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a8dade9

Branch: refs/heads/trunk
Commit: 3a8dade9b1bf01cf75fc68cecb351c23302cdee5
Parents: 4743d4a
Author: Owen O'Malley
Authored: Thu Mar 1 09:59:08 2018 -0800
Committer: Wangda Tan
Committed: Fri Mar 9 22:46:30 2018 -0800

--
 .../AuthenticationWithProxyUserFilter.java      |  43 ---
 .../hadoop/http/TestHttpServerWithSpengo.java   |  15 +--
 .../mapreduce/v2/app/webapp/AppController.java  |   7 +-
 .../hadoop/yarn/server/webapp/AppBlock.java     | 113 ++-
 4 files changed, 85 insertions(+), 93 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a8dade9/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
index c97f8ad..ea9b282 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java
@@ -20,10 +20,9 @@ package org.apache.hadoop.security;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.http.NameValuePair;
 import org.apache.http.client.utils.URLEncodedUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

 import javax.servlet.FilterChain;
 import javax.servlet.ServletException;
@@ -42,9 +41,6 @@ import java.util.List;
  */
 public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {

-  public static final Logger LOG =
-      LoggerFactory.getLogger(AuthenticationWithProxyUserFilter.class);
-
   /**
    * Constant used in URL's query string to perform a proxy user request, the
    * value of the DO_AS parameter is the user the request will be
@@ -70,30 +66,29 @@ public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
   protected void doFilter(FilterChain filterChain, HttpServletRequest request,
       HttpServletResponse response) throws IOException, ServletException {

-    final String proxyUser = getDoAs(request);
+    // authorize proxy user before calling next filter.
+    String proxyUser = getDoAs(request);
     if (proxyUser != null) {
+      UserGroupInformation realUser =
+          UserGroupInformation.createRemoteUser(request.getRemoteUser());
+      UserGroupInformation proxyUserInfo =
+          UserGroupInformation.createProxyUser(proxyUser, realUser);
-      // Change the remote user after proxy user is authorized.
-      final HttpServletRequest finalReq = request;
-      request = new HttpServletRequestWrapper(finalReq) {
-
-        private String getRemoteOrProxyUser() throws AuthorizationException {
-          UserGroupInformation realUser =
-              UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
-          UserGroupInformation proxyUserInfo =
-              UserGroupInformation.createProxyUser(proxyUser, realUser);
-          ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
-          return proxyUserInfo.getUserName();
-        }
+      try {
+        ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
+      } catch (AuthorizationException ex) {
+        HttpExceptionUtils.createServletExceptionResponse(response,
+            HttpServletResponse.SC_FORBIDDEN, ex);
+        // stop filter chain if there is an Authorization Exception.
+        return;
+      }
+      final UserGroupInformation finalProxyUser = proxyUserInfo;
+      // Change the remote user after proxy user is authorized.
+      request = new HttpServletRequestWrapper(request) {
         @Override
         public String getRemoteUser() {
-          try {
-            return getRemoteOrProxyUser();
-          } catch (AuthorizationException ex) {
-
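For contrast, the removed lines above show the lazier variant this revert deletes: authorization deferred into the request wrapper, so the proxy check re-ran on every getRemoteUser() call. A sketch of that shape; the null fallback after the logged failure is an assumption, since the diff is truncated at that point:

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class LazyProxyUserWrapperSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LazyProxyUserWrapperSketch.class);

  static HttpServletRequest wrap(HttpServletRequest req, String proxyUser) {
    final HttpServletRequest finalReq = req;
    return new HttpServletRequestWrapper(finalReq) {
      private String getRemoteOrProxyUser() throws AuthorizationException {
        UserGroupInformation realUser =
            UserGroupInformation.createRemoteUser(finalReq.getRemoteUser());
        UserGroupInformation proxyUserInfo =
            UserGroupInformation.createProxyUser(proxyUser, realUser);
        // Authorization happens on every call, not once per request.
        ProxyUsers.authorize(proxyUserInfo, finalReq.getRemoteAddr());
        return proxyUserInfo.getUserName();
      }

      @Override
      public String getRemoteUser() {
        try {
          return getRemoteOrProxyUser();
        } catch (AuthorizationException ex) {
          LOG.error("Unable to verify proxy user: " + ex.getMessage(), ex);
          return null; // assumption: the original's fallback is truncated
        }
      }
    };
  }
}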
[3/3] hadoop git commit: Updated timeline reader to use AuthenticationFilter
Updated timeline reader to use AuthenticationFilter

Change-Id: I961771589180c1eb377d36c37a79aa23754effbf
(cherry picked from commit 837338788eb903d0e8bbb1230694782a707891be)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea18e70a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea18e70a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea18e70a

Branch: refs/heads/trunk
Commit: ea18e70a74e811ffa48c7e18e68510dd37dda63d
Parents: fa6a8b7
Author: Wangda Tan
Authored: Thu Mar 8 09:23:45 2018 -0800
Committer: Wangda Tan
Committed: Fri Mar 9 22:51:08 2018 -0800

--
 .../TimelineReaderAuthenticationFilterInitializer.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea18e70a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
index e0e1f4d..6a3658d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/security/TimelineReaderAuthenticationFilterInitializer.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.yarn.server.timelineservice.reader.security;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
-import org.apache.hadoop.security.AuthenticationWithProxyUserFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer;

 /**
- * Filter initializer to initialize {@link AuthenticationWithProxyUserFilter}
+ * Filter initializer to initialize {@link AuthenticationFilter}
  * for ATSv2 timeline reader server with timeline service specific
  * configurations.
  */
@@ -32,9 +32,9 @@ public class TimelineReaderAuthenticationFilterInitializer extends
     TimelineAuthenticationFilterInitializer{

   /**
-   * Initializes {@link AuthenticationWithProxyUserFilter}
+   * Initializes {@link AuthenticationFilter}
    *
-   * Propagates to {@link AuthenticationWithProxyUserFilter} configuration all
+   * Propagates to {@link AuthenticationFilter} configuration all
    * YARN configuration properties prefixed with
    * {@value TimelineAuthenticationFilterInitializer#PREFIX}.
    *
@@ -47,7 +47,7 @@ public class TimelineReaderAuthenticationFilterInitializer extends
   public void initFilter(FilterContainer container, Configuration conf) {
     setAuthFilterConfig(conf);

     container.addGlobalFilter("Timeline Reader Authentication Filter",
-        AuthenticationWithProxyUserFilter.class.getName(),
+        AuthenticationFilter.class.getName(),
         getFilterConfig());
   }
 }
hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 a3c47eba7 -> 56ac7db7e

HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

(cherry picked from commit 4743d4a2c70a213a41804a24c776e6db00e1b90d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56ac7db7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56ac7db7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56ac7db7

Branch: refs/heads/branch-2.9
Commit: 56ac7db7ec61f9cd492ea9e72bf3a42241e4d724
Parents: a3c47eb
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:34:42 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56ac7db7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 600c38e..14fa179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and will support rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)). The subclusters in RBF are not required to be the independent HDFS clusters, a normal federation cluster
+(with multiple block pools) or a mixed cluster with federation and independent cluster is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
@@ -304,8 +306,8 @@ The connection to the State Store and the internal caching at the Router.
 | Property | Default | Description |
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.serializer | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl` | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time in milliseconds for a membership record. |
@@ -316,8 +318,8 @@ Forwarding client requests to the right subcluster.
 | Property | Default | Description |
 |:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. |

 ### Namenode monitoring
hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0dadcf9ea -> 3a3492489

HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

(cherry picked from commit 4743d4a2c70a213a41804a24c776e6db00e1b90d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3a349248
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3a349248
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3a349248

Branch: refs/heads/branch-2
Commit: 3a349248960bc560b8ac63fea05e458775a42387
Parents: 0dadcf9
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:33:44 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3a349248/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5412aae..fdaaa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and will support rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)). The subclusters in RBF are not required to be the independent HDFS clusters, a normal federation cluster
+(with multiple block pools) or a mixed cluster with federation and independent cluster is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
@@ -324,8 +326,8 @@ The connection to the State Store and the internal caching at the Router.
 | Property | Default | Description |
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.serializer | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl` | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time in milliseconds for a membership record. |
@@ -336,8 +338,8 @@ Forwarding client requests to the right subcluster.
 | Property | Default | Description |
 |:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. |

 ### Namenode monitoring
hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 465c3f0e2 -> 2e607f210

HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

(cherry picked from commit 4743d4a2c70a213a41804a24c776e6db00e1b90d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e607f21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e607f21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e607f21

Branch: refs/heads/branch-3.0
Commit: 2e607f2103bf0a50e838431a2f356366c71a0d80
Parents: 465c3f0
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:32:12 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e607f21/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 600c38e..14fa179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and will support rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)). The subclusters in RBF are not required to be the independent HDFS clusters, a normal federation cluster
+(with multiple block pools) or a mixed cluster with federation and independent cluster is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
@@ -304,8 +306,8 @@ The connection to the State Store and the internal caching at the Router.
 | Property | Default | Description |
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.serializer | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl` | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time in milliseconds for a membership record. |
@@ -316,8 +318,8 @@ Forwarding client requests to the right subcluster.
 | Property | Default | Description |
 |:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. |

 ### Namenode monitoring
hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 86a1c8975 -> 60c6ded48

HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

(cherry picked from commit 4743d4a2c70a213a41804a24c776e6db00e1b90d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60c6ded4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60c6ded4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60c6ded4

Branch: refs/heads/branch-3.1
Commit: 60c6ded483c6e0ebd93c3c4a261090aea57d92e8
Parents: 86a1c89
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:30:52 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60c6ded4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5412aae..fdaaa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and will support rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)). The subclusters in RBF are not required to be the independent HDFS clusters, a normal federation cluster
+(with multiple block pools) or a mixed cluster with federation and independent cluster is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
@@ -324,8 +326,8 @@ The connection to the State Store and the internal caching at the Router.
 | Property | Default | Description |
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.serializer | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl` | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time in milliseconds for a membership record. |
@@ -336,8 +338,8 @@ Forwarding client requests to the right subcluster.
 | Property | Default | Description |
 |:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. |

 ### Namenode monitoring
hadoop git commit: HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8133cd530 -> 4743d4a2c

HDFS-13240. RBF: Update some inaccurate document descriptions. Contributed by Yiqun Lin.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4743d4a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4743d4a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4743d4a2

Branch: refs/heads/trunk
Commit: 4743d4a2c70a213a41804a24c776e6db00e1b90d
Parents: 8133cd5
Author: Yiqun Lin
Authored: Sat Mar 10 11:28:55 2018 +0800
Committer: Yiqun Lin
Committed: Sat Mar 10 11:28:55 2018 +0800

--
 .../src/site/markdown/HDFSRouterFederation.md | 12 +++-
 1 file changed, 7 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4743d4a2/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index 5412aae..fdaaa11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -29,7 +29,9 @@ Architecture
 A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
-This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and will support rebalancing of data across subclusters later
+(see more info in [HDFS-13123](https://issues.apache.org/jira/browse/HDFS-13123)). The subclusters in RBF are not required to be the independent HDFS clusters, a normal federation cluster
+(with multiple block pools) or a mixed cluster with federation and independent cluster is also allowed.
 To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
 This layer must be scalable, highly available, and fault tolerant.
@@ -324,8 +326,8 @@ The connection to the State Store and the internal caching at the Router.
 | Property | Default | Description |
 |:---- |:---- |:---- |
 | dfs.federation.router.store.enable | `true` | If `true`, the Router connects to the State Store. |
-| dfs.federation.router.store.serializer | `StateStoreSerializerPBImpl` | Class to serialize State Store records. |
-| dfs.federation.router.store.driver.class | `StateStoreZooKeeperImpl` | Class to implement the State Store. |
+| dfs.federation.router.store.serializer | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl` | Class to serialize State Store records. |
+| dfs.federation.router.store.driver.class | `org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl` | Class to implement the State Store. |
 | dfs.federation.router.store.connection.test | 6 | How often to check for the connection to the State Store in milliseconds. |
 | dfs.federation.router.cache.ttl | 6 | How often to refresh the State Store caches in milliseconds. |
 | dfs.federation.router.store.membership.expiration | 30 | Expiration time in milliseconds for a membership record. |
@@ -336,8 +338,8 @@ Forwarding client requests to the right subcluster.
 | Property | Default | Description |
 |:---- |:---- |:---- |
-| dfs.federation.router.file.resolver.client.class | MountTableResolver | Class to resolve files to subclusters. |
-| dfs.federation.router.namenode.resolver.client.class | MembershipNamenodeResolver | Class to resolve the namenode for a subcluster. |
+| dfs.federation.router.file.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver` | Class to resolve files to subclusters. |
+| dfs.federation.router.namenode.resolver.client.class | `org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver` | Class to resolve the namenode for a subcluster. |

 ### Namenode monitoring
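The motivation for spelling out the package names in this doc fix: these values are loaded as classes (typically via Configuration.getClass()), so only a fully qualified, loadable class name resolves. A programmatic sketch equivalent to setting them in hdfs-site.xml, with the key strings copied from the table above:

import org.apache.hadoop.conf.Configuration;

public class RouterConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Short names like "StateStoreZooKeeperImpl" cannot be class-loaded;
    // the fully qualified names below can.
    conf.set("dfs.federation.router.store.driver.class",
        "org.apache.hadoop.hdfs.server.federation.store.driver.impl."
            + "StateStoreZooKeeperImpl");
    conf.set("dfs.federation.router.file.resolver.client.class",
        "org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver");
    conf.set("dfs.federation.router.namenode.resolver.client.class",
        "org.apache.hadoop.hdfs.server.federation.resolver."
            + "MembershipNamenodeResolver");
  }
}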
hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4b6b07690 -> 0dadcf9ea

HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

(cherry picked from commit 8133cd5305d7913453abb2d48da12f672c0ce334)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0dadcf9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0dadcf9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0dadcf9e

Branch: refs/heads/branch-2
Commit: 0dadcf9ea376752a98173d2878dda942826634c0
Parents: 4b6b076
Author: Inigo Goiri
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 18:28:59 2018 -0800

--
 .../federation/router/ConnectionPool.java |  2 +-
 .../router/TestConnectionManager.java     | 43
 2 files changed, 44 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0dadcf9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 0b10187..06bed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
     for (int i=0; i

 poolMap = connManager.getPools();
+final int totalConns = 10;
+int activeConns = 5;
+
+ConnectionPool pool = new ConnectionPool(
+    conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+addConnectionsToPool(pool, totalConns, activeConns);
+poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+// All remaining connections should be usable
+final int remainingSlots = totalConns - activeConns;
+for
hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 40682baae -> a3c47eba7

HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

(cherry picked from commit 8133cd5305d7913453abb2d48da12f672c0ce334)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3c47eba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3c47eba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3c47eba

Branch: refs/heads/branch-2.9
Commit: a3c47eba7f73cf979ecf2d7c4b0f8d6ab081860e
Parents: 40682ba
Author: Inigo Goiri
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 18:28:28 2018 -0800

--
 .../federation/router/ConnectionPool.java |  2 +-
 .../router/TestConnectionManager.java     | 43
 2 files changed, 44 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3c47eba/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 0b10187..06bed9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
     for (int i=0; i

 poolMap = connManager.getPools();
+final int totalConns = 10;
+int activeConns = 5;
+
+ConnectionPool pool = new ConnectionPool(
+    conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+addConnectionsToPool(pool, totalConns, activeConns);
+poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+// All remaining connections should be usable
+final int remainingSlots = totalConns - activeConns;
+
hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c453fcb23 -> 465c3f0e2

HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

(cherry picked from commit 8133cd5305d7913453abb2d48da12f672c0ce334)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/465c3f0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/465c3f0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/465c3f0e

Branch: refs/heads/branch-3.0
Commit: 465c3f0e24a1fc5a6768d6e922cf2fd1c84ce176
Parents: c453fcb
Author: Inigo Goiri
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 18:27:17 2018 -0800

--
 .../federation/router/ConnectionPool.java |  2 +-
 .../router/TestConnectionManager.java     | 43
 2 files changed, 44 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/465c3f0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 5c77c59..5af8a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
     for (int i=0; i

 poolMap = connManager.getPools();
+final int totalConns = 10;
+int activeConns = 5;
+
+ConnectionPool pool = new ConnectionPool(
+    conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+addConnectionsToPool(pool, totalConns, activeConns);
+poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+// All remaining connections should be usable
+final int remainingSlots = totalConns - activeConns;
+
hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 4d41f933a -> 86a1c8975

HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

(cherry picked from commit 8133cd5305d7913453abb2d48da12f672c0ce334)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86a1c897
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86a1c897
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86a1c897

Branch: refs/heads/branch-3.1
Commit: 86a1c89754db4d925afe1140e6af56cb9f3bc4cc
Parents: 4d41f93
Author: Inigo Goiri
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 18:25:46 2018 -0800

--
 .../federation/router/ConnectionPool.java |  2 +-
 .../router/TestConnectionManager.java     | 43
 2 files changed, 44 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86a1c897/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 5c77c59..5af8a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
     for (int i=0; i

 poolMap = connManager.getPools();
+final int totalConns = 10;
+int activeConns = 5;
+
+ConnectionPool pool = new ConnectionPool(
+    conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+addConnectionsToPool(pool, totalConns, activeConns);
+poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+// All remaining connections should be usable
+final int remainingSlots = totalConns - activeConns;
+
hadoop git commit: HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.
Repository: hadoop
Updated Branches: refs/heads/trunk afe1a3ccd -> 8133cd530

HDFS-13232. RBF: ConnectionPool should return first usable connection. Contributed by Ekanth S.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8133cd53
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8133cd53
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8133cd53

Branch: refs/heads/trunk
Commit: 8133cd5305d7913453abb2d48da12f672c0ce334
Parents: afe1a3c
Author: Inigo Goiri
Authored: Fri Mar 9 18:25:05 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 18:25:05 2018 -0800
--
 .../federation/router/ConnectionPool.java |  2 +-
 .../router/TestConnectionManager.java     | 43
 2 files changed, 44 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8133cd53/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
index 5c77c59..5af8a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
@@ -159,7 +159,7 @@ public class ConnectionPool {
     for (int i=0; i<size; i++) {
[...]
+    Map<ConnectionPoolId, ConnectionPool> poolMap = connManager.getPools();
+    final int totalConns = 10;
+    int activeConns = 5;
+
+    ConnectionPool pool = new ConnectionPool(
+        conf, TEST_NN_ADDRESS, TEST_USER1, 0, 10);
+    addConnectionsToPool(pool, totalConns, activeConns);
+    poolMap.put(new ConnectionPoolId(TEST_USER1, TEST_NN_ADDRESS), pool);
+
+    // All remaining connections should be usable
+    final int remainingSlots = totalConns - activeConns;
+    for (int i = 0; i < remainingSlots; i++) {
+      ConnectionContext cc =
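The point of this fix, per the commit title, is that the Router's round-robin scan over the pool should hand back the first *usable* connection rather than a busy or closed one. A minimal, self-contained sketch of that selection loop follows; `RoundRobinPool` and `Connection` are illustrative stand-ins for the Router's ConnectionPool and ConnectionContext, not the real Hadoop API.

```java
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinPool {
  /** Stand-in for the Router's ConnectionContext. */
  public interface Connection {
    boolean isUsable();
  }

  private final List<Connection> connections;
  private final AtomicInteger clientIndex = new AtomicInteger(0);

  public RoundRobinPool(List<Connection> connections) {
    this.connections = connections;
  }

  /**
   * Scan the pool round-robin and return the first usable connection.
   * If every slot is busy, fall back to the starting slot so the caller
   * can decide whether to wait or grow the pool.
   */
  public Connection getConnection() {
    int size = connections.size();
    if (size == 0) {
      return null;
    }
    // Mask off the sign bit so the start index stays non-negative
    // even after the counter overflows.
    int start = clientIndex.getAndIncrement() & 0x7FFFFFFF;
    for (int i = 0; i < size; i++) {
      Connection conn = connections.get((start + i) % size);
      if (conn != null && conn.isUsable()) {
        return conn; // first usable wins; busy entries are skipped
      }
    }
    return connections.get(start % size);
  }
}
```

The test added by the commit checks exactly this property: with 5 of 10 pooled connections active, the remaining 5 requests must each receive a usable connection.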
hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.
Repository: hadoop
Updated Branches: refs/heads/branch-2.9 ba7b103c9 -> 40682baae

HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

(cherry picked from commit afe1a3ccd56a12fec900360a8a2855c080728e65)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40682baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40682baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40682baa

Branch: refs/heads/branch-2.9
Commit: 40682baae7e2f6f44babfa13701432ab288ead15
Parents: ba7b103
Author: Inigo Goiri
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 17:21:43 2018 -0800
--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java        | 46
 2 files changed, 58 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40682baa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
       Entry<String, PathLocation> entry = it.next();
       PathLocation loc = entry.getValue();
       String src = loc.getSourcePath();
-      if (src.startsWith(path)) {
-        LOG.debug("Removing {}", src);
-        it.remove();
+      if (src != null) {
+        if (src.startsWith(path)) {
+          LOG.debug("Removing {}", src);
+          it.remove();
+        }
+      } else {
+        String dest = loc.getDefaultLocation().getDest();
+        if (dest.startsWith(path)) {
+          LOG.debug("Removing default cache {}", dest);
+          it.remove();
+        }
       }
     }

@@ -287,6 +295,7 @@ public class MountTableResolver
     if (!oldEntries.contains(srcPath)) {
       // Add node, it does not exist
       this.tree.put(srcPath, entry);
+      invalidateLocationCache(srcPath);
       LOG.info("Added new mount point {} to resolver", srcPath);
     } else {
       // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40682baa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
     Configuration conf = new Configuration();
     conf.setInt(
         FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+    conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
     mountTable = new MountTableResolver(conf);

     // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
     long cacheSize = mountTable.getCacheSize();
     assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+
hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.
Repository: hadoop
Updated Branches: refs/heads/branch-2 121daaf47 -> 4b6b07690

HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

(cherry picked from commit afe1a3ccd56a12fec900360a8a2855c080728e65)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b6b0769
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b6b0769
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b6b0769

Branch: refs/heads/branch-2
Commit: 4b6b07690e7d4f582b9328017e3204b700a4c1eb
Parents: 121daaf
Author: Inigo Goiri
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 17:21:10 2018 -0800
--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java        | 46
 2 files changed, 58 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b6b0769/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
       Entry<String, PathLocation> entry = it.next();
       PathLocation loc = entry.getValue();
       String src = loc.getSourcePath();
-      if (src.startsWith(path)) {
-        LOG.debug("Removing {}", src);
-        it.remove();
+      if (src != null) {
+        if (src.startsWith(path)) {
+          LOG.debug("Removing {}", src);
+          it.remove();
+        }
+      } else {
+        String dest = loc.getDefaultLocation().getDest();
+        if (dest.startsWith(path)) {
+          LOG.debug("Removing default cache {}", dest);
+          it.remove();
+        }
       }
     }

@@ -287,6 +295,7 @@ public class MountTableResolver
     if (!oldEntries.contains(srcPath)) {
       // Add node, it does not exist
       this.tree.put(srcPath, entry);
+      invalidateLocationCache(srcPath);
       LOG.info("Added new mount point {} to resolver", srcPath);
     } else {
       // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b6b0769/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
     Configuration conf = new Configuration();
     conf.setInt(
         FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+    conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
     mountTable = new MountTableResolver(conf);

     // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
     long cacheSize = mountTable.getCacheSize();
     assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+    MountTable
hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.
Repository: hadoop
Updated Branches: refs/heads/branch-3.0 bd2ebf031 -> c453fcb23

HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

(cherry picked from commit afe1a3ccd56a12fec900360a8a2855c080728e65)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c453fcb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c453fcb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c453fcb2

Branch: refs/heads/branch-3.0
Commit: c453fcb23fde0153cd8ebfa808c5728a2c8b99d4
Parents: bd2ebf0
Author: Inigo Goiri
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 17:20:29 2018 -0800
--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java        | 46
 2 files changed, 58 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c453fcb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
       Entry<String, PathLocation> entry = it.next();
       PathLocation loc = entry.getValue();
       String src = loc.getSourcePath();
-      if (src.startsWith(path)) {
-        LOG.debug("Removing {}", src);
-        it.remove();
+      if (src != null) {
+        if (src.startsWith(path)) {
+          LOG.debug("Removing {}", src);
+          it.remove();
+        }
+      } else {
+        String dest = loc.getDefaultLocation().getDest();
+        if (dest.startsWith(path)) {
+          LOG.debug("Removing default cache {}", dest);
+          it.remove();
+        }
       }
     }

@@ -287,6 +295,7 @@ public class MountTableResolver
     if (!oldEntries.contains(srcPath)) {
       // Add node, it does not exist
       this.tree.put(srcPath, entry);
+      invalidateLocationCache(srcPath);
       LOG.info("Added new mount point {} to resolver", srcPath);
     } else {
       // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c453fcb2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
     Configuration conf = new Configuration();
     conf.setInt(
         FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+    conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
     mountTable = new MountTableResolver(conf);

     // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
     long cacheSize = mountTable.getCacheSize();
     assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+
hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.
Repository: hadoop
Updated Branches: refs/heads/branch-3.1 c337c97c6 -> 4d41f933a

HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

(cherry picked from commit afe1a3ccd56a12fec900360a8a2855c080728e65)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d41f933
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d41f933
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d41f933

Branch: refs/heads/branch-3.1
Commit: 4d41f933a298a2644284abcb4971c30543e85dc3
Parents: c337c97
Author: Inigo Goiri
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 17:19:50 2018 -0800
--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java        | 46
 2 files changed, 58 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d41f933/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
       Entry<String, PathLocation> entry = it.next();
       PathLocation loc = entry.getValue();
       String src = loc.getSourcePath();
-      if (src.startsWith(path)) {
-        LOG.debug("Removing {}", src);
-        it.remove();
+      if (src != null) {
+        if (src.startsWith(path)) {
+          LOG.debug("Removing {}", src);
+          it.remove();
+        }
+      } else {
+        String dest = loc.getDefaultLocation().getDest();
+        if (dest.startsWith(path)) {
+          LOG.debug("Removing default cache {}", dest);
+          it.remove();
+        }
       }
     }

@@ -287,6 +295,7 @@ public class MountTableResolver
     if (!oldEntries.contains(srcPath)) {
       // Add node, it does not exist
       this.tree.put(srcPath, entry);
+      invalidateLocationCache(srcPath);
       LOG.info("Added new mount point {} to resolver", srcPath);
     } else {
       // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d41f933/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
     Configuration conf = new Configuration();
     conf.setInt(
         FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+    conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
     mountTable = new MountTableResolver(conf);

     // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
     long cacheSize = mountTable.getCacheSize();
     assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+
hadoop git commit: HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.
Repository: hadoop
Updated Branches: refs/heads/trunk ba0da2785 -> afe1a3ccd

HDFS-13212. RBF: Fix router location cache issue. Contributed by Weiwei Wu.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/afe1a3cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/afe1a3cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/afe1a3cc

Branch: refs/heads/trunk
Commit: afe1a3ccd56a12fec900360a8a2855c080728e65
Parents: ba0da27
Author: Inigo Goiri
Authored: Fri Mar 9 17:18:51 2018 -0800
Committer: Inigo Goiri
Committed: Fri Mar 9 17:18:51 2018 -0800
--
 .../federation/resolver/MountTableResolver.java | 15 +--
 .../resolver/TestMountTableResolver.java        | 46
 2 files changed, 58 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
index dac6f7f..2c7d1f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
@@ -238,9 +238,17 @@ public class MountTableResolver
       Entry<String, PathLocation> entry = it.next();
       PathLocation loc = entry.getValue();
       String src = loc.getSourcePath();
-      if (src.startsWith(path)) {
-        LOG.debug("Removing {}", src);
-        it.remove();
+      if (src != null) {
+        if (src.startsWith(path)) {
+          LOG.debug("Removing {}", src);
+          it.remove();
+        }
+      } else {
+        String dest = loc.getDefaultLocation().getDest();
+        if (dest.startsWith(path)) {
+          LOG.debug("Removing default cache {}", dest);
+          it.remove();
+        }
       }
     }

@@ -287,6 +295,7 @@ public class MountTableResolver
     if (!oldEntries.contains(srcPath)) {
       // Add node, it does not exist
       this.tree.put(srcPath, entry);
+      invalidateLocationCache(srcPath);
       LOG.info("Added new mount point {} to resolver", srcPath);
     } else {
       // Node exists, check for updates

http://git-wip-us.apache.org/repos/asf/hadoop/blob/afe1a3cc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
index a09daf0..f530fe9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.federation.resolver;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_DEFAULT_NAMESERVICE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -82,6 +83,7 @@ public class TestMountTableResolver {
     Configuration conf = new Configuration();
     conf.setInt(
         FEDERATION_MOUNT_TABLE_MAX_CACHE_SIZE, TEST_MAX_CACHE_SIZE);
+    conf.setStrings(DFS_ROUTER_DEFAULT_NAMESERVICE, "0");
     mountTable = new MountTableResolver(conf);

     // Root mount point
@@ -479,4 +481,48 @@ public class TestMountTableResolver {
     long cacheSize = mountTable.getCacheSize();
     assertTrue(cacheSize <= TEST_MAX_CACHE_SIZE);
   }
+
+  @Test
+  public void testLocationCache() throws Exception {
+    List<MountTable> entries = new ArrayList<>();
+
+    // Add entry and test location cache
+    Map<String, String> map1 = getMountTableEntry("1", "/testlocationcache");
+    MountTable entry1 = MountTable.newInstance("/testlocationcache", map1);
+    entries.add(entry1);
+
+    Map<String, String> map2 = getMountTableEntry("2",
+        "/anothertestlocationcache");
+    MountTable entry2 = MountTable.newInstance("/anothertestlocationcache",
+
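The invalidation rule in this patch has two cases: cached locations that carry a source path are dropped when that path falls under the changed mount point, while entries resolved through the default nameservice carry no source path and must be matched on their destination instead. A compact sketch of that rule follows, assuming a plain map-backed cache; `Location` is an illustrative stand-in for the Router's PathLocation, not the real class.

```java
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LocationCacheSketch {
  /** Stand-in for the Router's PathLocation. */
  public static final class Location {
    final String sourcePath;  // null for default-nameservice entries
    final String defaultDest;

    Location(String sourcePath, String defaultDest) {
      this.sourcePath = sourcePath;
      this.defaultDest = defaultDest;
    }
  }

  private final Map<String, Location> cache = new ConcurrentHashMap<>();

  public void put(String path, Location loc) {
    cache.put(path, loc);
  }

  /** Drop every cached location affected by a change under {@code path}. */
  public void invalidate(String path) {
    Iterator<Map.Entry<String, Location>> it = cache.entrySet().iterator();
    while (it.hasNext()) {
      Location loc = it.next().getValue();
      String src = loc.sourcePath;
      if (src != null) {
        if (src.startsWith(path)) {
          it.remove();  // keyed by source path: match on it directly
        }
      } else if (loc.defaultDest.startsWith(path)) {
        // Default-nameservice entries have no source path; before this
        // fix they were never matched and went stale in the cache.
        it.remove();
      }
    }
  }
}
```

The second half of the patch calls the invalidation when a new mount point is added, not only when one is updated or removed, which is why the test preloads the cache and then mutates the mount table.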
hadoop git commit: HDFS-13252. Code refactoring: Remove Diff.ListType.
Repository: hadoop
Updated Branches: refs/heads/trunk 4eeff62f6 -> ba0da2785

HDFS-13252. Code refactoring: Remove Diff.ListType.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba0da278
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba0da278
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba0da278

Branch: refs/heads/trunk
Commit: ba0da2785d251745969f88a50d33ce61876d91aa
Parents: 4eeff62
Author: Tsz-Wo Nicholas Sze
Authored: Fri Mar 9 15:25:41 2018 -0800
Committer: Tsz-Wo Nicholas Sze
Committed: Fri Mar 9 15:50:26 2018 -0800
--
 .../hdfs/server/namenode/FSDirRenameOp.java     |   3 +-
 .../hdfs/server/namenode/INodeDirectory.java    |  10 +-
 .../hdfs/server/namenode/INodeReference.java    |   4 -
 .../snapshot/DirectorySnapshottableFeature.java |   5 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  | 131 +++
 .../snapshot/FSImageFormatPBSnapshot.java       |   6 +-
 .../namenode/snapshot/SnapshotDiffInfo.java     |  11 +-
 .../snapshot/SnapshotDiffListingInfo.java       |  15 +--
 .../snapshot/SnapshotFSImageFormat.java         |   4 +-
 .../java/org/apache/hadoop/hdfs/util/Diff.java  | 131 +--
 .../namenode/snapshot/SnapshotTestHelper.java   |  79 ++-
 .../snapshot/TestRenameWithSnapshots.java       | 129 +++---
 .../snapshot/TestSetQuotaWithSnapshot.java      |   6 +-
 13 files changed, 260 insertions(+), 274 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index efc8da2..6162ceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -588,8 +588,7 @@ class FSDirRenameOp {
     private INode srcChild;
     private INode oldDstChild;

-    RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
-        throws QuotaExceededException {
+    RenameOperation(FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP) {
       this.fsd = fsd;
       this.srcIIP = srcIIP;
       this.dstIIP = dstIIP;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba0da278/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 6594a56..72ad9e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFea
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
-import org.apache.hadoop.hdfs.util.Diff.ListType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;

 import com.google.common.annotations.VisibleForTesting;
@@ -353,7 +352,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     // replace the instance in the created list of the diff list
     DirectoryWithSnapshotFeature sf = this.getDirectoryWithSnapshotFeature();
     if (sf != null) {
-      sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+      sf.getDiffs().replaceCreatedChild(oldChild, newChild);
     }

     // update the inodeMap
@@ -746,8 +745,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
       final INode newChild) throws QuotaExceededException {
     DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
     assert sf != null : "Directory does not have snapshot feature";
-    sf.getDiffs().removeChild(ListType.DELETED, oldChild);
-    sf.getDiffs().replaceChild(ListType.CREATED, oldChild, newChild);
+    sf.getDiffs().removeDeletedChild(oldChild);
+    sf.getDiffs().replaceCreatedChild(oldChild, newChild);
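The refactoring pattern visible in these hunks is the replacement of an enum-parameterized accessor (`replaceChild(ListType.CREATED, ...)`, `removeChild(ListType.DELETED, ...)`) with explicitly named methods, so call sites read directly and cannot pass the wrong list selector. A generic sketch of the pattern follows; `ChildDiff` and its members are illustrative, not the actual `Diff` class from the patch.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Sketch of the HDFS-13252 refactoring pattern: one list per kind of
 * change, exposed through dedicated methods instead of an enum switch.
 */
class ChildDiff<E> {
  private final List<E> created = new ArrayList<>();
  private final List<E> deleted = new ArrayList<>();

  // Before the refactor, callers selected a list with an enum:
  //   diff.getList(ListType.CREATED).add(child);
  // After it, each list has its own accessor and mutators.
  List<E> getCreatedList() {
    return created;
  }

  List<E> getDeletedList() {
    return deleted;
  }

  boolean removeDeletedChild(E child) {
    return deleted.remove(child);
  }

  void replaceCreatedChild(E oldChild, E newChild) {
    int i = created.indexOf(oldChild);
    if (i >= 0) {
      created.set(i, newChild);  // swap in place, order preserved
    }
  }
}
```

The win is purely in readability and type discipline: the enum and the dispatch on it disappear, which is why the commit removes roughly as many lines as it adds.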
hadoop git commit: HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.
Repository: hadoop
Updated Branches: refs/heads/branch-3.0 80aabd72c -> bd2ebf031

HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.

(cherry picked from commit 4eeff62f6925991bca725b1ede5308055817de80)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd2ebf03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd2ebf03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd2ebf03

Branch: refs/heads/branch-3.0
Commit: bd2ebf031c0328159ca8976ba0a67986eb5944ad
Parents: 80aabd7
Author: Hanisha Koneru
Authored: Fri Mar 9 15:27:17 2018 -0800
Committer: Hanisha Koneru
Committed: Fri Mar 9 15:49:09 2018 -0800
--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html  | 3 +++
 2 files changed, 6 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd2ebf03/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 6ae3960..ec801e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -43,6 +43,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd2ebf03/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3700a5e..29f114b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -44,6 +44,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump
hadoop git commit: HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.
Repository: hadoop
Updated Branches: refs/heads/branch-3.1 0f76690c7 -> c337c97c6

HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.

(cherry picked from commit 4eeff62f6925991bca725b1ede5308055817de80)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c337c97c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c337c97c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c337c97c

Branch: refs/heads/branch-3.1
Commit: c337c97c63cf547d2d5992916bcf5ceb0a66c915
Parents: 0f76690
Author: Hanisha Koneru
Authored: Fri Mar 9 15:27:17 2018 -0800
Committer: Hanisha Koneru
Committed: Fri Mar 9 15:48:28 2018 -0800
--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html  | 3 +++
 2 files changed, 6 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c337c97c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 96b1210..a928425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -43,6 +43,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c337c97c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3700a5e..29f114b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -44,6 +44,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump
hadoop git commit: HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.
Repository: hadoop
Updated Branches: refs/heads/trunk 7b0dc3102 -> 4eeff62f6

HDFS-13244. Add stack, conf, metrics links to utilities dropdown in NN webUI. Contributed by Bharat Viswanadham.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4eeff62f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4eeff62f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4eeff62f

Branch: refs/heads/trunk
Commit: 4eeff62f6925991bca725b1ede5308055817de80
Parents: 7b0dc31
Author: Hanisha Koneru
Authored: Fri Mar 9 15:27:17 2018 -0800
Committer: Hanisha Koneru
Committed: Fri Mar 9 15:27:17 2018 -0800
--
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/explorer.html  | 3 +++
 2 files changed, 6 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eeff62f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 96b1210..a928425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -43,6 +43,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4eeff62f/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 3700a5e..29f114b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -44,6 +44,9 @@
         Browse the file system
         Logs
+        Metrics
+        Configuration
+        Process Thread Dump
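The three new dropdown items point at the NameNode's standard HTTP servlets: /jmx for metrics, /conf for configuration, and /stacks for a thread dump. A small example of reading one of them programmatically follows; the host and port are placeholders for a real NameNode web address, and the class is illustrative rather than part of any Hadoop API.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/** Fetch the NameNode's /jmx servlet, the target of the new Metrics link. */
public class FetchNnServlet {
  public static void main(String[] args) throws Exception {
    // Placeholder address; substitute the NameNode's real web UI host:port.
    URL url = new URL("http://namenode.example.com:9870/jmx");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);  // JSON dump of all JMX MBeans
      }
    } finally {
      conn.disconnect();
    }
  }
}
```

The same pattern works for /conf (XML of the effective configuration) and /stacks (plain-text thread dump); the webUI change merely surfaces endpoints that were already there.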
hadoop git commit: HDFS-13190. Document WebHDFS support for snapshot diff
Repository: hadoop
Updated Branches: refs/heads/branch-3.1 32e8290e2 -> 0f76690c7

HDFS-13190. Document WebHDFS support for snapshot diff

Signed-off-by: Akira Ajisaka
Signed-off-by: Xiaoyu Yao

(cherry picked from commit 7b0dc310208ee5bc191c9accb3d1312513145653)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f76690c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f76690c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f76690c

Branch: refs/heads/branch-3.1
Commit: 0f76690c73e5afeeac4ab1c82fb26a5bcae8474b
Parents: 32e8290
Author: Lokesh Jain
Authored: Fri Mar 9 15:04:14 2018 -0800
Committer: Akira Ajisaka
Committed: Fri Mar 9 15:07:16 2018 -0800
--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 92
 1 file changed, 92 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f76690c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 4a1395e..057ca59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,6 +50,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
 * [`CHECKACCESS`](#Check_access) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
 * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
 * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
+* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
 * HTTP PUT
 * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
 * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1266,6 +1267,21 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSna
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot

+### Get Snapshot Diff
+
+* Submit a HTTP GET request.
+
+        curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTDIFF
+                      &oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
+
+The client receives a response with a [`SnapshotDiffReport` JSON object](#SnapshotDiffReport_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
+
 Delegation Token Operations
 ---------------------------

@@ -2043,6 +2059,82 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
 }
 ```

+### SnapshotDiffReport JSON Schema
+
+```json
+{
+  "name": "SnapshotDiffReport",
+  "type": "object",
+  "properties":
+  {
+    "SnapshotDiffReport":
+    {
+      "type": "object",
+      "properties" :
+      {
+        "diffList":
+        {
+          "description": "An array of DiffReportEntry",
+          "type": "array",
+          "items" : diffReportEntries,
+          "required": true
+        },
+        "fromSnapshot":
+        {
+          "description": "Source snapshot",
+          "type": "string",
+          "required": true
+        },
+        "snapshotRoot":
+        {
+          "description" : "String representation of snapshot root path",
+          "type": "string",
+          "required": true
+        },
+        "toSnapshot":
+        {
+          "description" : "Destination snapshot",
+          "type": "string",
+          "required": true
+        }
+      }
+    }
+  }
+}
+```
+
+#### DiffReport Entries
+
+JavaScript syntax is used to define `diffReportEntries` so that it can be referred in `SnapshotDiffReport` JSON schema.
+
+```javascript
+var diffReportEntries =
+{
+  "type": "object",
+  "properties":
+  {
+    "sourcePath":
+    {
+      "description" : "Source path name relative to snapshot root",
+      "type": "string",
+      "required": true
+    },
+    "targetPath":
+    {
+      "description" : "Target path relative to snapshot root used for renames",
+      "type": "string",
+      "required": true
+    },
+    "type":
+    {
+      "description" : "Type of diff report entry",
+      "enum": ["CREATE", "MODIFY", "DELETE", "RENAME"],
+      "required": true
+    }
+  }
+}
+```
+
 HTTP Query
hadoop git commit: HDFS-13190. Document WebHDFS support for snapshot diff
Repository: hadoop
Updated Branches: refs/heads/trunk 9a082fbe6 -> 7b0dc3102

HDFS-13190. Document WebHDFS support for snapshot diff

Signed-off-by: Akira Ajisaka
Signed-off-by: Xiaoyu Yao

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b0dc310
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b0dc310
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b0dc310

Branch: refs/heads/trunk
Commit: 7b0dc310208ee5bc191c9accb3d1312513145653
Parents: 9a082fb
Author: Lokesh Jain
Authored: Fri Mar 9 15:04:14 2018 -0800
Committer: Akira Ajisaka
Committed: Fri Mar 9 15:06:15 2018 -0800
--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 92
 1 file changed, 92 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b0dc310/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 4a1395e..057ca59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,6 +50,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
 * [`CHECKACCESS`](#Check_access) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
 * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
 * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
+* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
 * HTTP PUT
 * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
 * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1266,6 +1267,21 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSna
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot

+### Get Snapshot Diff
+
+* Submit a HTTP GET request.
+
+        curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTDIFF
+                      &oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
+
+The client receives a response with a [`SnapshotDiffReport` JSON object](#SnapshotDiffReport_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
+
 Delegation Token Operations
 ---------------------------

@@ -2043,6 +2059,82 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
 }
 ```

+### SnapshotDiffReport JSON Schema
+
+```json
+{
+  "name": "SnapshotDiffReport",
+  "type": "object",
+  "properties":
+  {
+    "SnapshotDiffReport":
+    {
+      "type": "object",
+      "properties" :
+      {
+        "diffList":
+        {
+          "description": "An array of DiffReportEntry",
+          "type": "array",
+          "items" : diffReportEntries,
+          "required": true
+        },
+        "fromSnapshot":
+        {
+          "description": "Source snapshot",
+          "type": "string",
+          "required": true
+        },
+        "snapshotRoot":
+        {
+          "description" : "String representation of snapshot root path",
+          "type": "string",
+          "required": true
+        },
+        "toSnapshot":
+        {
+          "description" : "Destination snapshot",
+          "type": "string",
+          "required": true
+        }
+      }
+    }
+  }
+}
+```
+
+#### DiffReport Entries
+
+JavaScript syntax is used to define `diffReportEntries` so that it can be referred in `SnapshotDiffReport` JSON schema.
+
+```javascript
+var diffReportEntries =
+{
+  "type": "object",
+  "properties":
+  {
+    "sourcePath":
+    {
+      "description" : "Source path name relative to snapshot root",
+      "type": "string",
+      "required": true
+    },
+    "targetPath":
+    {
+      "description" : "Target path relative to snapshot root used for renames",
+      "type": "string",
+      "required": true
+    },
+    "type":
+    {
+      "description" : "Type of diff report entry",
+      "enum": ["CREATE", "MODIFY", "DELETE", "RENAME"],
+      "required": true
+    }
+  }
+}
+```
+
 HTTP Query Parameter Dictionary
 ---
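Because GETSNAPSHOTDIFF is plain HTTP, the documented call can be exercised without any Hadoop client library. A minimal example that issues the query and prints the JSON body follows; the host, port, path, and snapshot names are placeholders, and the URL shape is taken directly from the WebHDFS.md hunk above.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

/** Issue a GETSNAPSHOTDIFF request against a (placeholder) NameNode. */
public class GetSnapshotDiff {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://namenode.example.com:9870/webhdfs/v1/foo"
        + "?op=GETSNAPSHOTDIFF&oldsnapshotname=s3&snapshotname=s4");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      StringBuilder body = new StringBuilder();
      String line;
      while ((line = in.readLine()) != null) {
        body.append(line);
      }
      // Expected shape per the schema above, e.g.
      // {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3",...}}
      System.out.println(body);
    } finally {
      conn.disconnect();
    }
  }
}
```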
hadoop git commit: HDFS-13190. Document WebHDFS support for snapshot diff
Repository: hadoop
Updated Branches: refs/heads/branch-3.0 dec53bb9e -> 80aabd72c

HDFS-13190. Document WebHDFS support for snapshot diff

Signed-off-by: Akira Ajisaka
Signed-off-by: Xiaoyu Yao

(cherry picked from commit 7b0dc310208ee5bc191c9accb3d1312513145653)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/80aabd72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/80aabd72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/80aabd72

Branch: refs/heads/branch-3.0
Commit: 80aabd72c24386699fd95ffe573b3057cba54b61
Parents: dec53bb
Author: Lokesh Jain
Authored: Fri Mar 9 15:04:14 2018 -0800
Committer: Akira Ajisaka
Committed: Fri Mar 9 15:07:34 2018 -0800
--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md | 92
 1 file changed, 92 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/80aabd72/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 84e8a57..d9ce232 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -50,6 +50,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
 * [`CHECKACCESS`](#Check_access) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).access)
 * [`GETALLSTORAGEPOLICY`](#Get_all_Storage_Policies) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getAllStoragePolicies)
 * [`GETSTORAGEPOLICY`](#Get_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStoragePolicy)
+* [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
 * HTTP PUT
 * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
 * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1259,6 +1260,21 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSna
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot

+### Get Snapshot Diff
+
+* Submit a HTTP GET request.
+
+        curl -i GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSNAPSHOTDIFF
+                      &oldsnapshotname=<SNAPSHOTNAME>&snapshotname=<SNAPSHOTNAME>"
+
+The client receives a response with a [`SnapshotDiffReport` JSON object](#SnapshotDiffReport_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {"SnapshotDiffReport":{"diffList":[],"fromSnapshot":"s3","snapshotRoot":"/foo","toSnapshot":"s4"}}
+
 Delegation Token Operations
 ---------------------------

@@ -2036,6 +2052,82 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
 }
 ```

+### SnapshotDiffReport JSON Schema
+
+```json
+{
+  "name": "SnapshotDiffReport",
+  "type": "object",
+  "properties":
+  {
+    "SnapshotDiffReport":
+    {
+      "type": "object",
+      "properties" :
+      {
+        "diffList":
+        {
+          "description": "An array of DiffReportEntry",
+          "type": "array",
+          "items" : diffReportEntries,
+          "required": true
+        },
+        "fromSnapshot":
+        {
+          "description": "Source snapshot",
+          "type": "string",
+          "required": true
+        },
+        "snapshotRoot":
+        {
+          "description" : "String representation of snapshot root path",
+          "type": "string",
+          "required": true
+        },
+        "toSnapshot":
+        {
+          "description" : "Destination snapshot",
+          "type": "string",
+          "required": true
+        }
+      }
+    }
+  }
+}
+```
+
+#### DiffReport Entries
+
+JavaScript syntax is used to define `diffReportEntries` so that it can be referred in `SnapshotDiffReport` JSON schema.
+
+```javascript
+var diffReportEntries =
+{
+  "type": "object",
+  "properties":
+  {
+    "sourcePath":
+    {
+      "description" : "Source path name relative to snapshot root",
+      "type": "string",
+      "required": true
+    },
+    "targetPath":
+    {
+      "description" : "Target path relative to snapshot root used for renames",
+      "type": "string",
+      "required": true
+    },
+    "type":
+    {
+      "description" : "Type of diff report entry",
+      "enum": ["CREATE", "MODIFY", "DELETE", "RENAME"],
+      "required": true
+    }
+  }
+}
+```
+
 HTTP Query
[hadoop] Git Push Summary
Repository: hadoop
Updated Branches: refs/heads/YARN-8006 [created] 9a082fbe6
hadoop git commit: HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.
Repository: hadoop
Updated Branches: refs/heads/branch-3.0 9dacb9fb2 -> dec53bb9e

HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.

(cherry picked from commit 9a082fbe6e302df7139b65a23be9a39acd87715d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dec53bb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dec53bb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dec53bb9

Branch: refs/heads/branch-3.0
Commit: dec53bb9e133bfddb73b29b7a8d9f0076986fc31
Parents: 9dacb9f
Author: Hanisha Koneru
Authored: Fri Mar 9 13:20:32 2018 -0800
Committer: Hanisha Koneru
Committed: Fri Mar 9 13:39:49 2018 -0800
--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |   3 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 111 +++
 2 files changed, 114 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dec53bb9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index ac90c40..b78b063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -134,6 +134,9 @@ public class JsonUtil {
   }
   if (status.isErasureCoded()) {
     m.put("ecBit", true);
+    if (status.getErasureCodingPolicy() != null) {
+      m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+    }
   }
   m.put("accessTime", status.getAccessTime());
   m.put("modificationTime", status.getModificationTime());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dec53bb9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index b883c50..157b0cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -44,6 +44,7 @@ import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Random;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -109,6 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.log4j.Level;
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -1521,4 +1524,112 @@ public class TestWebHDFS {
     }
   }
+
+  /**
+   * Tests that the LISTSTATUS and GETFILESTATUS WebHDFS calls return the
+   * ecPolicy for EC files.
+   */
+  @Test(timeout=30)
+  public void testECPolicyInFileStatus() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    final String ecPolicyName = ecPolicy.getName();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(5)
+        .build();
+    cluster.waitActive();
+    final DistributedFileSystem fs = cluster.getFileSystem();
+
+    // Create an EC dir and write a test file in it
+    final Path ecDir = new Path("/ec");
+    Path ecFile = new Path(ecDir, "ec_file.txt");
+    Path nonEcFile = new Path(ecDir, "non_ec_file.txt");
+    fs.mkdirs(ecDir);
+
+    // Create a non-EC file before enabling ec policy
+    DFSTestUtil.createFile(fs, nonEcFile, 1024, (short) 1, 0);
+
hadoop git commit: HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.
Repository: hadoop
Updated Branches: refs/heads/branch-3.1 af48a2a0f -> 32e8290e2

HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.

(cherry picked from commit 9a082fbe6e302df7139b65a23be9a39acd87715d)

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32e8290e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32e8290e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32e8290e

Branch: refs/heads/branch-3.1
Commit: 32e8290e20c1075b98a6e31c534adfa0e647fe1d
Parents: af48a2a
Author: Hanisha Koneru
Authored: Fri Mar 9 13:20:32 2018 -0800
Committer: Hanisha Koneru
Committed: Fri Mar 9 13:21:39 2018 -0800
--
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |   3 +
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 111 +++
 2 files changed, 114 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e8290e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 095b9ac..83fbc6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -134,6 +134,9 @@ public class JsonUtil {
   }
   if (status.isErasureCoded()) {
     m.put("ecBit", true);
+    if (status.getErasureCodingPolicy() != null) {
+      m.put("ecPolicy", status.getErasureCodingPolicy().getName());
+    }
   }
   if (status.isSnapshotEnabled()) {
     m.put("snapshotEnabled", status.isSnapshotEnabled());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/32e8290e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index c94122e..8571d82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -44,6 +44,7 @@ import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Random;
@@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -109,6 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.log4j.Level;
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.Assert;
@@ -1578,6 +1581,114 @@ public class TestWebHDFS {
     }
   }

+  /**
+   * Tests that the LISTSTATUS and GETFILESTATUS WebHDFS calls return the
+   * ecPolicy for EC files.
+   */
+  @Test(timeout=30)
+  public void testECPolicyInFileStatus() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+        .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    final String ecPolicyName = ecPolicy.getName();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(5)
+        .build();
+    cluster.waitActive();
+    final DistributedFileSystem fs = cluster.getFileSystem();
+
+    // Create an EC dir and write a test file in it
+    final Path ecDir = new Path("/ec");
+    Path ecFile = new Path(ecDir, "ec_file.txt");
+    Path nonEcFile = new Path(ecDir, "non_ec_file.txt");
+    fs.mkdirs(ecDir);
+
+    // Create a non-EC file before enabling ec policy
+    DFSTestUtil.createFile(fs, nonEcFile, 1024, (short) 1, 0);
+
+    fs.enableErasureCodingPolicy(ecPolicyName);
+
hadoop git commit: HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus.
Repository: hadoop Updated Branches: refs/heads/trunk 99ab511cb -> 9a082fbe6 HDFS-11394. Support for getting erasure coding policy through WebHDFS#FileStatus. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a082fbe Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a082fbe Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a082fbe Branch: refs/heads/trunk Commit: 9a082fbe6e302df7139b65a23be9a39acd87715d Parents: 99ab511 Author: Hanisha KoneruAuthored: Fri Mar 9 13:20:32 2018 -0800 Committer: Hanisha Koneru Committed: Fri Mar 9 13:20:32 2018 -0800 -- .../org/apache/hadoop/hdfs/web/JsonUtil.java| 3 + .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 111 +++ 2 files changed, 114 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a082fbe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index 095b9ac..83fbc6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -134,6 +134,9 @@ public class JsonUtil { } if (status.isErasureCoded()) { m.put("ecBit", true); + if (status.getErasureCodingPolicy() != null) { +m.put("ecPolicy", status.getErasureCodingPolicy().getName()); + } } if (status.isSnapshotEnabled()) { m.put("snapshotEnabled", status.isSnapshotEnabled()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a082fbe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index c94122e..8571d82 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -44,6 +44,7 @@ import java.net.SocketTimeoutException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Random; @@ -81,6 +82,7 @@ import org.apache.hadoop.hdfs.TestDFSClientRetries; import org.apache.hadoop.hdfs.TestFileCreation; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; @@ -109,6 +111,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DataChecksum; import org.apache.log4j.Level; +import org.codehaus.jettison.json.JSONArray; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.Assert; @@ -1578,6 +1581,114 @@ public class TestWebHDFS { } } + /** + * Tests that the LISTSTATUS ang GETFILESTATUS WebHDFS calls return the + * ecPolicy for EC files. 
+ */
+ @Test(timeout=30)
+ public void testECPolicyInFileStatus() throws Exception {
+   final Configuration conf = WebHdfsTestUtil.createConf();
+   final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
+       .getByID(SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+   final String ecPolicyName = ecPolicy.getName();
+   MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+       .numDataNodes(5)
+       .build();
+   cluster.waitActive();
+   final DistributedFileSystem fs = cluster.getFileSystem();
+
+   // Create an EC dir and write a test file in it
+   final Path ecDir = new Path("/ec");
+   Path ecFile = new Path(ecDir, "ec_file.txt");
+   Path nonEcFile = new Path(ecDir, "non_ec_file.txt");
+   fs.mkdirs(ecDir);
+
+   // Create a non-EC file before enabling ec policy
+   DFSTestUtil.createFile(fs, nonEcFile, 1024, (short) 1, 0);
+
+   fs.enableErasureCodingPolicy(ecPolicyName);
+   fs.setErasureCodingPolicy(ecDir, ecPolicyName);
+
+   // Create an EC file
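The test's new imports (Jettison's JSONObject and JSONArray) suggest the assertions parse the raw LISTSTATUS response. A minimal sketch of that style of check, assuming a locally reachable namenode and the standard WebHDFS response layout; the URL and printed fields are illustrative, not taken from the patch:

    import java.io.InputStream;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;
    import org.apache.commons.io.IOUtils;
    import org.codehaus.jettison.json.JSONArray;
    import org.codehaus.jettison.json.JSONObject;

    public class ListStatusEcCheck {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9870/webhdfs/v1/ec?op=LISTSTATUS");
        try (InputStream in = url.openStream()) {
          String body = IOUtils.toString(in, StandardCharsets.UTF_8);
          // LISTSTATUS wraps the entries as FileStatuses.FileStatus[].
          JSONArray statuses = new JSONObject(body)
              .getJSONObject("FileStatuses").getJSONArray("FileStatus");
          for (int i = 0; i < statuses.length(); i++) {
            JSONObject st = statuses.getJSONObject(i);
            if (st.optBoolean("ecBit")) {
              // EC entries should now also carry the policy name.
              System.out.println(st.getString("pathSuffix") + " -> "
                  + st.getString("ecPolicy"));
            }
          }
        }
      }
    }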
hadoop git commit: HADOOP-15293. TestLogLevel fails on Java 9
Repository: hadoop Updated Branches: refs/heads/trunk 4f395063b -> 99ab511cb HADOOP-15293. TestLogLevel fails on Java 9 Signed-off-by: Akira AjisakaProject: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99ab511c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99ab511c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99ab511c Branch: refs/heads/trunk Commit: 99ab511cbac570bea9d31a55898b95590a8e3159 Parents: 4f39506 Author: Takanobu Asanuma Authored: Fri Mar 9 10:20:35 2018 -0800 Committer: Akira Ajisaka Committed: Fri Mar 9 10:20:35 2018 -0800 -- .../src/test/java/org/apache/hadoop/log/TestLogLevel.java| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/99ab511c/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index 16b4071..fd30b50 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -358,7 +358,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { } catch (SSLException e) { GenericTestUtils.assertExceptionContains("Error while authenticating " + "with endpoint", e); - GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e + GenericTestUtils.assertExceptionContains("recognized SSL message", e .getCause()); } } @@ -379,7 +379,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { } catch (SSLException e) { GenericTestUtils.assertExceptionContains("Error while authenticating " + "with endpoint", e); - GenericTestUtils.assertExceptionContains("Unrecognized SSL message", e + GenericTestUtils.assertExceptionContains("recognized SSL message", e .getCause()); } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
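The fix loosens the asserted substring because the SSLException wording differs across JDKs; the Java 9 text below is our assumption from JDK sources, not something stated in this patch. A small sketch showing why the shared fragment matches both:

    public class SslMessageMatch {
      public static void main(String[] args) {
        String java8 = "Unrecognized SSL message, plaintext connection?";
        String java9 = "Unsupported or unrecognized SSL message";
        // Both variants contain the fragment the test now asserts on,
        // so GenericTestUtils.assertExceptionContains stays JDK-agnostic.
        System.out.println(java8.contains("recognized SSL message")); // true
        System.out.println(java9.contains("recognized SSL message")); // true
      }
    }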
hadoop git commit: YARN-8000. Yarn Service component instance name shows up as component name in container record. Contributed by Chandni Singh
Repository: hadoop Updated Branches: refs/heads/trunk 32fa3a63e -> 4f395063b YARN-8000. Yarn Service component instance name shows up as component name in container record. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f395063 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f395063 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f395063 Branch: refs/heads/trunk Commit: 4f395063bbae1636d4c59bc962916d78694b50d3 Parents: 32fa3a6 Author: Billie RinaldiAuthored: Fri Mar 9 08:50:28 2018 -0800 Committer: Billie Rinaldi Committed: Fri Mar 9 08:50:28 2018 -0800 -- .../main/resources/definition/YARN-Services-Examples.md | 4 ++-- .../hadoop/yarn/service/api/records/Container.java | 12 ++-- .../src/site/markdown/yarn-service/YarnServiceAPI.md| 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f395063/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md index 00b21dd..e4cdc7b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Services-Examples.md @@ -72,7 +72,7 @@ Note, lifetime value of -1 means unlimited lifetime. "state": "READY", "launch_time": 1504051512412, "bare_host": "10.22.8.143", -"component_name": "hello-0" +"component_instance_name": "hello-0" }, { "id": "container_e03_1503963985568_0002_01_02", @@ -81,7 +81,7 @@ Note, lifetime value of -1 means unlimited lifetime. "state": "READY", "launch_time": 1504051536450, "bare_host": "10.22.8.143", -"component_name": "hello-1" +"component_instance_name": "hello-1" } ], "launch_command": "./start_nginx.sh", http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f395063/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java index af06542..1ffd85f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Container.java @@ -173,20 +173,20 @@ public class Container extends BaseResource { } /** - * Name of the component that this container instance belongs to. + * Name of the component instance that this container instance belongs to. 
   **/
-  public Container componentName(String componentName) {
-    this.componentInstanceName = componentName;
+  public Container componentInstanceName(String componentInstanceName) {
+    this.componentInstanceName = componentInstanceName;
     return this;
   }

-  @ApiModelProperty(example = "null", value = "Name of the component that this container instance belongs to.")
-  @JsonProperty("component_name")
+  @ApiModelProperty(example = "null", value = "Name of the component instance that this container instance belongs to.")
+  @JsonProperty("component_instance_name")
   public String getComponentInstanceName() {
     return componentInstanceName;
   }

-  @XmlElement(name = "component_name")
+  @XmlElement(name = "component_instance_name")
   public void setComponentInstanceName(String componentInstanceName) {
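Because @JsonProperty and @XmlElement were renamed together, both the JSON and XML views of a container record now use the new key. A container entry from the updated YARN-Services-Examples.md looks like this (values taken verbatim from the patch):

    {
      "id": "container_e03_1503963985568_0002_01_02",
      "state": "READY",
      "launch_time": 1504051536450,
      "bare_host": "10.22.8.143",
      "component_instance_name": "hello-1"
    }

Clients that previously read component_name from the service REST API will need to switch to component_instance_name.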
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/branch-2.8 f4621e022 -> 54ede2586 MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao (cherry picked from commit 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/54ede258 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/54ede258 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/54ede258 Branch: refs/heads/branch-2.8 Commit: 54ede2586c0fda35ef0d4a2d2a7dcfefede52691 Parents: f4621e0 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:47:35 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/54ede258/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index a0a969f..54aebdd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -415,22 +415,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/branch-2.9 04e18e747 -> ba7b103c9 MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao (cherry picked from commit 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba7b103c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba7b103c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba7b103c Branch: refs/heads/branch-2.9 Commit: ba7b103c958c36d68a20afbc88a6dcd6ada8ebe6 Parents: 04e18e7 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:46:35 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba7b103c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index b182114..05cbcd6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -428,22 +428,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/branch-2 aa748c6bc -> 121daaf47 MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao (cherry picked from commit 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/121daaf4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/121daaf4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/121daaf4 Branch: refs/heads/branch-2 Commit: 121daaf474339445163f5cd13441ea67a702ef1f Parents: aa748c6 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:45:47 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/121daaf4/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index c0f287b..4f89762 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -428,22 +428,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/branch-3.0 ca5c4d454 -> 9dacb9fb2 MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao (cherry picked from commit 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dacb9fb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dacb9fb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dacb9fb Branch: refs/heads/branch-3.0 Commit: 9dacb9fb27bec371083de7fc03629362a0876e24 Parents: ca5c4d4 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:44:49 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dacb9fb/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index 3bf2543..db36e91 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -400,22 +400,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/branch-3.1 2958666e0 -> af48a2a0f MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao (cherry picked from commit 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af48a2a0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af48a2a0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af48a2a0 Branch: refs/heads/branch-3.1 Commit: af48a2a0f36d620bc3e1f3c8cf871384a94f0750 Parents: 2958666 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:43:08 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/af48a2a0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index d0e5a2d..cf8be33 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -422,22 +422,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao
Repository: hadoop Updated Branches: refs/heads/trunk 3f7bd4679 -> 32fa3a63e MAPREDUCE-6930. mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores are both present twice in mapred-default.xml. Contributed by Sen Zhao Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32fa3a63 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32fa3a63 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32fa3a63 Branch: refs/heads/trunk Commit: 32fa3a63e0e7d8bfb3d3b9b3c500ecb3a4874ecf Parents: 3f7bd46 Author: Jason LoweAuthored: Fri Mar 9 10:41:16 2018 -0600 Committer: Jason Lowe Committed: Fri Mar 9 10:41:16 2018 -0600 -- .../src/main/resources/mapred-default.xml | 16 1 file changed, 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/32fa3a63/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index d0e5a2d..cf8be33 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -422,22 +422,6 @@ - mapreduce.map.cpu.vcores - 1 - - The number of virtual cores required for each map task. - - - - - mapreduce.reduce.cpu.vcores - 1 - - The number of virtual cores required for each reduce task. - - - - mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
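The duplicate <property> blocks were dead weight: to our reading of Configuration's resource loading, when a key appears twice in one file the later definition simply overwrites the earlier one, so only one value ever took effect. After the cleanup each key resolves to its single remaining definition; a sketch (assumes mapred-default.xml is on the classpath):

    import org.apache.hadoop.conf.Configuration;

    public class VcoresDefaults {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        // Load only the MapReduce defaults for this check.
        conf.addResource("mapred-default.xml");
        // Each key now has exactly one definition; the fallback of 1
        // matches the documented default.
        System.out.println(conf.getInt("mapreduce.map.cpu.vcores", 1));
        System.out.println(conf.getInt("mapreduce.reduce.cpu.vcores", 1));
      }
    }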
[1/2] hadoop git commit: HADOOP-15273. distcp can't handle remote stores with different checksum algorithms. Contributed by Steve Loughran.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 f879504fe -> ca5c4d454 HADOOP-15273.distcp can't handle remote stores with different checksum algorithms. Contributed by Steve Loughran. (cherry picked from commit 7ef4d942dd96232b0743a40ed25f77065254f94d) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1771af23 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1771af23 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1771af23 Branch: refs/heads/branch-3.0 Commit: 1771af2320b9627c37aae7593e14a850d4935115 Parents: f879504 Author: Steve LoughranAuthored: Thu Mar 8 11:24:06 2018 + Committer: Steve Loughran Committed: Fri Mar 9 10:49:11 2018 + -- .../org/apache/hadoop/tools/DistCpOptions.java | 5 .../tools/mapred/RetriableFileCopyCommand.java | 29 +++- .../hadoop/tools/mapred/TestCopyMapper.java | 14 +- 3 files changed, 29 insertions(+), 19 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1771af23/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java -- diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index ece1a94..f33f7fd 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -534,11 +534,6 @@ public final class DistCpOptions { + "mutually exclusive"); } - if (!syncFolder && skipCRC) { -throw new IllegalArgumentException( -"Skip CRC is valid only with update options"); - } - if (!syncFolder && append) { throw new IllegalArgumentException( "Append is valid only with update options"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/1771af23/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java -- diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java index 21f621a..1eabf7f 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java @@ -210,15 +210,30 @@ public class RetriableFileCopyCommand extends RetriableCommand { throws IOException { if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum, targetFS, target)) { - StringBuilder errorMessage = new StringBuilder("Check-sum mismatch between ") - .append(source).append(" and ").append(target).append("."); - if (sourceFS.getFileStatus(source).getBlockSize() != + StringBuilder errorMessage = + new StringBuilder("Checksum mismatch between ") + .append(source).append(" and ").append(target).append("."); + boolean addSkipHint = false; + String srcScheme = sourceFS.getScheme(); + String targetScheme = targetFS.getScheme(); + if (!srcScheme.equals(targetScheme) + && !(srcScheme.contains("hdfs") && targetScheme.contains("hdfs"))) { +// the filesystems are different and they aren't both hdfs connectors +errorMessage.append("Source and destination filesystems are of" ++ " different types\n") +.append("Their checksum algorithms may be incompatible"); +addSkipHint = true; + } else if (sourceFS.getFileStatus(source).getBlockSize() != 
          targetFS.getFileStatus(target).getBlockSize()) {
-        errorMessage.append(" Source and target differ in block-size.")
-            .append(" Use -pb to preserve block-sizes during copy.")
-            .append(" Alternatively, skip checksum-checks altogether, using -skipCrc.")
+        errorMessage.append(" Source and target differ in block-size.\n")
+            .append(" Use -pb to preserve block-sizes during copy.");
+        addSkipHint = true;
+      }
+      if (addSkipHint) {
+        errorMessage.append(" You can skip checksum-checks altogether "
+            + " with -skipcrccheck.\n")
             .append(" (NOTE: By skipping checksums, one runs the risk of " +
-                "masking data-corruption during file-transfer.)");
+                "masking data-corruption during file-transfer.)\n");
       }
       throw new
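In CLI terms, the patch gives users two ways out of a cross-store checksum mismatch, and the DistCpOptions change above drops the old rule that -skipcrccheck was only valid with -update. A sketch of both invocations; hosts, paths, and the bucket name are illustrative:

    # Preserve block sizes when both sides are HDFS, so checksums stay comparable:
    hadoop distcp -pb hdfs://nn1:8020/data hdfs://nn2:8020/data

    # Skip the post-copy checksum comparison when the stores cannot agree,
    # e.g. HDFS -> S3A. (NOTE: skipping checksums can mask corruption in transit.)
    hadoop distcp -skipcrccheck hdfs://nn1:8020/data s3a://bucket/data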
[2/2] hadoop git commit: HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran.
HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca5c4d45 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca5c4d45 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca5c4d45 Branch: refs/heads/branch-3.0 Commit: ca5c4d454e0d10cf342b8fb68d7a369673060601 Parents: 1771af2 Author: Steve LoughranAuthored: Fri Mar 9 10:44:07 2018 + Committer: Steve Loughran Committed: Fri Mar 9 10:50:04 2018 + -- .../hadoop-common/src/main/conf/log4j.properties | 3 +++ 1 file changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca5c4d45/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 5f4b22b..c31e54f 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -306,3 +306,6 @@ log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize} #log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex} + +# Log levels of third-party libraries +log4j.logger.org.apache.commons.beanutils=WARN - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[2/2] hadoop git commit: HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran.
HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran. (cherry picked from commit 3f7bd467979042161897a7c91c5b094b83164f75) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2958666e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2958666e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2958666e Branch: refs/heads/branch-3.1 Commit: 2958666e0475f54a8fcac5e9153f509f14379037 Parents: 9368f7f Author: Steve LoughranAuthored: Fri Mar 9 10:44:07 2018 + Committer: Steve Loughran Committed: Fri Mar 9 10:44:53 2018 + -- .../hadoop-common/src/main/conf/log4j.properties | 3 +++ 1 file changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2958666e/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 5f4b22b..c31e54f 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -306,3 +306,6 @@ log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize} #log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex} + +# Log levels of third-party libraries +log4j.logger.org.apache.commons.beanutils=WARN - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[1/2] hadoop git commit: HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran.
Repository: hadoop Updated Branches: refs/heads/branch-3.1 9368f7fba -> 2958666e0 refs/heads/trunk 122805b43 -> 3f7bd4679 HADOOP-15277. Remove .FluentPropertyBeanIntrospector from CLI operation log output. Contributed by Steve Loughran. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f7bd467 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f7bd467 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f7bd467 Branch: refs/heads/trunk Commit: 3f7bd467979042161897a7c91c5b094b83164f75 Parents: 122805b Author: Steve LoughranAuthored: Fri Mar 9 10:44:07 2018 + Committer: Steve Loughran Committed: Fri Mar 9 10:44:07 2018 + -- .../hadoop-common/src/main/conf/log4j.properties | 3 +++ 1 file changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f7bd467/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 5f4b22b..c31e54f 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -306,3 +306,6 @@ log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages} #log4j.appender.FSSTATEDUMP.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n #log4j.appender.FSSTATEDUMP.MaxFileSize=${hadoop.log.maxfilesize} #log4j.appender.FSSTATEDUMP.MaxBackupIndex=${hadoop.log.maxbackupindex} + +# Log levels of third-party libraries +log4j.logger.org.apache.commons.beanutils=WARN - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
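The new "third-party libraries" block in log4j.properties is a natural place to quiet other chatty dependencies in the same way; a sketch extending the pattern (the second logger name is illustrative, not part of this patch):

    # Log levels of third-party libraries
    log4j.logger.org.apache.commons.beanutils=WARN
    # e.g. also raise the threshold for Apache HttpClient categories:
    log4j.logger.org.apache.http=WARN

Because log4j resolves levels by the longest matching logger-name prefix, one line covers every class under that package.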
[Hadoop Wiki] Update of "Books" by Packt Publishing
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change notification.

The "Books" page has been changed by Packt Publishing:
https://wiki.apache.org/hadoop/Books?action=diff&rev1=46&rev2=47

  == Hadoop Videos ==
+
+ === Solving 10 Hadoop'able Problems (Video) ===
+ '''Name:''' [[https://www.packtpub.com/big-data-and-business-intelligence/solving-10-hadoopable-problems-video|Solving 10 Hadoop'able Problems (Video)]]
+
+ '''Author:''' Tomasz Lelek
+
+ '''Publisher:''' Packt
+
+ '''Date of Publishing:''' February 2018
+
+ Need solutions to your big data problems? Here are 10 real-world projects demonstrating problems solved using Hadoop
+
  === Learn By Example: Hadoop, MapReduce for Big Data problems (Video) ===
-
+
  '''Name:''' [[https://www.packtpub.com/big-data-and-business-intelligence/learn-example-hadoop-mapreduce-big-data-problems-video|Learn By Example: Hadoop, MapReduce for Big Data problems (Video)]]

  '''Author:''' Loonycorn

- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org