[08/50] [abbrv] ambari git commit: AMBARI-13437. HDFS File caching does not work because of ulimit not being passed into the start command for datanode. (aonishuk)

2015-10-19 Thread ncole
AMBARI-13437. HDFS File caching does not work because of ulimit not being 
passed into the start command for datanode. (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/52083d1f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/52083d1f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/52083d1f

Branch: refs/heads/branch-dev-patch-upgrade
Commit: 52083d1ff8425948485149cd1c862ba9ddcb58db
Parents: 81280ea
Author: Andrew Onishuk 
Authored: Thu Oct 15 19:58:01 2015 +0300
Committer: Andrew Onishuk 
Committed: Thu Oct 15 19:58:27 2015 +0300

--
 .../server/upgrade/UpgradeCatalog213.java   | 25 
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  8 +++
 .../2.0.6/hooks/before-ANY/scripts/params.py|  3 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../server/upgrade/UpgradeCatalog213Test.java   |  4 
 6 files changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
--
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 90a75be..803e5f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -58,7 +58,15 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
   private static final String HBASE_ENV_CONFIG = "hbase-env";
+  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
   private static final String CONTENT_PROPERTY = "content";
+  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
+                    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
+                    "# Makes sense to fix only when runing DN as root \n" +
+                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+                    "fi\n" +
+                    "{% endif %};\n";
 
  private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
  private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
@@ -141,6 +149,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 updateAMSConfigs();
 updateHDFSConfigs();
 updateHbaseEnvConfig();
+updateHadoopEnv();
 updateKafkaConfigs();
   }
 
@@ -212,6 +221,22 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
 return rootJson.toString();
   }
+
+  protected void updateHadoopEnv() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
+      if (hadoopEnvConfig != null) {
+        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content += HADOOP_ENV_CONTENT_TO_APPEND;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
 
   protected void updateHDFSConfigs() throws AmbariException {
    AmbariManagementController ambariManagementController = injector.getInstance(
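
The updateHadoopEnv() method added above does nothing more than append the guarded ulimit block to the cluster's current hadoop-env "content" property and hand the result back as a single-entry map. A minimal, self-contained sketch of just that append step (the class name and the sample existing content are illustrative, not part of the commit):

import java.util.Collections;
import java.util.Map;

public class HadoopEnvAppendSketch {
  // Mirrors the constants used by UpgradeCatalog213; the appended text is shortened here.
  static final String CONTENT_PROPERTY = "content";
  static final String HADOOP_ENV_CONTENT_TO_APPEND =
      "\n{% if is_datanode_max_locked_memory_set %}\n" +
      "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
      "  ulimit -l {{datanode_max_locked_memory}}\n" +
      "fi\n" +
      "{% endif %};\n";

  public static void main(String[] args) {
    // Stand-in for whatever hadoop-env content a real cluster already has.
    String existingContent = "export HADOOP_HEAPSIZE={{hadoop_heapsize}}";
    String newContent = existingContent + HADOOP_ENV_CONTENT_TO_APPEND;

    // The upgrade catalog passes a map like this to
    // updateConfigurationPropertiesForCluster(cluster, "hadoop-env", updates, true, false).
    Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, newContent);
    System.out.println(updates.get(CONTENT_PROPERTY));
  }
}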

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
--
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index a8f7951..5319da9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
+++ b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml

ambari git commit: AMBARI-13437. HDFS File caching does not work because of ulimit not being passed into the start command for datanode. (aonishuk)

2015-10-15 Thread aonishuk
Repository: ambari
Updated Branches:
  refs/heads/branch-2.1 8738b7ee6 -> 70c8adac7


AMBARI-13437. HDFS File caching does not work because of ulimit not being 
passed into the start command for datanode. (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/70c8adac
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/70c8adac
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/70c8adac

Branch: refs/heads/branch-2.1
Commit: 70c8adac7d1e6cd6592f0dc0da66a9b4eff0256e
Parents: 8738b7e
Author: Andrew Onishuk 
Authored: Thu Oct 15 20:01:42 2015 +0300
Committer: Andrew Onishuk 
Committed: Thu Oct 15 20:01:42 2015 +0300

--
 .../server/upgrade/UpgradeCatalog213.java   | 25 
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  8 +++
 .../2.0.6/hooks/before-ANY/scripts/params.py|  3 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../server/upgrade/UpgradeCatalog213Test.java   |  4 
 6 files changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/70c8adac/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
--
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 7a42b3b..a94723f 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -83,7 +83,15 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
   private static final String HBASE_ENV_CONFIG = "hbase-env";
+  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
   private static final String CONTENT_PROPERTY = "content";
+  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
+                    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
+                    "# Makes sense to fix only when runing DN as root \n" +
+                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+                    "fi\n" +
+                    "{% endif %};\n";
 
   public static final String UPGRADE_PACKAGE_COL = "upgrade_package";
   public static final String UPGRADE_TYPE_COL = "upgrade_type";
@@ -174,6 +182,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 bootstrapRepoVersionForHDP21();
 
 addNewConfigurationsFromXml();
+updateHadoopEnv();
 updateStormConfigs();
 updateAMSConfigs();
 updateHDFSConfigs();
@@ -575,6 +584,22 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
 return rootJson.toString();
   }
+
+  protected void updateHadoopEnv() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
+      if (hadoopEnvConfig != null) {
+        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content += HADOOP_ENV_CONTENT_TO_APPEND;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
 
   protected void updateHDFSConfigs() throws AmbariException {
    AmbariManagementController ambariManagementController = injector.getInstance(
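
The text appended to hadoop-env is a Jinja-style template fragment; it only takes effect once Ambari renders hadoop-env for a host, and only when is_datanode_max_locked_memory_set is true (presumably what the three new params.py lines in this commit provide). Below is purely a hand-rolled illustration, under assumed values, of the shell fragment a DataNode host would end up with; it is not how Ambari actually renders templates:

public class RenderedUlimitFragmentSketch {
  public static void main(String[] args) {
    String template =
        "\n{% if is_datanode_max_locked_memory_set %}\n" +
        "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
        "  ulimit -l {{datanode_max_locked_memory}}\n" +
        "fi\n" +
        "{% endif %};\n";

    boolean isDatanodeMaxLockedMemorySet = true;   // assumed: dfs.datanode.max.locked.memory is configured
    String datanodeMaxLockedMemory = "2147483648"; // assumed value, in bytes

    String rendered = "";
    if (isDatanodeMaxLockedMemorySet) {
      // Drop the {% if %}/{% endif %} markers and substitute the placeholder.
      rendered = template
          .replace("{% if is_datanode_max_locked_memory_set %}\n", "")
          .replace("{% endif %};\n", "")
          .replace("{{datanode_max_locked_memory}}", datanodeMaxLockedMemory);
    }
    // The guarded block raises the locked-memory ulimit only when the DataNode
    // is started as root with HADOOP_SECURE_DN_USER set, i.e. a secure DataNode.
    System.out.println(rendered);
  }
}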

http://git-wip-us.apache.org/repos/asf/ambari/blob/70c8adac/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
--
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index a8f7951..5319da9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml

ambari git commit: AMBARI-13437. HDFS File caching does not work because of ulimit not being passed into the start command for datanode. (aonishuk)

2015-10-15 Thread aonishuk
Repository: ambari
Updated Branches:
  refs/heads/trunk 81280ea39 -> 52083d1ff


AMBARI-13437. HDFS File caching does not work because of ulimit not being 
passed into the start command for datanode. (aonishuk)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/52083d1f
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/52083d1f
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/52083d1f

Branch: refs/heads/trunk
Commit: 52083d1ff8425948485149cd1c862ba9ddcb58db
Parents: 81280ea
Author: Andrew Onishuk 
Authored: Thu Oct 15 19:58:01 2015 +0300
Committer: Andrew Onishuk 
Committed: Thu Oct 15 19:58:27 2015 +0300

--
 .../server/upgrade/UpgradeCatalog213.java   | 25 
 .../HDFS/2.1.0.2.0/configuration/hadoop-env.xml |  8 +++
 .../2.0.6/hooks/before-ANY/scripts/params.py|  3 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../services/HDFS/configuration/hadoop-env.xml  |  8 +++
 .../server/upgrade/UpgradeCatalog213Test.java   |  4 
 6 files changed, 56 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
--
diff --git a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
index 90a75be..803e5f4 100644
--- a/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
+++ b/ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog213.java
@@ -58,7 +58,15 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
   private static final String AMS_ENV = "ams-env";
   private static final String AMS_HBASE_ENV = "ams-hbase-env";
   private static final String HBASE_ENV_CONFIG = "hbase-env";
+  private static final String HADOOP_ENV_CONFIG = "hadoop-env";
   private static final String CONTENT_PROPERTY = "content";
+  private static final String HADOOP_ENV_CONTENT_TO_APPEND = "\n{% if is_datanode_max_locked_memory_set %}\n" +
+                    "# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin. \n" +
+                    "# Makes sense to fix only when runing DN as root \n" +
+                    "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
+                    "  ulimit -l {{datanode_max_locked_memory}}\n" +
+                    "fi\n" +
+                    "{% endif %};\n";
 
  private static final String KERBEROS_DESCRIPTOR_TABLE = "kerberos_descriptor";
  private static final String KERBEROS_DESCRIPTOR_NAME_COLUMN = "kerberos_descriptor_name";
@@ -141,6 +149,7 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 updateAMSConfigs();
 updateHDFSConfigs();
 updateHbaseEnvConfig();
+updateHadoopEnv();
 updateKafkaConfigs();
   }
 
@@ -212,6 +221,22 @@ public class UpgradeCatalog213 extends AbstractUpgradeCatalog {
 
 return rootJson.toString();
   }
+
+  protected void updateHadoopEnv() throws AmbariException {
+    AmbariManagementController ambariManagementController = injector.getInstance(AmbariManagementController.class);
+
+    for (final Cluster cluster : getCheckedClusterMap(ambariManagementController.getClusters()).values()) {
+      Config hadoopEnvConfig = cluster.getDesiredConfigByType(HADOOP_ENV_CONFIG);
+      if (hadoopEnvConfig != null) {
+        String content = hadoopEnvConfig.getProperties().get(CONTENT_PROPERTY);
+        if (content != null) {
+          content += HADOOP_ENV_CONTENT_TO_APPEND;
+          Map<String, String> updates = Collections.singletonMap(CONTENT_PROPERTY, content);
+          updateConfigurationPropertiesForCluster(cluster, HADOOP_ENV_CONFIG, updates, true, false);
+        }
+      }
+    }
+  }
 
   protected void updateHDFSConfigs() throws AmbariException {
    AmbariManagementController ambariManagementController = injector.getInstance(
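
The diffstat also lists a four-line addition to UpgradeCatalog213Test.java, which is not included in this mail. A rough standalone check of the property the upgrade step should guarantee, under assumed content, might look like the following (this is not the actual test change):

public class HadoopEnvAppendCheck {
  public static void main(String[] args) {
    String original = "export JAVA_HOME={{java_home}}"; // assumed existing hadoop-env content
    String appended = original +
        "\n{% if is_datanode_max_locked_memory_set %}\n" +
        "if [ \"$command\" == \"datanode\" ] && [ \"$EUID\" -eq 0 ] && [ -n \"$HADOOP_SECURE_DN_USER\" ]; then\n" +
        "  ulimit -l {{datanode_max_locked_memory}}\n" +
        "fi\n" +
        "{% endif %};\n";

    // Run with `java -ea HadoopEnvAppendCheck` so the assertions are enabled.
    assert appended.startsWith(original);                                 // existing content stays untouched
    assert appended.endsWith("{% endif %};\n");                           // guarded block closes the property
    assert appended.contains("ulimit -l {{datanode_max_locked_memory}}"); // ulimit line is present
    System.out.println("hadoop-env append looks as expected");
  }
}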

http://git-wip-us.apache.org/repos/asf/ambari/blob/52083d1f/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
--
diff --git a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml b/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml
index a8f7951..5319da9 100644
--- a/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/configuration/hadoop-env.xml