AMBARI-22121. Create mpack for Isilon OneFS (amagyar)

Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/2a1c2e49
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/2a1c2e49
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/2a1c2e49

Branch: refs/heads/branch-feature-AMBARI-22008
Commit: 2a1c2e496c14cd5b00c8c235a00fc907d85dac90
Parents: f087874
Author: Attila Magyar <amag...@hortonworks.com>
Authored: Fri Oct 6 09:13:09 2017 +0200
Committer: Attila Magyar <amag...@hortonworks.com>
Committed: Wed Nov 22 10:50:40 2017 +0100

----------------------------------------------------------------------
 .../isilon-onefs-mpack/.gitignore               |   1 +
 .../management-packs/isilon-onefs-mpack/pom.xml | 110 ++++++
 .../src/main/assemblies/isilon-onefs-mpack.xml  |  40 ++
 .../addon-services/ONEFS/1.0.0/alerts.json      |  46 +++
 .../ONEFS/1.0.0/configuration/core-site.xml     | 102 +++++
 .../ONEFS/1.0.0/configuration/hadoop-env.xml    | 386 +++++++++++++++++++
 .../ONEFS/1.0.0/configuration/hdfs-site.xml     |  61 +++
 .../addon-services/ONEFS/1.0.0/metainfo.xml     |  90 +++++
 .../addon-services/ONEFS/1.0.0/metrics.json     |  59 +++
 .../ONEFS/1.0.0/package/scripts/__init__.py     |  20 +
 .../ONEFS/1.0.0/package/scripts/onefs_client.py |  56 +++
 .../ONEFS/1.0.0/package/scripts/params.py       |  29 ++
 .../ONEFS/1.0.0/package/scripts/params_linux.py |  74 ++++
 .../1.0.0/package/scripts/params_windows.py     |  83 ++++
 .../1.0.0/package/scripts/service_check.py      | 137 +++++++
 .../1.0.0/package/scripts/status_params.py      |  58 +++
 .../addon-services/ONEFS/1.0.0/widgets.json     |  38 ++
 .../src/main/resources/mpack.json               |  28 ++
 18 files changed, 1418 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/.gitignore
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/.gitignore b/contrib/management-packs/isilon-onefs-mpack/.gitignore
new file mode 100644
index 0000000..1377554
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/.gitignore
@@ -0,0 +1 @@
+*.swp

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/pom.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/pom.xml b/contrib/management-packs/isilon-onefs-mpack/pom.xml
new file mode 100644
index 0000000..5d8f215
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/pom.xml
@@ -0,0 +1,110 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.ambari</groupId>
+  <artifactId>isilon-onefs-mpack</artifactId>
+  <packaging>pom</packaging>
+  <version>0.1.0.0-SNAPSHOT</version>
+  <name>Isilon OneFS Ambari Management Pack</name>
+  <url>http://ambari.apache.org/</url>
+  <properties>
+    <minAmbariVersion>3.0.0.0</minAmbariVersion>
+    <maxAmbariVersion></maxAmbariVersion>
+    <nifiversion>1.0.0</nifiversion>
+  </properties>
+  <parent>
+    <groupId>org.apache.ambari.contrib.mpacks</groupId>
+    <artifactId>ambari-contrib-mpacks</artifactId>
+    <version>2.0.0.0-SNAPSHOT</version>
+  </parent>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>1.8</version>
+        <executions>
+          <execution>
+            <id>parse-version</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>parse-version</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>regex-property</id>
+            <goals>
+              <goal>regex-property</goal>
+            </goals>
+            <configuration>
+              <name>mpackVersion</name>
+              <value>${project.version}</value>
+              <regex>^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*</regex>
+              <replacement>$1.$2.$3.$4</replacement>
+              <failIfNoMatch>false</failIfNoMatch>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-resources-plugin</artifactId>
+        <version>2.6</version>
+        <executions>
+          <execution>
+            <id>copy-resources</id>
+            <phase>compile</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${basedir}/target/</outputDirectory>
+              <resources>
+                <resource>
+                  <directory>${basedir}/src/main/resources</directory>
+                  <includes>
+                    <include>mpack.json</include>
+                  </includes>
+                  <filtering>true</filtering>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <configuration>
+          <tarLongFileMode>gnu</tarLongFileMode>
+          <descriptors>
+            <descriptor>src/main/assemblies/isilon-onefs-mpack.xml</descriptor>
+          </descriptors>
+        </configuration>
+        <executions>
+          <execution>
+            <id>build-tarball</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
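
Note: the build-helper regex-property execution above derives the mpackVersion
property by stripping the version qualifier, e.g. 0.1.0.0-SNAPSHOT -> 0.1.0.0.
A minimal Python sketch of the same transformation (input value illustrative):

    import re

    version = "0.1.0.0-SNAPSHOT"  # project.version; qualifier varies per build
    m = re.match(r"^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)(\.|-).*", version)
    # failIfNoMatch=false: fall back to the raw version if the regex misses
    mpack_version = ".".join(m.groups()[:4]) if m else version
    print(mpack_version)  # -> 0.1.0.0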

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
new file mode 100644
index 0000000..232cf50
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/assemblies/isilon-onefs-mpack.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id></id>
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+  <includeBaseDirectory>true</includeBaseDirectory>
+  <fileSets>
+     <fileSet>
+      <directory>src/main/resources/addon-services</directory>
+      <outputDirectory>addon-services</outputDirectory>
+    </fileSet>
+  </fileSets>
+  <files>
+    <file>
+      <source>target/mpack.json</source>
+    </file>
+  </files>    
+  <dependencySets>
+  </dependencySets>
+</assembly>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
new file mode 100644
index 0000000..5718721
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/alerts.json
@@ -0,0 +1,46 @@
+{
+  "ONEFS":{
+    "service": [
+      {
+        "name": "onefs_namenode_cpu",
+        "label": "OneFS NameNode Host CPU Utilization",
+        "description": "This host-level alert is triggered if CPU utilization 
of the NameNode exceeds certain warning and critical thresholds. It checks the 
NameNode JMX Servlet for the SystemCPULoad property. The threshold values are 
in percent.",
+        "interval": 1,
+        "scope": "HOST",
+        "enabled": true,
+        "source": {
+          "type": "SERVER",
+          "class": "org.apache.ambari.server.alerts.JmxServerSideAlert",
+          "uri": {
+            "http": "${hdfs-site/dfs.namenode.http-address}",
+            "https": "${hdfs-site/dfs.namenode.https-address}",
+            "https_property": "${hdfs-site/dfs.http.policy}",
+            "https_property_value": "HTTPS_ONLY",
+            "connection_timeout": 5.0
+          },
+          "reporting": {
+            "ok": {
+              "text": "{1} CPU, load {0,number,percent}"
+            },
+            "warning": {
+              "text": "{1} CPU, load {0,number,percent}",
+              "value": 5
+            },
+            "critical": {
+              "text": "{1} CPU, load {0,number,percent}",
+              "value": 20
+            },
+            "units" : "%",
+            "type": "PERCENT"
+           },
+           "jmx": {
+             "property_list": [
+              "java.lang:type=OperatingSystem/SystemCpuLoad",
+              "java.lang:type=OperatingSystem/AvailableProcessors"
+             ]
+           }
+          }
+      }
+    ]
+  }
+}
\ No newline at end of file
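
Note: the SERVER-side alert above reads SystemCpuLoad (a 0..1 fraction) and
AvailableProcessors from the NameNode JMX endpoint and evaluates them in
PERCENT units against the warning (5) and critical (20) thresholds. A rough,
illustrative sketch of that evaluation (not the actual JmxServerSideAlert code):

    def cpu_alert_state(system_cpu_load, processors, warning=5.0, critical=20.0):
        # SystemCpuLoad arrives as a 0..1 fraction; thresholds are in percent
        load_pct = system_cpu_load * 100.0
        if load_pct >= critical:
            state = "CRITICAL"
        elif load_pct >= warning:
            state = "WARNING"
        else:
            state = "OK"
        # mirrors the reporting text "{1} CPU, load {0,number,percent}"
        return state, "%s CPU, load %d%%" % (processors, load_pct)

    print(cpu_alert_state(0.07, 8))  # -> ('WARNING', '8 CPU, load 7%')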

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
new file mode 100644
index 0000000..7d3acd7
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/core-site.xml
@@ -0,0 +1,102 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+<!-- Put site-specific property overrides in this file. -->
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude" supports_final="true">
+  <!-- file system properties -->
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for HDFS.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+     Enable authorization for different protocols.
+  </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>DEFAULT</value>
+    <description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name 
followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+    <value-attributes>
+      <type>multiLine</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>net.topology.script.file.name</name>
+    <value>/etc/hadoop/conf/topology_script.py</value>
+    <description>
+      Location of topology script used by Hadoop to determine the rack location of nodes.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
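
Note: as a concrete illustration of the auth_to_local rules documented above
(real parsing is done by Hadoop's KerberosName class, not by this toy code),
base [2:$1] maps a two-component principal to its first component:

    import re

    def short_name(principal, n_components, base, filter_re=None, sed=None):
        # toy RULE:[n:base](filter)s/find/repl/ evaluator, illustration only
        name, realm = principal.split("@", 1)
        parts = name.split("/")
        if len(parts) != n_components:
            return None
        result = base.replace("$0", realm)
        for i, part in enumerate(parts, start=1):
            result = result.replace("$%d" % i, part)
        if filter_re and not re.match(filter_re + "$", result):
            return None
        if sed:
            result = re.sub(sed[0], sed[1], result)
        return result

    print(short_name("omalley/admin@APACHE.ORG", 2, "$1"))  # -> omalley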

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
new file mode 100644
index 0000000..bb671cc
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hadoop-env.xml
@@ -0,0 +1,386 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration supports_adding_forbidden="true">
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+    <display-name>Hadoop Log Dir Prefix</display-name>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <display-name>Hadoop PID Dir Prefix</display-name>
+    <description>Hadoop PID Dir Prefix</description>
+    <value-attributes>
+      <type>directory</type>
+      <overridable>false</overridable>
+      <editable-only-at-install>true</editable-only-at-install>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_root_logger</name>
+    <value>INFO,RFA</value>
+    <display-name>Hadoop Root Logger</display-name>
+    <description>Hadoop Root Logger</description>
+    <value-attributes>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+    <display-name>Hadoop maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <unit>MB</unit>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+    <display-name>NameNode Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <depends-on>
+      <property>
+        <type>hdfs-site</type>
+        <name>dfs.datanode.data.dir</name>
+      </property>
+    </depends-on>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>Default size of Java new generation for NameNode (Java option -XX:NewSize) Note: The value of NameNode new generation size (default size of Java new generation for NameNode (Java option -XX:NewSize)) should be 1/8 of maximum heap size (-Xmx). Ensure that the value of the namenode_opt_newsize property is 1/8 the value of maximum heap size (-Xmx).</description>
+    <display-name>NameNode new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+    <display-name>NameNode maximum new generation size</display-name>
+    <depends-on>
+      <property>
+        <type>hadoop-env</type>
+        <name>namenode_heapsize</name>
+      </property>
+    </depends-on>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>16384</maximum>
+      <unit>MB</unit>
+      <increment-step>256</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+    <display-name>NameNode permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+    <display-name>NameNode maximum permanent generation size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>2096</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+    <display-name>DataNode maximum Java heap size</display-name>
+    <value-attributes>
+      <type>int</type>
+      <minimum>0</minimum>
+      <maximum>268435456</maximum>
+      <unit>MB</unit>
+      <increment-step>128</increment-step>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <display-name>Proxy User Group</display-name>
+    <value>users</value>
+    <property-type>GROUP</property-type>
+    <description>Proxy user group.</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <display-name>HDFS User</display-name>
+    <value>hdfs</value>
+    <property-type>USER</property-type>
+    <description>User to run HDFS as</description>
+    <value-attributes>
+      <type>user</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_tmp_dir</name>
+    <value>/tmp</value>
+    <description>HDFS tmp Dir</description>
+    <display-name>HDFS tmp Dir</display-name>
+    <property-type>NOT_MANAGED_HDFS_PATH</property-type>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nofile_limit</name>
+    <value>128000</value>
+    <description>Max open files limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_user_nproc_limit</name>
+    <value>65536</value>
+    <description>Max number of processes limit setting for HDFS user.</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>namenode_backup_dir</name>
+    <description>Local directory for storing backup copy of NameNode images during upgrade</description>
+    <value>/tmp/upgrades</value>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>hdfs_user_keytab</name>
+    <description>HDFS keytab path</description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>hdfs_principal_name</name>
+    <description>HDFS principal name</description>
+    <property-type>KERBEROS_PRINCIPAL</property-type>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <display-name>hadoop-env template</display-name>
+    <description>This is the jinja template for hadoop-env.sh file</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop home directory
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+
+# Path to jsvc required by secure datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+if [[ ("$SERVICE" = "hiveserver2") || ("$SERVICE" = "metastore") || ( 
"$SERVICE" = "cli") ]]; then
+  if [ "$HADOOP_HEAPSIZE" = "" ]; then
+    export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+  fi
+else
+  export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+fi
+
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+
+{% if java_version &lt; 8 %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1  ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m -XX:MaxPermSize=512m $HADOOP_CLIENT_OPTS"
+{% else %}
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT -Dorg.mortbay.jetty.Request.maxFormContentSize=-1 ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+{% endif %}
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-XX:CMSInitiatingOccupancyFraction=70 -XX:+UseCMSInitiatingOccupancyOnly -XX:ConcGCThreads=4 -XX:+UseConcMarkSweepGC -Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER:-{{hadoop_secure_dn_user}}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Add database libraries
+JAVA_JDBC_LIBS=""
+if [ -d "/usr/share/java" ]; then
+  for jarFile in `ls /usr/share/java | grep -E "(mysql|ojdbc|postgresql|sqljdbc)" 2&gt;/dev/null`
+  do
+    JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+  done
+fi
+
+# Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+
+# Add libraries to the hadoop classpath - some may not need a colon as they already include it
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+if [ -d "/usr/lib/tez" ]; then
+  export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/usr/lib/tez/*:/usr/lib/tez/lib/*:/etc/tez/conf
+fi
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:{{hadoop_lib_home}}/native/Linux-{{architecture}}-64
+
+{% if is_datanode_max_locked_memory_set %}
+# Fix temporary bug, when ulimit from conf files is not picked up, without full relogin.
+# Makes sense to fix only when running DN as root
+if [ "$command" == "datanode" ] &amp;&amp; [ "$EUID" -eq 0 ] &amp;&amp; [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  ulimit -l {{datanode_max_locked_memory}}
+fi
+{% endif %}
+    </value>
+    <value-attributes>
+      <type>content</type>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+</configuration>
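
Note: the content property above is a Jinja-style template that Ambari renders
with values from hadoop-env/params before writing hadoop-env.sh (Ambari's own
InlineTemplate is Jinja2-based). A standalone approximation of that rendering
step, with made-up values:

    from jinja2 import Template  # stand-in for Ambari's InlineTemplate

    fragment = ("export JAVA_HOME={{java_home}}\n"
                "export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER\n")
    # java_home and hdfs_log_dir_prefix would come from the rendered params
    print(Template(fragment).render(java_home="/usr/jdk64/jdk1.8.0_112",
                                    hdfs_log_dir_prefix="/var/log/hadoop"))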

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
new file mode 100644
index 0000000..cb6544f
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/configuration/hdfs-site.xml
@@ -0,0 +1,61 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<!-- Put site-specific property overrides in this file. -->
+<configuration supports_final="true">
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>false</value>
+    <display-name>WebHDFS enabled</display-name>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+    <value-attributes>
+      <type>boolean</type>
+      <overridable>false</overridable>
+    </value-attributes>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:8082</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+    <on-ambari-upgrade add="true"/>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:8082</value>
+    <description>The NameNode http server address and port.</description>
+    <final>true</final>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:8080</value>
+    <description>The https address where namenode binds</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+  <property>
+    <name>dfs.client-write-packet-size</name>
+    <value>131072</value>
+    <description>Packet size for clients to write</description>
+    <on-ambari-upgrade add="false"/>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
new file mode 100644
index 0000000..f20bcf8
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metainfo.xml
@@ -0,0 +1,90 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ONEFS</name>
+      <displayName>OneFS</displayName>
+      <serviceType>HCFS</serviceType>
+      <comment>Isilon Systems OneFS</comment>
+      <version>1.0.0</version>
+      <components>
+       <component>
+          <name>ONEFS_CLIENT</name>
+          <displayName>OneFS Client</displayName>
+          <category>CLIENT</category>
+          <componentType>HCFS_CLIENT</componentType>
+          <cardinality>1+</cardinality>
+          <versionAdvertised>true</versionAdvertised>
+          <commandScript>
+            <script>scripts/onefs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>1200</timeout>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hdfs-site.xml</fileName>
+              <dictionaryName>hdfs-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>xml</type>
+              <fileName>core-site.xml</fileName>
+              <dictionaryName>core-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>log4j.properties</fileName>
+              <dictionaryName>hdfs-log4j,yarn-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hadoop-env.sh</fileName>
+              <dictionaryName>hadoop-env</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>hadoop-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
+      </configuration-dependencies>
+      <restartRequiredAfterRackChange>true</restartRequiredAfterRackChange>
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
new file mode 100644
index 0000000..cd705eb
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/metrics.json
@@ -0,0 +1,59 @@
+{
+  "ONEFS_CLIENT": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "default": {
+            "metrics/onefs/demo/counter": {
+              "metric": "onefs.demo.counter",
+              "pointInTime": true,
+              "temporal": true
+            }
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "properties": {
+          "url_config_type": "hdfs-site",
+          "url_property_name": "dfs.namenode.http-address"
+        },
+        "metrics": {
+          "default": {
+            "metrics/dfs/namenode/Used": {
+              "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/memHeapUsedM": {
+              "metric": "jvm.JvmMetrics.MemHeapUsedM",
+              "pointInTime": true,
+              "temporal": true
+            },
+            "metrics/jvm/HeapMemoryMax": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/HeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryMax": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+              "pointInTime": true,
+              "temporal": false
+            },
+            "metrics/jvm/NonHeapMemoryUsed": {
+              "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+              "pointInTime": true,
+              "temporal": false
+            }
+          }
+        }
+      }
+    ]
+  }
+}
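
Note: the jmx metric source above resolves its URL from hdfs-site's
dfs.namenode.http-address and maps Ambari metric names to attributes of beans
served by the /jmx servlet. A minimal sketch of fetching one of the listed
beans (host/port are placeholders matching this mpack's hdfs-site defaults):

    import json
    import urllib2  # the agent-side scripts in this mpack are Python 2

    url = "http://localhost:8082/jmx?qry=java.lang:type=Memory"
    beans = json.load(urllib2.urlopen(url))["beans"]
    usage = beans[0]["HeapMemoryUsage"]
    # feeds metrics/jvm/HeapMemoryUsed and metrics/jvm/HeapMemoryMax
    print(usage["used"], usage["max"])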

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py
new file mode 100644
index 0000000..35de4bb
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/__init__.py
@@ -0,0 +1,20 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
new file mode 100644
index 0000000..dbf1331
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/onefs_client.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management.core.exceptions import ClientComponentHasNoStatus
+from resource_management.libraries.script import Script
+from resource_management.libraries.resources.xml_config import XmlConfig
+
+class OneFsClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    self.setup_config(env)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def setup_config(self, env):
+    import params
+    env.set_params(params)
+    XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            configuration_attributes=params.config['configuration_attributes']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+    )
+    XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            configuration_attributes=params.config['configuration_attributes']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+    )
+
+if __name__ == "__main__":
+  OneFsClient().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py
new file mode 100644
index 0000000..838510c
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params.py
@@ -0,0 +1,29 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from ambari_commons import OSCheck
+from resource_management.libraries.functions.default import default
+
+if OSCheck.is_windows_family():
+  from params_windows import *
+else:
+  from params_linux import *
+
+nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
+retryAble = default("/commandParams/command_retry_enabled", False)
+script_https_protocol = Script.get_force_https_protocol_name()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
new file mode 100644
index 0000000..72e0ae9
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_linux.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management.libraries.functions.version import format_stack_version, compare_versions
+from resource_management import *
+import os
+import itertools
+import re
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
+from resource_management.libraries.resources.hdfs_resource import HdfsResource
+from resource_management.libraries.functions import stack_select
+
+config = Script.get_config()
+
+hostname = config["hostname"]
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['cluster-env']['user_group']
+hdfs_tmp_dir = config['configurations']['hadoop-env']['hdfs_tmp_dir']
+security_enabled = config['configurations']['cluster-env']['security_enabled']
+
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+is_namenode_master = hostname in namenode_host
+
+dfs_type = default("/commandParams/dfs_type", "")
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
+hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
+hdfs_site = config['configurations']['hdfs-site']
+default_fs = config['configurations']['core-site']['fs.defaultFS']
+
+import functools
+#create partial functions with common arguments for every HdfsResource call
+#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
+HdfsResource = functools.partial(
+  HdfsResource,
+  user=hdfs_user,
+  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
+  security_enabled = security_enabled,
+  keytab = hdfs_user_keytab,
+  kinit_path_local = kinit_path_local,
+  hadoop_bin_dir = hadoop_bin_dir,
+  hadoop_conf_dir = hadoop_conf_dir,
+  principal_name = hdfs_principal_name,
+  hdfs_site = hdfs_site,
+  default_fs = default_fs,
+  immutable_paths = get_not_managed_resources(),
+  dfs_type = dfs_type
+)
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
new file mode 100644
index 0000000..3e712b3
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/params_windows.py
@@ -0,0 +1,83 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+#Used in subsequent imports from params
+from install_params import exclude_packages
+from status_params import *
+
+config = Script.get_config()
+hadoop_conf_dir = None
+hbase_conf_dir = None
+hadoop_home = None
+try:
+  hadoop_conf_dir = os.environ["HADOOP_CONF_DIR"]
+  hbase_conf_dir = os.environ["HBASE_CONF_DIR"]
+  hadoop_home = os.environ["HADOOP_HOME"]
+except:
+  pass
+#directories & files
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir']
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
+#decommission
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
+hdfs_include_file = None
+manage_include_files = default("/configurations/hdfs-site/manage.include.files", False)
+if include_file_path and manage_include_files:
+  slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+  hdfs_include_file = list(set(slave_hosts) - set(hdfs_exclude_file))
+update_files_only = default("/commandParams/update_files_only",False)
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.internal.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+
+namenode_id = None
+namenode_rpc = None
+hostname = config["hostname"]
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname.lower() in nn_host.lower():
+      namenode_id = nn_id
+      namenode_rpc = nn_host
+
+hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
+hdfs_user = hadoop_user
+
+grep_exe = "findstr"
+
+name_node_params = default("/commandParams/namenode", None)
+
+service_map = {
+  "datanode" : datanode_win_service_name,
+  "journalnode" : journalnode_win_service_name,
+  "namenode" : namenode_win_service_name,
+  "secondarynamenode" : snamenode_win_service_name,
+  "zkfc_slave": zkfc_win_service_name
+}

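Note the two lookup styles above: required properties index config directly and fail fast, while optional ones go through default(path, fallback), which walks a slash-separated path through the command JSON and returns the fallback when any segment is missing (e.g. dfs.hosts is often absent). A rough sketch of that behavior, assuming it amounts to a plain nested-dictionary lookup; the real Ambari helper reads the command JSON itself, so the explicit config argument here is an illustration:

    def default(path, fallback, config):
        # Hypothetical stand-in for the Ambari helper: resolve a
        # "/configurations/hdfs-site/dfs.hosts" style path; any missing
        # segment yields the fallback instead of a KeyError.
        node = config
        for segment in path.strip("/").split("/"):
            if not isinstance(node, dict) or segment not in node:
                return fallback
            node = node[segment]
        return node

    cfg = {"configurations": {"hdfs-site": {"dfs.hosts": "/etc/hadoop/dfs.hosts"}}}
    print(default("/configurations/hdfs-site/dfs.hosts", None, cfg))    # the value
    print(default("/configurations/hdfs-site/dfs.missing", False, cfg)) # False
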
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
new file mode 100644
index 0000000..3d798a3
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/service_check.py
@@ -0,0 +1,137 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.shell import as_user
+from ambari_commons.os_family_impl import OsFamilyImpl
+from ambari_commons import OSConst
+from resource_management.libraries.functions.curl_krb_request import curl_krb_request
+from resource_management.core.logger import Logger
+
+class HdfsServiceCheck(Script):
+  pass
+
+@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
+class HdfsServiceCheckDefault(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = functions.get_unique_id_and_date()
+    dir = params.hdfs_tmp_dir
+    tmp_file = format("{dir}/{unique}")
+
+    if params.security_enabled:
+      Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
+        user=params.hdfs_user
+      )
+    params.HdfsResource(dir,
+                        type="directory",
+                        action="create_on_execute",
+                        mode=0777
+    )
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        action="delete_on_execute",
+    )
+
+    params.HdfsResource(tmp_file,
+                        type="file",
+                        source="/etc/passwd",
+                        action="create_on_execute"
+    )
+    params.HdfsResource(None, action="execute")
+
+    if params.has_journalnode_hosts:
+      if params.security_enabled:
+        for host in params.journalnode_hosts:
+          if params.https_only:
+            uri = format("https://{host}:{journalnode_port}")
+          else:
+            uri = format("http://{host}:{journalnode_port}")
+          response, errmsg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab,
+                                                           params.smokeuser_principal, uri, "jn_service_check",
+                                                           params.kinit_path_local, False, None, params.smoke_user)
+          if not response:
+            Logger.error("Cannot access WEB UI on: {0}. Error : {1}".format(uri, errmsg))
+            return 1
+      else:
+        journalnode_port = params.journalnode_port
+        checkWebUIFileName = "checkWebUI.py"
+        checkWebUIFilePath = format("{tmp_dir}/{checkWebUIFileName}")
+        comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+
+        checkWebUICmd = format("ambari-python-wrap {checkWebUIFilePath} -m {comma_sep_jn_hosts} -p {journalnode_port} -s {https_only} -o {script_https_protocol}")
+        File(checkWebUIFilePath,
+             content=StaticFile(checkWebUIFileName),
+             mode=0775)
+
+        Execute(checkWebUICmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5,
+                user=params.smoke_user
+        )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = as_user(format(
+          "ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1"), user=params.hdfs_user)
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
+class HdfsServiceCheckWindows(HdfsServiceCheck):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = functions.get_unique_id_and_date()
+
+    #Hadoop uses POSIX-style paths, separator is always /
+    dir = params.hdfs_tmp_dir
+    tmp_file = dir + '/' + unique
+
+    #commands for execution
+    hadoop_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hadoop.cmd"))
+    create_dir_cmd = "%s fs -mkdir %s" % (hadoop_cmd, dir)
+    own_dir = "%s fs -chmod 777 %s" % (hadoop_cmd, dir)
+    test_dir_exists = "%s fs -test -e %s" % (hadoop_cmd, dir)
+    cleanup_cmd = "%s fs -rm %s" % (hadoop_cmd, tmp_file)
+    create_file_cmd = "%s fs -put %s %s" % (hadoop_cmd, os.path.join(params.hadoop_conf_dir, "core-site.xml"), tmp_file)
+    test_cmd = "%s fs -test -e %s" % (hadoop_cmd, tmp_file)
+
+    hdfs_cmd = "cmd /C %s" % (os.path.join(params.hadoop_home, "bin", "hdfs.cmd"))
+    safemode_command = "%s dfsadmin -safemode get | %s OFF" % (hdfs_cmd, params.grep_exe)
+
+    Execute(safemode_command, logoutput=True, try_sleep=3, tries=20)
+    Execute(create_dir_cmd, user=params.hdfs_user, logoutput=True, ignore_failures=True)
+    Execute(own_dir, user=params.hdfs_user, logoutput=True)
+    Execute(test_dir_exists, user=params.hdfs_user, logoutput=True)
+    Execute(create_file_cmd, user=params.hdfs_user, logoutput=True)
+    Execute(test_cmd, user=params.hdfs_user, logoutput=True)
+    Execute(cleanup_cmd, user=params.hdfs_user, logoutput=True)
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()

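The @OsFamilyImpl decorators are what make the bare HdfsServiceCheck().execute() at the bottom work on both platforms: each decorated subclass is registered against an OS family, and execution is routed to the one matching the detected OS (HdfsServiceCheckDefault everywhere except Windows, HdfsServiceCheckWindows on WINSRV). A simplified sketch of that registration-and-dispatch idea, not the actual ambari_commons implementation:

    _impls = {}

    def os_family_impl(os_family):
        # Register a subclass as the implementation for one OS family.
        def register(cls):
            _impls[os_family] = cls
            return cls
        return register

    class ServiceCheck(object):
        def execute(self, detected_family):
            # Route to the subclass registered for the current OS.
            _impls[detected_family]().service_check()

    @os_family_impl("default")
    class ServiceCheckLinux(ServiceCheck):
        def service_check(self):
            print("running the Linux smoke test")

    @os_family_impl("winsrv")
    class ServiceCheckWindows(ServiceCheck):
        def service_check(self):
            print("running the Windows smoke test")

    ServiceCheck().execute("default")
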
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py
new file mode 100644
index 0000000..153f9a6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/package/scripts/status_params.py
@@ -0,0 +1,58 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from ambari_commons import OSCheck
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import format
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions import get_kinit_path
+from resource_management.libraries.script.script import Script
+
+config = Script.get_config()
+
+if OSCheck.is_windows_family():
+  namenode_win_service_name = "namenode"
+  datanode_win_service_name = "datanode"
+  snamenode_win_service_name = "secondarynamenode"
+  journalnode_win_service_name = "journalnode"
+  zkfc_win_service_name = "zkfc"
+else:
+  hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+  hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+  hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+  datanode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+  namenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+  snamenode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+  journalnode_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+  zkfc_pid_file = format("{hadoop_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+  nfsgateway_pid_file = format("{hadoop_pid_dir_prefix}/root/hadoop_privileged_nfs3.pid")
+
+  # Security related/required params
+  hostname = config['hostname']
+  security_enabled = config['configurations']['cluster-env']['security_enabled']
+  hdfs_user_principal = config['configurations']['hadoop-env']['hdfs_principal_name']
+  hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+
+  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
+
+  kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
+  tmp_dir = Script.get_tmp_dir()
+
+stack_name = default("/hostLevelParams/stack_name", None)
\ No newline at end of file

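Both params_linux.py and params_windows.py pull this module in with a star import, so pid-file locations, kerberos identities, and the kinit path are defined in exactly one place per OS family. On Linux, a component's status handler would typically feed one of these pid files to check_process_status; a hedged sketch, assuming the stock resource_management helper (it raises ComponentIsNotRunning when the pid file or the process is absent):

    from resource_management.libraries.functions.check_process_status import check_process_status

    import status_params

    def status(env):
        # Ambari maps the raised ComponentIsNotRunning to a stopped component.
        check_process_status(status_params.datanode_pid_file)
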
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
new file mode 100644
index 0000000..23da1d6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/addon-services/ONEFS/1.0.0/widgets.json
@@ -0,0 +1,38 @@
+{
+  "layouts": [
+    {
+      "layout_name": "default_onefs_layout",
+      "display_name": "Standard ONEFS Dashboard",
+      "section_name": "ONEFS_SUMMARY",
+      "widgetLayoutInfo": [
+        {
+          "widget_name": "OneFS metrics demo",
+          "description": "Test widget",
+          "default_section_name": "ONEFS_SUMMARY",
+          "widget_type": "GRAPH",
+          "is_visible": true,
+          "scope" : "SERVICE",
+          "metrics": [
+            {
+              "name": "counter",
+              "metric_path": "metrics/onefs/demo/counter",
+              "service_name": "ONEFS",
+              "component_name": "ONEFS_CLUSTER"
+            }
+          ],
+          "values": [
+            {
+              "name": "demo value",
+              "values" : "${counter}"
+            }
+          ],
+          "properties": {
+            "graph_type": "LINE",
+            "time_range": "1",
+            "display_unit": "%"
+          }
+        }
+      ]
+    }
+  ]
+}
\ No newline at end of file

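For the graph to render, the widget's metric_path has to match a metric the ONEFS_CLUSTER component actually emits (see the metrics.json added in this commit); the ${counter} expression then feeds the plotted series. One way to spot-check the raw value is Ambari's REST API; a hedged sketch using the third-party requests library, with host, cluster name, and credentials as placeholders:

    import requests

    # Hypothetical endpoint and credentials; substitute your own.
    url = ("http://ambari.example.com:8080/api/v1/clusters/c1"
           "/services/ONEFS/components/ONEFS_CLUSTER")
    resp = requests.get(url,
                        params={"fields": "metrics/onefs/demo/counter"},
                        auth=("admin", "admin"))
    print(resp.json())
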
http://git-wip-us.apache.org/repos/asf/ambari/blob/2a1c2e49/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
----------------------------------------------------------------------
diff --git a/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
new file mode 100644
index 0000000..f15fcb6
--- /dev/null
+++ b/contrib/management-packs/isilon-onefs-mpack/src/main/resources/mpack.json
@@ -0,0 +1,28 @@
+{
+  "type" : "full-release",
+  "name" : "onefs-ambari-mpack",
+  "version": "0.1",
+  "description" : "OneFS Ambari Management Pack",
+  "prerequisites": {
+    "min-ambari-version" : "3.0.0.0"
+  },
+  "artifacts": [
+    {
+      "name" : "ONEFS-addon-services",
+      "type" : "stack-addon-service-definitions",
+      "source_dir" : "addon-services",
+      "service_versions_map" : [
+          {
+             "service_name" : "ONEFS",
+             "service_version" : "1.0.0",
+             "applicable_stacks" : [
+                 {
+                      "stack_name" : "HDP", "stack_version" : "2.6"
+                  }
+              ]
+          }
+      ]
+    }
+  ]
+}
+

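Once the module is built (mvn package drives the assembly descriptor that produces the mpack tarball), installation goes through the standard ambari-server mpack CLI; the tarball path below is a placeholder:

    ambari-server install-mpack --mpack=/path/to/isilon-onefs-mpack.tar.gz --verbose

followed by an ambari-server restart so the ONEFS addon-service definition is registered against the HDP 2.6 stack declared in applicable_stacks.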