NIFI-280 - Modifying existing documentation
removing index.html and renaming to additionalDetails.html if there is relevant 
information


Project: http://git-wip-us.apache.org/repos/asf/incubator-nifi/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-nifi/commit/0fa553ac
Tree: http://git-wip-us.apache.org/repos/asf/incubator-nifi/tree/0fa553ac
Diff: http://git-wip-us.apache.org/repos/asf/incubator-nifi/diff/0fa553ac

Branch: refs/heads/NIFI-353
Commit: 0fa553ac0265f96cb5c2a5f42bc760aa0c65b590
Parents: 220fcac
Author: danbress <[email protected]>
Authored: Sat Jan 31 12:56:25 2015 -0500
Committer: danbress <[email protected]>
Committed: Sat Feb 7 10:47:07 2015 -0500

----------------------------------------------------------------------
 .../additionalDetails.html                      |  53 +++
 .../index.html                                  |  88 -----
 .../additionalDetails.html                      |  57 +++
 .../index.html                                  | 162 ---------
 .../additionalDetails.html                      |  32 ++
 .../index.html                                  | 150 --------
 .../additionalDetails.html                      |  51 +++
 .../index.html                                  | 159 ---------
 .../additionalDetails.html                      |  78 +++++
 .../index.html                                  | 173 ---------
 .../additionalDetails.html                      |  45 +++
 .../index.html                                  | 189 ----------
 .../index.html                                  |  63 ----
 .../additionalDetails.html                      |  68 ++++
 .../index.html                                  | 166 ---------
 .../index.html                                  | 116 -------
 .../index.html                                  |  65 ----
 .../additionalDetails.html                      |  58 ++++
 .../index.html                                  | 147 --------
 .../additionalDetails.html                      |  60 ++++
 .../index.html                                  | 106 ------
 .../index.html                                  |  97 ------
 .../index.html                                  | 160 ---------
 .../additionalDetails.html                      |  34 ++
 .../index.html                                  | 135 --------
 .../additionalDetails.html                      | 161 +++++++++
 .../index.html                                  | 311 -----------------
 .../additionalDetails.html                      |  57 +++
 .../index.html                                  | 111 ------
 .../index.html                                  |  64 ----
 .../additionalDetails.html                      |  74 ++++
 .../index.html                                  | 227 ------------
 .../additionalDetails.html                      |  79 +++++
 .../index.html                                  | 186 ----------
 .../additionalDetails.html                      |  45 +++
 .../index.html                                  | 143 --------
 .../index.html                                  | 118 -------
 .../index.html                                  | 122 -------
 .../additionalDetails.html                      |  80 +++++
 .../index.html                                  | 250 -------------
 .../additionalDetails.html                      |  35 ++
 .../index.html                                  |  88 -----
 .../additionalDetails.html                      |  35 ++
 .../index.html                                  |  89 -----
 .../additionalDetails.html                      |  98 ++++++
 .../index.html                                  | 136 --------
 .../additionalDetails.html                      |  64 ++++
 .../index.html                                  | 181 ----------
 .../index.html                                  |  86 -----
 .../index.html                                  | 144 --------
 .../index.html                                  |  80 -----
 .../additionalDetails.html                      | 115 ++++++
 .../index.html                                  | 347 -------------------
 .../index.html                                  |  64 ----
 .../additionalDetails.html                      |  49 +++
 .../index.html                                  | 143 --------
 .../additionalDetails.html                      |  48 +++
 .../index.html                                  | 187 ----------
 .../index.html                                  | 114 ------
 .../additionalDetails.html                      |  66 ++++
 .../index.html                                  | 283 ---------------
 .../index.html                                  | 109 ------
 .../index.html                                  | 152 --------
 .../additionalDetails.html                      |  34 ++
 .../index.html                                  | 281 ---------------
 .../index.html                                  |  91 -----
 .../index.html                                  | 114 ------
 .../additionalDetails.html                      |  46 +++
 .../index.html                                  | 110 ------
 .../index.html                                  |  82 -----
 .../index.html                                  |  85 -----
 .../additionalDetails.html                      |  45 +++
 .../index.html                                  | 100 ------
 .../additionalDetails.html                      |  82 +++++
 .../index.html                                  | 123 -------
 .../additionalDetails.html                      |  67 ++++
 .../index.html                                  | 107 ------
 .../additionalDetails.html                      |  72 ++++
 .../index.html                                  | 121 -------
 .../index.html                                  |  64 ----
 .../index.html                                  |  63 ----
 .../additionalDetails.html                      | 106 ++++++
 .../index.html                                  | 163 ---------
 .../index.html                                  |  56 ---
 .../additionalDetails.html                      |  70 ++++
 .../index.html                                  |  85 -----
 .../index.html                                  |  58 ----
 .../index.html                                  |  77 ----
 .../additionalDetails.html                      |  41 +++
 .../index.html                                  |  65 ----
 .../additionalDetails.html                      |  52 +++
 .../index.html                                  |  98 ------
 .../additionalDetails.html                      |  31 ++
 .../index.html                                  |  51 ---
 .../additionalDetails.html                      |  53 +++
 .../index.html                                  | 103 ------
 .../additionalDetails.html                      |  49 +++
 .../index.html                                  |  63 ----
 .../additionalDetails.html                      | 253 ++++++++++++++
 .../index.html                                  | 253 --------------
 100 files changed, 2543 insertions(+), 8124 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/additionalDetails.html
new file mode 100644
index 0000000..d940382
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/additionalDetails.html
@@ -0,0 +1,53 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>CreateHadoopSequenceFile</title>
+
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        <!-- Processor Documentation 
================================================== -->
+        <h2>Description:</h2>
+        <p>This processor is used to create a Hadoop Sequence File, which 
essentially is a file of key/value pairs. The key 
+            will be a file name and the value will be the flow file content. 
The processor will take either a merged (a.k.a. packaged) flow 
+            file or a singular flow file. Historically, this processor handled 
the merging by type and size or time prior to creating a 
+            SequenceFile output; it no longer does this. If creating a 
SequenceFile that contains multiple files of the same type is desired,
+            precede this processor with a <code>RouteOnAttribute</code> 
processor to segregate files of the same type and follow that with a
+            <code>MergeContent</code> processor to bundle up files. If the 
type of files is not important, just use the 
+            <code>MergeContent</code> processor. When using the 
<code>MergeContent</code> processor, the following Merge Formats are 
+            supported by this processor:
+        <ul>
+            <li>TAR</li>
+            <li>ZIP</li>
+            <li>FlowFileStream v3</li>
+        </ul>
+        The created SequenceFile is named the same as the incoming FlowFile 
with the suffix '.sf'. For incoming FlowFiles that are 
+        bundled, the keys in the SequenceFile are the individual file names, 
the values are the contents of each file.
+    </p>
+    NOTE: The value portion of a key/value pair is loaded into memory. While 
there is a max size limit of 2GB, this could cause memory
+    issues if there are too many concurrent tasks and the flow file sizes are 
large.
+
+    <p>
+        <strong>See Also:</strong>
+    </p>
+    <ul>
+        <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
+    </ul>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/index.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/index.html
deleted file mode 100644
index ff1709e..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.CreateHadoopSequenceFile/index.html
+++ /dev/null
@@ -1,88 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>CreateHadoopSequenceFile</title>
-
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>This processor is used to create a Hadoop Sequence File, which 
essentially is a file of key/value pairs. The key 
-            will be a file name and the value will be the flow file content. 
The processor will take either a merged (a.k.a. packaged) flow 
-            file or a singular flow file. Historically, this processor handled 
the merging by type and size or time prior to creating a 
-            SequenceFile output; it no longer does this. If creating a 
SequenceFile that contains multiple files of the same type is desired,
-            precede this processor with a <code>RouteOnAttribute</code> 
processor to segregate files of the same type and follow that with a
-            <code>MergeContent</code> processor to bundle up files. If the 
type of files is not important, just use the 
-            <code>MergeContent</code> processor. When using the 
<code>MergeContent</code> processor, the following Merge Formats are 
-            supported by this processor:
-        <ul>
-            <li>TAR</li>
-            <li>ZIP</li>
-            <li>FlowFileStream v3</li>
-        </ul>
-        The created SequenceFile is named the same as the incoming FlowFile 
with the suffix '.sf'. For incoming FlowFiles that are 
-        bundled, the keys in the SequenceFile are the individual file names, 
the values are the contents of each file.
-    </p>
-    NOTE: The value portion of a key/value pair is loaded into memory. While 
there is a max size limit of 2GB, this could cause memory
-    issues if there are too many concurrent tasks and the flow file sizes are 
large.
-
-    <h2>Properties:</h2>
-    <p>In the list below, the names of required properties appear in bold. Any 
other properties (not in bold) are
-        considered optional. If a property has a default value, it is 
indicated. If a property supports the use of the
-        NiFi Expression Language (or simply, "expression language"), that is 
also indicated.</p>
-    <ul>
-        <li>Hadoop Configuration Resources
-            <ul>
-                <li>A file or comma separated list of files which contains the 
Hadoop file system configuration.
-                    Without this, Hadoop will search the classpath for a 
'core-site.xml' and 'hdfs-site.xml' file or will
-                    revert to a default configuration.</li>
-                <li>Default value: none</li>
-            </ul>
-        </li>
-        <li>compression type
-            <ul>
-                <li>Type of compression to use when creating Sequence 
File.</li>
-                <li>Default value: none</li>
-            </ul>
-        </li>
-    </ul>
-
-    <h2>Relationships:</h2>
-    <ul>
-        <li>success
-            <ul>
-                <li>Generated Sequence Files are sent to this 
relationship.</li>
-            </ul>
-        </li>
-        <li>fail
-            <ul>
-                <li>Incoming files that failed to generate a Sequence File are 
sent to this relationship.</li>
-            </ul>
-        </li>
-    </ul>
-
-    <p>
-        <strong>See Also:</strong>
-    </p>
-    <ul>
-        <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
-    </ul>
-</body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/additionalDetails.html
new file mode 100644
index 0000000..0060c2c
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/additionalDetails.html
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>GetHDFS</title>
+
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        <p>
+            <strong>Modifies Attributes:</strong>
+        </p>
+        <table border="1">
+            <thead>
+                <tr>
+                    <th>Attribute Name</th>
+                    <th>Description</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                    <td>filename</td>
+                    <td>The name of the file that was read from HDFS.</td>
+                </tr>
+                <tr>
+                    <td>path</td>
+                    <td>The path is set to the relative path of the file's 
directory on HDFS. For example, if the <em>Directory</em>
+                        property is set to <code>/tmp</code>, then files 
picked up from /tmp will have the path attribute set to
+                        &quot;<code>./</code>&quot;. If the <em>Recurse 
Subdirectories</em> property is set to <code>true</code>
+                        and a file is picked up from /tmp/abc/1/2/3, then the 
path attribute will be set to &quot;<code>abc/1/2/3</code>&quot;.
+                    </td>
+                </tr>
+            </tbody>
+        </table>
+        <p>
+            <strong>See Also:</strong>
+        </p>
+        <ul>
+            <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
+        </ul>
+    </body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/index.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/index.html
deleted file mode 100644
index 162e7a1..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFS/index.html
+++ /dev/null
@@ -1,162 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>GetHDFS</title>
-
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>This processor reads files from an HDFS cluster into NiFi 
FlowFiles.</p>
-
-        <p>
-            <strong>Modifies Attributes:</strong>
-        </p>
-        <table border="1">
-            <thead>
-                <tr>
-                    <th>Attribute Name</th>
-                    <th>Description</th>
-                </tr>
-            </thead>
-            <tbody>
-                <tr>
-                    <td>filename</td>
-                    <td>The name of the file that was read from HDFS.</td>
-                </tr>
-                <tr>
-                    <td>path</td>
-                    <td>The path is set to the relative path of the file's 
directory on HDFS. For example, if the <em>Directory</em>
-                        property is set to <code>/tmp</code>, then files 
picked up from /tmp will have the path attribute set to
-                        &quot;<code>./</code>&quot;. If the <em>Recurse 
Subdirectories</em> property is set to <code>true</code>
-                        and a file is picked up from /tmp/abc/1/2/3, then the 
path attribute will be set to &quot;<code>abc/1/2/3</code>&quot;.
-                    </td>
-                </tr>
-            </tbody>
-        </table>
-
-        <h2>Properties:</h2>
-        <p>In the list below, the names of required properties appear in bold. 
Any other properties (not in bold) are
-            considered optional. If a property has a default value, it is 
indicated. If a property supports the use of the
-            NiFi Expression Language (or simply, "expression language"), that 
is also indicated.</p>
-        <ul>
-            <li>Hadoop Configuration Resources
-                <ul>
-                    <li>A file or comma separated list of files which contains 
the Hadoop file system configuration.
-                        Without this, Hadoop will search the classpath for a 
'core-site.xml' and 'hdfs-site.xml' file or will
-                        revert to a default configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Directory</strong>
-                <ul>
-                    <li>The HDFS directory from which FlowFile content should 
be read.</li>
-                    <li>Default value: none</li>
-                </ul></li>
-            <li><strong>Recurse Subdirectories</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true will pull 
files from subdirectories of the HDFS <em>Directory</em>.
-                    </li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Keep Source File</strong>
-                <ul>
-                    <li>A Boolean value (true/false), indicates whether to 
keep (true) or delete (false) the file from HDFS
-                        after it has been successfully transferred.</li>
-                    <li>Default value: false</li>
-                </ul></li>
-            <li>File Filter Regex
-                <ul>
-                    <li>A Java Regular Expression for filtering Filenames; if 
a filter is supplied then only files whose
-                        names match that Regular Expression will be fetched, 
otherwise all files will be fetched.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Filter Match Name Only</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true <em>File 
Filter Regex</em> will match on just the filename,
-                        otherwise subdirectory names will be included with 
filename in the regex comparison.
-                    </li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Ignore Dotted Files</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true files whose 
names begin with a dot (&quot;.&quot;) will not
-                        be fetched.</li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Minimum File Age</strong>
-                <ul>
-                    <li>The minimum age that a file must be in order to be 
fetched; any file that is younger than this
-                        amount of time (based on last modification time) will 
be ignored. The value must be a non-negative
-                        integer and be followed by a time unit, such as nanos, 
millis, secs, mins, hrs, days.</li>
-                    <li>Default value: 0 sec</li>
-                </ul></li>
-            <li>Maximum File Age
-                <ul>
-                    <li>The maximum age that a file must be in order to be 
fetched; any file that is older than this amount
-                        of time (based on last modification time) will be 
ignored. The value must be a non-negative integer,
-                        followed by a time unit, such as nanos, millis, secs, 
mins, hrs, days. Cannot be less than 100 millis.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Polling Interval</strong>
-                <ul>
-                    <li>Indicates how long to wait between performing 
directory listings. The value must be a non-negative
-                        integer and be followed by a time unit, such as nanos, 
millis, secs, mins, hrs, days.</li>
-                    <li>Default value: 0 sec</li>
-                </ul></li>
-            <li><strong>Batch Size</strong>
-                <ul>
-                    <li>The maximum number of files to pull in each iteration, 
based on configured run schedule.</li>
-                    <li>Default value: 100</li>
-                </ul></li>
-            <li>IO Buffer Size
-                <ul>
-                    <li>Amount of memory to use to buffer file contents during 
IO. This is a data size integer that must
-                        include units of B, KB, MB, GB, or TB. This overrides 
the Hadoop Configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-        </ul>
-
-        <h2>Relationships:</h2>
-        <ul>
-            <li>success
-                <ul>
-                    <li>All files retrieved from HDFS are transferred to this 
relationship.</li>
-                </ul>
-            </li>
-            <li>passthrough
-                <ul>
-                    <li>If this processor has an input queue for some reason, 
then FlowFiles arriving on that input are
-                        transferred to this relationship.</li>
-                </ul>
-            </li>
-        </ul>
-
-        <p>
-            <strong>See Also:</strong>
-        </p>
-        <ul>
-            <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
-        </ul>
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/additionalDetails.html
new file mode 100644
index 0000000..ca0b7cd
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/additionalDetails.html
@@ -0,0 +1,32 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>GetHDFSSequenceFile</title>
+
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        <p>
+            <strong>See Also:</strong>
+        </p>
+        <ul>
+            <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
+        </ul>
+    </body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/index.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/index.html
deleted file mode 100644
index 71f5fa7..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.GetHDFSSequenceFile/index.html
+++ /dev/null
@@ -1,150 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>GetHDFSSequenceFile</title>
-
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>This processor is used to pull files from HDFS. The files being 
pulled in MUST be <code>SequenceFile</code> 
-            formatted files. The processor creates a flow file for each 
key/value entry in the ingested <code>SequenceFile</code>. 
-            The created flow file's content depends on the value of the 
optional configuration property <b>FlowFile Content</b>. Currently,
-            there are two choices: VALUE ONLY and KEY VALUE PAIR. With the 
prior, only the <code>SequenceFile</code> value element is 
-            written to the flow file contents. With the latter, the 
<code>SequenceFile</code> key and value are written to the flow file 
-            contents as serialized objects; the format is key length (int), 
key(String), value length(int), value(bytes). The default is
-            VALUE ONLY.</p>
-        NOTE: This processor loads the entire value entry into memory. While 
the size limit for a value entry is 2GB, this will cause 
-        memory problems if there are too many concurrent tasks and the data 
being ingested is large.
-
-        <h2>Properties:</h2>
-        <p>In the list below, the names of required properties appear in bold. 
Any other properties (not in bold) are
-            considered optional. If a property has a default value, it is 
indicated. If a property supports the use of the
-            NiFi Expression Language (or simply, "expression language"), that 
is also indicated.</p>
-        <ul>
-            <li>Hadoop Configuration Resources
-                <ul>
-                    <li>A file or comma separated list of files which contains 
the Hadoop file system configuration.
-                        Without this, Hadoop will search the classpath for a 
'core-site.xml' and 'hdfs-site.xml' file or will
-                        revert to a default configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>FlowFile Content</strong>
-                <ul>
-                    <li>Indicate if the content is to be both the key and 
value of the Sequence File, or just the value.</li>
-                    <li>Default value: VALUE ONLY</li>
-                </ul>
-            </li>
-            <li><strong>Directory</strong>
-                <ul>
-                    <li>The HDFS directory from which FlowFile content should 
be read.</li>
-                    <li>Default value: none</li>
-                </ul></li>
-            <li><strong>Recurse Subdirectories</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true will pull 
files from subdirectories of the HDFS <em>Directory</em>.
-                    </li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Keep Source File</strong>
-                <ul>
-                    <li>A Boolean value (true/false), indicates whether to 
keep (true) or delete (false) the file from HDFS
-                        after it has been successfully transferred.</li>
-                    <li>Default value: false</li>
-                </ul></li>
-            <li>File Filter Regex
-                <ul>
-                    <li>A Java Regular Expression for filtering Filenames; if 
a filter is supplied then only files whose
-                        names match that Regular Expression will be fetched, 
otherwise all files will be fetched.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Filter Match Name Only</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true <em>File 
Filter Regex</em> will match on just the filename,
-                        otherwise subdirectory names will be included with 
filename in the regex comparison.
-                    </li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Ignore Dotted Files</strong>
-                <ul>
-                    <li>A Boolean value (true/false), when true files whose 
names begin with a dot (&quot;.&quot;) will not
-                        be fetched.</li>
-                    <li>Default value: true</li>
-                </ul></li>
-            <li><strong>Minimum File Age</strong>
-                <ul>
-                    <li>The minimum age that a file must be in order to be 
fetched; any file that is younger than this
-                        amount of time (based on last modification time) will 
be ignored. The value must be a non-negative
-                        integer and be followed by a time unit, such as nanos, 
millis, secs, mins, hrs, days.</li>
-                    <li>Default value: 0 sec</li>
-                </ul></li>
-            <li>Maximum File Age
-                <ul>
-                    <li>The maximum age that a file must be in order to be 
fetched; any file that is older than this amount
-                        of time (based on last modification time) will be 
ignored. The value must be a non-negative integer,
-                        followed by a time unit, such as nanos, millis, secs, 
mins, hrs, days. Cannot be less than 100 millis.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Polling Interval</strong>
-                <ul>
-                    <li>Indicates how long to wait between performing 
directory listings. The value must be a non-negative
-                        integer and be followed by a time unit, such as nanos, 
millis, secs, mins, hrs, days.</li>
-                    <li>Default value: 0 sec</li>
-                </ul></li>
-            <li><strong>Batch Size</strong>
-                <ul>
-                    <li>The maximum number of files to pull in each iteration, 
based on configured run schedule.</li>
-                    <li>Default value: 100</li>
-                </ul></li>
-            <li>IO Buffer Size
-                <ul>
-                    <li>Amount of memory to use to buffer file contents during 
IO. This is a data size integer that must
-                        include units of B, KB, MB, GB, or TB. This overrides 
the Hadoop Configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-        </ul>
-
-        <h2>Relationships:</h2>
-        <ul>
-            <li>success
-                <ul>
-                    <li>All files retrieved from HDFS are transferred to this 
relationship.</li>
-                </ul>
-            </li>
-            <li>passthrough
-                <ul>
-                    <li>If this processor has an input queue for some reason, 
then FlowFiles arriving on that input are
-                        transferred to this relationship.</li>
-                </ul>
-            </li>
-        </ul>
-
-        <p>
-            <strong>See Also:</strong>
-        </p>
-        <ul>
-            <li><a 
href="../org.apache.nifi.processors.hadoop.PutHDFS/index.html">PutHDFS</a></li>
-        </ul>
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/additionalDetails.html
new file mode 100644
index 0000000..1a02d10
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/additionalDetails.html
@@ -0,0 +1,51 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>PutHDFS</title>
+
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        
+        <p>
+            <strong>Uses Attributes:</strong>
+        </p>
+        <table border="1">
+            <thead>
+                <tr>
+                    <th>Attribute Name</th>
+                    <th>Description</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                    <td>filename</td>
+                    <td>The name of the file written to HDFS comes from the 
value of this attribute.</td>
+                </tr>
+            </tbody>
+        </table>
+
+        <p>
+            <strong>See Also:</strong>
+        </p>
+        <ul>
+            <li><a 
href="../org.apache.nifi.processors.hadoop.GetHDFS/index.html">GetHDFS</a></li>
+        </ul>
+    </body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/index.html
 
b/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/index.html
deleted file mode 100644
index cacdd72..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-hadoop-bundle/nifi-hdfs-processors/src/main/resources/docs/org.apache.nifi.processors.hadoop.PutHDFS/index.html
+++ /dev/null
@@ -1,159 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>PutHDFS</title>
-
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>
-            This processor writes FlowFiles to an HDFS cluster. It will create 
directories in which to store files as
-            needed based on the <em>Directory</em> property.
-        </p>
-
-        <p>
-            When files are written to HDFS, the file's owner is the user 
identity of the NiFi process, the file's group is the
-            group of the parent directory, and the read/write/execute 
permissions use the default umask. The owner can be
-            overridden using the <em>Remote Owner</em> property, the group can 
be overridden using the <em>Remote Group</em>
-            property, and the read/write/execute permissions can be overridden 
using the <em>Permissions umask</em> property.
-        </p>
-        <p>NOTE: This processor can change owner or group only if the user 
identity of the NiFi process has super user
-            privilege in HDFS to do so.</p>
-        <p>
-            NOTE: The <em>Permissions umask</em> property cannot add execute 
permissions to regular files.
-        </p>
-
-        <p>
-            <strong>Uses Attributes:</strong>
-        </p>
-        <table border="1">
-            <thead>
-                <tr>
-                    <th>Attribute Name</th>
-                    <th>Description</th>
-                </tr>
-            </thead>
-            <tbody>
-                <tr>
-                    <td>filename</td>
-                    <td>The name of the file written to HDFS comes from the 
value of this attribute.</td>
-                </tr>
-            </tbody>
-        </table>
-
-        <h2>Properties:</h2>
-        <p>In the list below, the names of required properties appear in bold. 
Any other properties (not in bold) are
-            considered optional. If a property has a default value, it is 
indicated. If a property supports the use of the
-            NiFi Expression Language (or simply, "expression language"), that 
is also indicated.</p>
-        <ul>
-            <li>Hadoop Configuration Resources
-                <ul>
-                    <li>A file or comma separated list of files which contains 
the Hadoop file system configuration.
-                        Without this, Hadoop will search the classpath for a 
'core-site.xml' and 'hdfs-site.xml' file or will
-                        revert to a default configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li><strong>Directory</strong>
-                <ul>
-                    <li>The HDFS directory to which FlowFile content should be 
written. This property supports the
-                        expression language so you can keep the FlowFile's 
directory structure by using the ${path} attribute
-                        reference, e.g. /in/data/${path}.</li>
-                    <li>Default value: none</li>
-                    <li>Supports expression language: true</li>
-                </ul></li>
-            <li><strong>Conflict Resolution Strategy</strong>
-                <ul>
-                    <li>Indicates what should happen when a file with the same 
name already exists in the output directory.
-                        Valid options are:</li>
-                    <ul>
-                        <li>replace - existing file is overwritten by new 
file</li>
-                        <li>ignore - existing file is untouched, FlowFile 
routed to success</li>
-                        <li>fail - existing file is untouched, FlowFile routed 
to failure</li>
-                    </ul>
-                    <li>Default value: fail</li>
-                </ul></li>
-            <li>Block Size
-                <ul>
-                    <li>Size of each block as written to HDFS. This is a data 
size integer that must include units of B,
-                        KB, MB, GB, or TB. This overrides the Hadoop 
Configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li>IO Buffer Size
-                <ul>
-                    <li>Amount of memory to use to buffer file contents during 
IO. This is a data size integer that must
-                        include units of B, KB, MB, GB, or TB. This overrides 
the Hadoop Configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li>Replication
-                <ul>
-                    <li>Number of times that HDFS will replicate each file. 
This must be an integer greater than 0. This
-                        overrides the Hadoop Configuration.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li>Permissions umask
-                <ul>
-                    <li>A umask represented as an octal number which 
determines the permissions of files written to HDFS.
-                        This overrides the Hadoop Configuration 
dfs.umaskmode.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li>Remote Owner
-                <ul>
-                    <li>Changes the owner of the HDFS file to this value after 
it is written. This only works if NiFi is
-                        running as a user that has HDFS super user privilege 
to change owner.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-            <li>Remote Group
-                <ul>
-                    <li>Changes the group of the HDFS file to this value after 
it is written. This only works if NiFi is
-                        running as a user that has HDFS super user privilege 
to change group.</li>
-                    <li>Default value: none</li>
-                </ul>
-            </li>
-        </ul>
-
-        <h2>Relationships:</h2>
-        <ul>
-            <li>success
-                <ul>
-                    <li>Files that have been successfully written to HDFS are 
transferred to this relationship.</li>
-                </ul>
-            </li>
-            <li>failure
-                <ul>
-                    <li>Files that could not be written to HDFS for some 
reason are transferred to this relationship.</li>
-                </ul>
-            </li>
-        </ul>
-
-        <p>
-            <strong>See Also:</strong>
-        </p>
-        <ul>
-            <li><a 
href="../org.apache.nifi.processors.hadoop.GetHDFS/index.html">GetHDFS</a></li>
-        </ul>
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/additionalDetails.html
new file mode 100644
index 0000000..daee6ee
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/additionalDetails.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>GetKafka</title>
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        <!-- Processor Documentation 
================================================== -->
+        <h2>Description:</h2>
+        <p>
+               This Processors polls <a href="http://kafka.apache.org/";>Apache 
Kafka</a>
+               for data. When a message is received from Kafka, this Processor 
emits a FlowFile
+               where the content of the FlowFile is the value of the Kafka 
message. If the
+               message has a key associated with it, an attribute named 
<code>kafka.key</code>
+               will be added to the FlowFile, with the value being the UTF-8 
Encoded value
+               of the Message's Key.
+        </p>
+        <p>
+               Kafka supports the notion of a Consumer Group when pulling 
messages in order to
+               provide scalability while still offering a publish-subscribe 
interface. Each
+               Consumer Group must have a unique identifier. The Consumer 
Group identifier that
+               is used by NiFi is the UUID of the Processor. This means that 
all of the nodes
+               within a cluster will use the same Consumer Group Identifier so 
that they do
+               not receive duplicate data but multiple GetKafka Processors can 
be used to pull
+               from multiple Topics, as each Processor will receive a 
different Processor UUID 
+               and therefore a different Consumer Group Identifier.
+        </p>
+
+        <p>
+            <strong>Modifies Attributes:</strong>
+        </p>
+        <table border="1">
+            <thead>
+                <tr>
+                    <th>Attribute Name</th>
+                    <th>Description</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                       <td>kafka.topic</td>
+                       <td>The name of the Kafka Topic from which the message 
was received</td>
+                </tr>
+                <tr>
+                    <td>kafka.key</td>
+                    <td>The key of the Kafka message, if it exists and batch 
size is 1. If the message does not have a key,
+                       or if the batch size is greater than 1, this attribute 
will not be added.</td>
+                </tr>
+                <tr>
+                       <td>kafka.partition</td>
+                       <td>The partition of the Kafka Topic from which the 
message was received. This attribute is added only
+                               if the batch size is 1.</td>
+                </tr>
+                <tr>
+                       <td>kafka.offset</td>
+                       <td>The offset of the message within the Kafka 
partition. This attribute is added only
+                               if the batch size is 1.</td>
+                </tr>
+            </tbody>
+        </table>
+    </body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/index.html
 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/index.html
deleted file mode 100644
index 279dd75..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.GetKafka/index.html
+++ /dev/null
@@ -1,173 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>GetKafka</title>
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>
-               This Processors polls <a href="http://kafka.apache.org/";>Apache 
Kafka</a>
-               for data. When a message is received from Kafka, this Processor 
emits a FlowFile
-               where the content of the FlowFile is the value of the Kafka 
message. If the
-               message has a key associated with it, an attribute named 
<code>kafka.key</code>
-               will be added to the FlowFile, with the value being the UTF-8 
Encoded value
-               of the Message's Key.
-        </p>
-        <p>
-               Kafka supports the notion of a Consumer Group when pulling 
messages in order to
-               provide scalability while still offering a publish-subscribe 
interface. Each
-               Consumer Group must have a unique identifier. The Consumer 
Group identifier that
-               is used by NiFi is the UUID of the Processor. This means that 
all of the nodes
-               within a cluster will use the same Consumer Group Identifier so 
that they do
-               not receive duplicate data but multiple GetKafka Processors can 
be used to pull
-               from multiple Topics, as each Processor will receive a 
different Processor UUID 
-               and therefore a different Consumer Group Identifier.
-        </p>
-
-        <p>
-            <strong>Modifies Attributes:</strong>
-        </p>
-        <table border="1">
-            <thead>
-                <tr>
-                    <th>Attribute Name</th>
-                    <th>Description</th>
-                </tr>
-            </thead>
-            <tbody>
-                <tr>
-                       <td>kafka.topic</td>
-                       <td>The name of the Kafka Topic from which the message 
was received</td>
-                </tr>
-                <tr>
-                    <td>kafka.key</td>
-                    <td>The key of the Kafka message, if it exists and batch 
size is 1. If the message does not have a key,
-                       or if the batch size is greater than 1, this attribute 
will not be added.</td>
-                </tr>
-                <tr>
-                       <td>kafka.partition</td>
-                       <td>The partition of the Kafka Topic from which the 
message was received. This attribute is added only
-                               if the batch size is 1.</td>
-                </tr>
-                <tr>
-                       <td>kafka.offset</td>
-                       <td>The offset of the message within the Kafka 
partition. This attribute is added only
-                               if the batch size is 1.</td>
-                </tr>
-            </tbody>
-        </table>
-
-
-        <p>
-            <strong>Properties:</strong>
-        </p>
-        <p>In the list below, the names of required properties appear
-            in bold. Any other properties (not in bold) are considered 
optional.
-            If a property has a default value, it is indicated. If a property
-            supports the use of the NiFi Expression Language (or simply,
-            "expression language"), that is also indicated.</p>
-        <ul>
-            <li><strong>ZooKeeper Connection String</strong>
-                <ul>
-                    <li>The Connection String to use in order to connect to 
ZooKeeper. This is often a 
-                       comma-separated list of &lt;host&gt;:&lt;port&gt; 
combinations. For example, 
-                       host1:2181,host2:2181,host3:2188</li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Topic Name</strong>
-                <ul>
-                    <li>The Kafka Topic to pull messages from</li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Zookeeper Commit Frequency</strong>
-                <ul>
-                    <li>Specifies how often to communicate with ZooKeeper to 
indicate which messages have been pulled. 
-                       A longer time period will result in better overall 
performance but can result in more data 
-                       duplication if a NiFi node is lost
-                    </li>
-                    <li>Default value: 60 secs</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>ZooKeeper Communications Timeout</strong>
-                <ul>
-                    <li>The amount of time to wait for a response from 
ZooKeeper before determining that there is a communications error</li>
-                    <li>Default value: 30 secs</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Kafka Communications Timeout</strong>
-                <ul>
-                    <li>The amount of time to wait for a response from Kafka 
before determining that there is a communications error</li>
-                    <li>Default value: 30 secs</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            
-            <li><strong>Batch Size</strong>
-                <ul>
-                    <li>Specifies the maximum number of messages to combine 
into a single FlowFile. 
-                       These messages will be concatenated together with the 
&lt;Message Demarcator&gt; 
-                       string placed between the content of each message. If 
the messages from Kafka 
-                       should not be concatenated together, leave this value 
at 1.</li>
-                    <li>Default value: 1</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            
-            <li><strong>Message Demarcator</strong>
-                <ul>
-                    <li>Specifies the characters to use in order to demarcate 
multiple messages from Kafka. 
-                       If the &lt;Batch Size&gt; property is set to 1, this 
value is ignored. Otherwise, for each two 
-                       subsequent messages in the batch, this value will be 
placed in between them. This property will
-                       treat "\n" as a new-line, "\r" as a carriage return and 
"\t" as a tab character. All other
-                       characters are treated as literal characters.
-                    </li>
-                    <li>Default value: \n</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Client Name</strong>
-                <ul>
-                    <li>Client Name to use when communicating with Kafka</li>
-                    <li>Default value: "NiFi-" followed by the UUID of the 
Processor</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            
-        </ul>
-        <p>
-            <strong>Relationships:</strong>
-        </p>
-        <ul>
-            <li>success
-                <ul>
-                    <li>All messages that are received from Kafka are routed 
to the 'success' relationship</li>
-                </ul>
-            </li>
-        </ul>
-
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/additionalDetails.html
new file mode 100644
index 0000000..04d9463
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/additionalDetails.html
@@ -0,0 +1,45 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>PutKafka</title>
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+        <!-- Processor Documentation 
================================================== -->
+        <h2>Description:</h2>
+        <p>
+               This Processor puts the contents of a FlowFile to a Topic in 
+               <a href="http://kafka.apache.org/">Apache Kafka</a>. The full 
contents of
+               a FlowFile becomes the contents of a single message in Kafka.
+               This message is optionally assigned a key by using the
+               &lt;Kafka Key&gt; Property.
+        </p>
+
+               <p>
+                       The Processor allows the user to configure an optional 
Message Delimiter that
+                       can be used to send many messages per FlowFile. For 
example, a \n could be used
+                       to indicate that the contents of the FlowFile should be 
used to send one message
+                       per line of text. If the property is not set, the 
entire contents of the FlowFile
+                       will be sent as a single message. When using the 
delimiter, if some messages are
+                       successfully sent but other messages fail to send, the 
FlowFile will be FORKed into
+                       two child FlowFiles, with the successfully sent 
messages being routed to 'success'
+                       and the messages that could not be sent going to 
'failure'.
+               </p>
+    </body>
+</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/index.html
 
b/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/index.html
deleted file mode 100644
index 29b7c17..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-kafka-bundle/nifi-kafka-processors/src/main/resources/docs/org.apache.nifi.processors.kafka.PutKafka/index.html
+++ /dev/null
@@ -1,189 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>PutKafka</title>
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>
-               This Processors puts the contents of a FlowFile to a Topic in 
-               <a href="http://kafka.apache.org/">Apache Kafka</a>. The full 
contents of
-               a FlowFile becomes the contents of a single message in Kafka.
-               This message is optionally assigned a key by using the
-               &lt;Kafka Key&gt; Property.
-        </p>
-
-               <p>
-                       The Processor allows the user to configure an optional 
Message Delimiter that
-                       can be used to send many messages per FlowFile. For 
example, a \n could be used
-                       to indicate that the contents of the FlowFile should be 
used to send one message
-                       per line of text. If the property is not set, the 
entire contents of the FlowFile
-                       will be sent as a single message. When using the 
delimiter, if some messages are
-                       successfully sent but other messages fail to send, the 
FlowFile will be FORKed into
-                       two child FlowFiles, with the successfully sent 
messages being routed to 'success'
-                       and the messages that could not be sent going to 
'failure'.
-               </p>
-
-        <p>
-            <strong>Properties:</strong>
-        </p>
-        <p>In the list below, the names of required properties appear
-            in bold. Any other properties (not in bold) are considered 
optional.
-            If a property has a default value, it is indicated. If a property
-            supports the use of the NiFi Expression Language (or simply,
-            "expression language"), that is also indicated.</p>
-        <ul>
-            <li><strong>Known Brokers</strong>
-                <ul>
-                    <li>
-                       A comma-separated list of known Kafka Brokers in the 
format 
-                       &lt;host&gt;:&lt;port&gt;. This list does not need to be
-                       exhaustive but provides a mechanism for determining 
which
-                       other nodes belong to the Kafka cluster.
-                    </li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Topic Name</strong>
-                <ul>
-                    <li>The Kafka Topic to send messages to. While the GetKafka
-                       Processor requires a statically named Topic so that it 
knows
-                       where to fetch messages from, the PutKafka Processor 
does allow
-                       the Expression Language to be used so that a single 
PutKafka
-                       Processor can be used to send messages to many 
different Kafka
-                       topics.
-                    </li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: true</li>
-                </ul>
-            </li>
-            
-            <li>Kafka Key
-                <ul>
-                    <li>
-                       The Key to use for the Message. If no value is given, 
the message
-                       will not be given a Key.
-                    </li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: true</li>
-                </ul>
-            </li>
-            <li><strong>Delivery Guarantee</strong>
-                <ul>
-                    <li>
-                       Specifies the requirement for guaranteeing that a 
message is sent to Kafka.
-                       This Property can have one of three different values:
-                       <ul>
-                               <li>
-                                       <b>Guarantee Replicated Delivery</b> - 
FlowFile will be routed to 
-                                               failure unless the message is 
replicated to the appropriate number 
-                                               of Kafka Nodes according to the 
Topic configuration
-                               </li>
-                               <li>
-                                       <b>Guarantee Single Node Delivery</b> - 
FlowFile will be routed to 
-                                               success if the message is 
received by a single Kafka node, 
-                                               whether or not it is 
replicated. This is faster than 
-                                               &lt;Guarantee Replicated 
Delivery&gt; but can result in data loss 
-                                               if a Kafka node crashes
-                               </li>
-                               <li>
-                                       <b>Best Effort</b> - FlowFile will be 
routed to success after successfully 
-                                               writing the content to a Kafka 
node, without waiting for a response. 
-                                               This provides the best 
performance but may result in data loss.
-                               </li>
-                       </ul>
-                    </li>
-                    <li>Default value: Best Effort</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li>Message Delimiter
-                <ul>
-                    <li>
-                       Specifies the delimiter to use for splitting apart 
multiple messages within a single FlowFile. 
-                       If not specified, the entire content of the FlowFile 
will be used as a single message.
-                       If specified, the contents of the FlowFile will be 
split on this delimiter and each section 
-                                               sent as a separate Kafka 
message.
-                    </li>
-                    <li>Default value: no default</li>
-                    <li>Supports expression language: true</li>
-                </ul>
-            </li>
-            <li><strong>Communications Timeout</strong>
-                <ul>
-                    <li>
-                       The amount of time to wait for a response from Kafka 
before determining 
-                       that there is a communications error
-                    </li>
-                    <li>Default value: 30 secs</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Max Buffer Size</strong>
-                <ul>
-                    <li>
-                       The maximum amount of data to buffer in memory before 
sending to Kafka
-                    </li>
-                    <li>Default value: 1 MB</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-            <li><strong>Client Name</strong>
-                <ul>
-                    <li>Client Name to use when communicating with Kafka</li>
-                    <li>Default value: "NiFi-" followed by the UUID of the 
Processor</li>
-                    <li>Supports expression language: false</li>
-                </ul>
-            </li>
-        </ul>
-        
-        
-        <p>
-            <strong>Relationships:</strong>
-        </p>
-        <ul>
-            <li>success
-                <ul>
-                    <li>All FlowFiles that are successfully sent to Kafka are 
routed 
-                       to this relationship. If using the &lt;Message 
Delimiter&gt; property,
-                       it's possible for some messages to be sent while others 
fail. In this
-                       case, only the messages that are successfully sent will 
be routed to
-                       this Relationship while the other messages will be 
routed to the
-                       'failure' relationship.
-                    </li>
-                </ul>
-            </li>
-            
-            <li>failure
-                <ul>
-                    <li>All FlowFiles that cannot be sent to Kafka for any 
reason be routed 
-                       to this relationship. If a portion of a FlowFile is 
successfully sent
-                       to Kafka but not all, only those messages that cannot 
be sent to Kafka
-                       will be routed to this Relationship.
-                    </li>
-                </ul>
-            </li>
-            
-        </ul>
-
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.Base64EncodeContent/index.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.Base64EncodeContent/index.html
 
b/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.Base64EncodeContent/index.html
deleted file mode 100644
index c6a4b4d..0000000
--- 
a/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.Base64EncodeContent/index.html
+++ /dev/null
@@ -1,63 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-    <!--
-      Licensed to the Apache Software Foundation (ASF) under one or more
-      contributor license agreements.  See the NOTICE file distributed with
-      this work for additional information regarding copyright ownership.
-      The ASF licenses this file to You under the Apache License, Version 2.0
-      (the "License"); you may not use this file except in compliance with
-      the License.  You may obtain a copy of the License at
-          http://www.apache.org/licenses/LICENSE-2.0
-      Unless required by applicable law or agreed to in writing, software
-      distributed under the License is distributed on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-      See the License for the specific language governing permissions and
-      limitations under the License.
-    -->
-    <head>
-        <meta charset="utf-8" />
-        <title>EncodeContent</title>
-        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
-    </head>
-
-    <body>
-        <!-- Processor Documentation 
================================================== -->
-        <h2>Description:</h2>
-        <p>
-            This processor base64 encodes FlowFile content, or decodes 
FlowFile content from base64.
-        <p>
-            <strong>Properties:</strong>
-        </p>
-        <p>
-            In the list below, the names of required properties appear in 
bold. Any other properties (not in bold) are considered optional. If a property 
has a default value, it is indicated. If a property supports the use of the 
NiFi Expression Language (or simply, "expression language"), that is also 
indicated.
-        </p>
-
-        <ul>
-            <li><strong>Mode</strong>
-                <ul>
-                    <li>This property specifies whether the content should be 
base64 encoded, or decoded from base64. Valid values are:
-                        <ul>
-                            <li>Encode</li>
-                            <li>Decode</li>
-                        </ul></li>
-                    <li>Default value: Encode</li>
-                    <li>Supports expression language: false</li>
-                </ul></li>
-        </ul>
-        <p>
-            <strong>Relationships:</strong>
-        </p>
-        <ul>
-            <li>failure
-                <ul>
-                    <li>If a FlowFile cannot be encoded or decoded, then it 
follows this relationship</li>
-                </ul></li>
-            <li>success
-                <ul>
-                    <li>If a FlowFile is successfully encoded or decoded, then 
it follows this relationship</li>
-                </ul></li>
-        </ul>
-
-
-    </body>
-</html>

http://git-wip-us.apache.org/repos/asf/incubator-nifi/blob/0fa553ac/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.CompressContent/additionalDetails.html
----------------------------------------------------------------------
diff --git 
a/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.CompressContent/additionalDetails.html
 
b/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.CompressContent/additionalDetails.html
new file mode 100644
index 0000000..4339e32
--- /dev/null
+++ 
b/nifi/nifi-nar-bundles/nifi-standard-bundle/nifi-standard-processors/src/main/resources/docs/org.apache.nifi.processors.standard.CompressContent/additionalDetails.html
@@ -0,0 +1,68 @@
+<!DOCTYPE html>
+<html lang="en">
+    <!--
+      Licensed to the Apache Software Foundation (ASF) under one or more
+      contributor license agreements.  See the NOTICE file distributed with
+      this work for additional information regarding copyright ownership.
+      The ASF licenses this file to You under the Apache License, Version 2.0
+      (the "License"); you may not use this file except in compliance with
+      the License.  You may obtain a copy of the License at
+          http://www.apache.org/licenses/LICENSE-2.0
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+    -->
+    <head>
+        <meta charset="utf-8" />
+        <title>CompressContent</title>
+
+        <link rel="stylesheet" href="../../css/component-usage.css" 
type="text/css" />
+    </head>
+
+    <body>
+
+        <!-- Processor Documentation 
================================================== -->
+        
+        <p>
+            <strong>Uses Attributes:</strong>
+        </p>
+        <table border="1">
+            <thead>
+                <tr>
+                    <th>Attribute Name</th>
+                    <th>Description</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                    <td>mime.type</td>
+                    <td>If the <code>Compression Format</code> is set to 
<code>use mime.type attribute</code>,
+                        this attribute is used to determine the compression 
type. Otherwise, this attribute is ignored.</td>
+                </tr>
+            </tbody>
+        </table>
+
+        <p>
+            <strong>Modifies Attributes:</strong>
+        </p>
+        <table border="1">
+            <thead>
+                <tr>
+                    <th>Attribute Name</th>
+                    <th>Description</th>
+                </tr>
+            </thead>
+            <tbody>
+                <tr>
+                    <td>mime.type</td>
+                    <td>If the <code>Mode</code> property is set to 
<code>compress</code>, the appropriate MIME Type is set. If the 
+                        <code>Mode</code> property is set to 
<code>decompress</code> and the file is successfully decompressed, 
+                        this attribute is removed, as the MIME Type is no 
longer known.
+                    </td>
+                </tr>
+            </tbody>
+        </table>
+    </body>
+</html>

Reply via email to