Repository: incubator-impala
Updated Branches:
  refs/heads/master 1609585dc -> c46379602


IMPALA-3401: [DOCS] Remove blocks of content devoted to CM

Change-Id: I07d56463939fe152c1c6124cc0ad5591337bad6b
Reviewed-on: http://gerrit.cloudera.org:8080/6179
Reviewed-by: Ambreen Kazi <[email protected]>
Tested-by: Impala Public Jenkins
Reviewed-by: John Russell <[email protected]>


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/8b417c69
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/8b417c69
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/8b417c69

Branch: refs/heads/master
Commit: 8b417c692140e831deaf54d066e785bacbd5e68b
Parents: 1609585
Author: John Russell <[email protected]>
Authored: Tue Feb 28 13:02:00 2017 -0800
Committer: Impala Public Jenkins <[email protected]>
Committed: Wed Mar 1 22:49:32 2017 +0000

----------------------------------------------------------------------
 docs/impala.ditamap                         |   2 -
 docs/impala_keydefs.ditamap                 |   1 -
 docs/shared/impala_common.xml               |  26 --
 docs/topics/impala_admin.xml                |   1 -
 docs/topics/impala_admission.xml            |  41 ---
 docs/topics/impala_authorization.xml        |  55 ---
 docs/topics/impala_breakpad.xml             |   7 +-
 docs/topics/impala_cm_installation.xml      |  74 ----
 docs/topics/impala_config_options.xml       |  33 --
 docs/topics/impala_hbase.xml                |   5 -
 docs/topics/impala_howto_rm.xml             | 438 -----------------------
 docs/topics/impala_incompatible_changes.xml |  80 -----
 docs/topics/impala_isilon.xml               |  23 --
 docs/topics/impala_ldap.xml                 |  30 --
 docs/topics/impala_logging.xml              |  23 --
 docs/topics/impala_perf_hdfs_caching.xml    |  18 -
 docs/topics/impala_processes.xml            |  22 --
 docs/topics/impala_proxy.xml                |  38 --
 docs/topics/impala_scalability.xml          |   7 +-
 docs/topics/impala_ssl.xml                  | 146 --------
 docs/topics/impala_txtfile.xml              |   9 -
 docs/topics/impala_webui.xml                |   5 -
 22 files changed, 2 insertions(+), 1082 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/impala.ditamap
----------------------------------------------------------------------
diff --git a/docs/impala.ditamap b/docs/impala.ditamap
index 10876ba..41730c4 100644
--- a/docs/impala.ditamap
+++ b/docs/impala.ditamap
@@ -51,7 +51,6 @@ under the License.
     <topicref href="topics/impala_schema_design.xml"/>
   </topicref>
   <topicref audience="standalone" href="topics/impala_install.xml#install">
-    <topicref href="topics/impala_cm_installation.xml#cm_installation" 
audience="hidden"/>
     <topicref href="topics/impala_noncm_installation.xml#noncm_installation"/>
 <!-- Removed when Hue docs were removed from main library. Might be worth 
resurrecting someday as general Impala+Hue topic. -->
 <!--           <topicref href="impala_hue_xi20559.xml"/> -->
@@ -69,7 +68,6 @@ under the License.
   <topicref href="topics/impala_admin.xml">
     <topicref audience="standalone" href="topics/impala_admission.xml"/>
     <topicref audience="standalone" 
href="topics/impala_resource_management.xml"/>
-    <topicref href="topics/impala_howto_rm.xml"/>
     <topicref href="topics/impala_timeouts.xml"/>
     <topicref href="topics/impala_proxy.xml"/>
     <topicref href="topics/impala_disk_space.xml"/>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/impala_keydefs.ditamap
----------------------------------------------------------------------
diff --git a/docs/impala_keydefs.ditamap b/docs/impala_keydefs.ditamap
index 08e0f4f..b48d6d7 100644
--- a/docs/impala_keydefs.ditamap
+++ b/docs/impala_keydefs.ditamap
@@ -10341,7 +10341,6 @@ 
https://issues.cloudera.org/secure/IssueNavigator.jspa?reset=true&amp;jqlQuery=p
   <keydef href="topics/impala_admin.xml" keys="admin"/>
   <keydef href="topics/impala_admission.xml" keys="admission_control"/>
   <keydef href="topics/impala_resource_management.xml" 
keys="resource_management"/>
-  <keydef href="topics/impala_howto_rm.xml" keys="howto_impala_rm"/>
   <keydef href="topics/impala_timeouts.xml" keys="timeouts"/>
   <keydef href="topics/impala_proxy.xml" keys="proxy"/>
   <keydef href="topics/impala_disk_space.xml" keys="disk_space"/>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/shared/impala_common.xml
----------------------------------------------------------------------
diff --git a/docs/shared/impala_common.xml b/docs/shared/impala_common.xml
index 4604a33..a6d88c3 100644
--- a/docs/shared/impala_common.xml
+++ b/docs/shared/impala_common.xml
@@ -574,20 +574,6 @@ under the License.
             in the <cmdname>impalad</cmdname> and <cmdname>catalogd</cmdname> 
configuration settings.
           </p>
         </li>
-        <li audience="hidden">
-          <p>
-            For clusters managed by Cloudera Manager, select the 
-            <uicontrol>Use HDFS Rules to Map Kerberos Principals to Short 
Names</uicontrol>
-            checkbox to enable the service-wide 
<codeph>load_auth_to_local_rules</codeph> configuration setting.
-            Use the Cloudera Manager setting, <uicontrol>Additional Rules to 
Map Kerberos Principals to Short Names</uicontrol>,
-            to insert mapping rules.
-            Then restart the Impala service.
-          </p>
-          <p>
-            See <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/sg_auth_to_local_isolate.html";
 scope="external" format="html">Using Auth-to-Local Rules to Isolate Cluster 
Users</xref>
-            for general information about this feature.
-          </p>
-        </li>
       </ul>
     </p>
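For clusters configured from the command line rather than through a management console, the equivalent of the removed checkbox above is the startup flag it names, passed to the daemons mentioned in the retained text; the mapping rules themselves are read from the hadoop.security.auth_to_local property in core-site.xml (that property name comes from standard Hadoop configuration, not from this commit). A minimal sketch:

    # in the impalad and catalogd startup options
    --load_auth_to_local_rules=true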
 
@@ -608,12 +594,6 @@ under the License.
       Sentry logs all facts that lead up to authorization decisions at the 
debug level. If you do not understand
       why Sentry is denying access, the best way to debug is to temporarily 
turn on debug logging:
       <ul>
-        <li audience="hidden">
-          In Cloudera Manager, add 
<codeph>log4j.logger.org.apache.sentry=DEBUG</codeph> to the logging settings
-          for your service through the corresponding <uicontrol>Logging Safety 
Valve</uicontrol> field for the
-          Impala, Hive Server 2, or Solr Server services.
-        </li>
-
         <li>
           Add <codeph>log4j.logger.org.apache.sentry=DEBUG</codeph> to the 
<filepath>log4j.properties</filepath>
           file on each host in the cluster, in the appropriate configuration 
directory for each service.
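As a reference for the command-line instruction retained above, a minimal sketch of the log4j.properties change (the path is an assumed example; only the org.apache.sentry line comes from the doc text):

    # e.g. /etc/impala/conf/log4j.properties on each host (assumed example path)
    log4j.logger.org.apache.sentry=DEBUG
    # remove the line, or set it back to INFO, once the authorization problem is understood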
@@ -3367,12 +3347,6 @@ sudo pip-python install ssl</codeblock>
         the connection has been closed.
       </note>
 
-      <p id="impala_mr">
-        For detailed information about configuring a cluster to share 
resources between Impala queries and MapReduce jobs, see
-        <xref audience="integrated" 
href="../topics/admin_howto_multitenancy.xml#howto_multitenancy"/><xref 
audience="standalone" 
href="https://www.cloudera.com/documentation/enterprise/latest/topics/admin_howto_multitenancy.html";
 format="html" scope="external"/>
-        and <xref href="../topics/impala_howto_rm.xml#howto_impala_rm"/>.
-      </p>
-
       <note id="llama_beta" type="warning">
         In CDH 5.0.0, the Llama component is in beta. It is intended for 
evaluation of resource management in test
         environments, in combination with Impala and YARN. It is currently not 
recommended for production

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_admin.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_admin.xml b/docs/topics/impala_admin.xml
index 2b9f032..01ed0da 100644
--- a/docs/topics/impala_admin.xml
+++ b/docs/topics/impala_admin.xml
@@ -73,6 +73,5 @@ under the License.
       </ul>
     </p>
 
-<!-- <p conref="../shared/impala_common.xml#common/impala_mr"/> -->
   </conbody>
 </concept>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_admission.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_admission.xml b/docs/topics/impala_admission.xml
index ea47d89..cceae93 100644
--- a/docs/topics/impala_admission.xml
+++ b/docs/topics/impala_admission.xml
@@ -606,32 +606,6 @@ under the License.
       </section>
     </conbody>
 
-    <concept id="admission_config_cm" audience="hidden">
-
-<!-- TK: Maybe all this stuff overlaps with admin_impala_admission_control and 
can be delegated there. -->
-
-      <title>Configuring Admission Control Using Cloudera Manager</title>
-  <prolog>
-    <metadata>
-      <data name="Category" value="Cloudera Manager"/>
-    </metadata>
-  </prolog>
-
-      <conbody>
-
-        <p>
-          In Cloudera Manager, you can configure pools to manage queued Impala 
queries, and the options for the
-          limit on number of concurrent queries and how to handle queries that 
exceed the limit. For details, see
-          <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_mc_managing_resources.html";
 scope="external" format="html">Managing Resources with Cloudera Manager</xref>.
-        </p>
-
-        <p audience="hidden"><!-- Hiding link because that subtopic is now 
hidden. -->
-          See <xref href="#admission_examples"/> for a sample setup for 
admission control under
-          Cloudera Manager.
-        </p>
-      </conbody>
-    </concept>
-
     <concept id="admission_config_noncm">
 
       <title>Configuring Admission Control Using the Command Line</title>
@@ -694,10 +668,6 @@ 
impala.admission-control.pool-queue-timeout-ms.<varname>queue_name</varname></ph
           <keyword keyref="impala25_full"/> and higher.
         </p>
 
-        <p audience="hidden"><!-- Hiding link because that subtopic is now 
hidden. -->
-          See <xref href="#admission_examples/section_etq_qgb_rq"/> for sample 
configuration files
-          for admission control using multiple resource pools, without 
Cloudera Manager.
-        </p>
       </conbody>
     </concept>
 
@@ -708,17 +678,6 @@ 
impala.admission-control.pool-queue-timeout-ms.<varname>queue_name</varname></ph
 
       <conbody>
 
-        <section id="section_fqn_qgb_rq" audience="hidden">
-
-          <title>Example Admission Control Configurations Using Cloudera 
Manager</title>
-
-          <p>
-            For full instructions about configuring dynamic resource pools 
through Cloudera Manager, see
-            <xref audience="integrated" 
href="cm_mc_resource_pools.xml#xd_583c10bfdbd326ba--43d5fd93-1410993f8c2--7ff2"/><xref
 audience="standalone" 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_mc_resource_pools.html";
 scope="external" format="html"/>.
-          </p>
-
-        </section>
-
         <section id="section_etq_qgb_rq">
 
           <title>Example Admission Control Configurations Using Configuration 
Files</title>
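As a concrete reference for this section, a minimal sketch of one per-pool entry using the queue-timeout property that appears in the hunk context above (the pool name root.default, the surrounding llama-site.xml file, and the 60-second value are assumed examples):

    <property>
      <name>impala.admission-control.pool-queue-timeout-ms.root.default</name>
      <value>60000</value>
    </property>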

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_authorization.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_authorization.xml 
b/docs/topics/impala_authorization.xml
index 7890d71..7a360fa 100644
--- a/docs/topics/impala_authorization.xml
+++ b/docs/topics/impala_authorization.xml
@@ -223,16 +223,6 @@ under the License.
       </p>
 
       <ul>
-        <li audience="hidden">
-          <p>
-            In an environment managed by Cloudera Manager, the server name is 
specified through
-            <menucascade><uicontrol>Impala 
(Service-Wide)</uicontrol><uicontrol>Category</uicontrol><uicontrol>Advanced</uicontrol><uicontrol>Sentry
 Service</uicontrol></menucascade> and
-            
<menucascade><uicontrol>Hive</uicontrol><uicontrol>Service-Wide</uicontrol><uicontrol>Advanced</uicontrol><uicontrol>Sentry
 Service</uicontrol></menucascade>.
-            The values must be the same for both, so that Impala and Hive can 
share the privilege rules. Restart
-            the Impala and Hive services after setting or changing this value.
-          </p>
-        </li>
-
         <li>
           <p>
             Specify the <codeph>server1</codeph> value for the 
<codeph>sentry.hive.server</codeph> property in the
@@ -283,20 +273,6 @@ report_generator = 
server=server1-&gt;db=reporting_db-&gt;table=*-&gt;action=SEL
         <codeph>REVOKE</codeph> statements in <keyword 
keyref="impala20_full"/>.)
       </p>
 
-      <p audience="hidden">
-        Hive already had <codeph>GRANT</codeph> and <codeph>REVOKE</codeph> 
statements prior to CDH 5.1, but those
-        statements were not production-ready. CDH 5.1 is the first release 
where those statements use the Sentry
-        framework and are considered GA level. If you used the Hive 
<codeph>GRANT</codeph> and
-        <codeph>REVOKE</codeph> statements prior to CDH 5.1, you must set up 
these privileges with the CDH 5.1
-        versions of <codeph>GRANT</codeph> and <codeph>REVOKE</codeph> to take 
advantage of Sentry authorization.
-      </p>
-
-      <p audience="hidden">
-        For information about using the updated Hive <codeph>GRANT</codeph> 
and <codeph>REVOKE</codeph> statements,
-        see
-        <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_sg_sentry_service.html";
 scope="external" format="html">Sentry
-        service</xref> topic in the <cite>CDH 5 Security Guide</cite>.
-      </p>
     </conbody>
   </concept>
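For readers following the GRANT and REVOKE discussion retained above, a brief SQL sketch of the Sentry-service equivalent of the report_generator policy-file rule shown in the hunk context (the role, database, and privilege come from that rule; the group name is an assumed example):

    CREATE ROLE report_generator;
    GRANT SELECT ON DATABASE reporting_db TO ROLE report_generator;
    GRANT ROLE report_generator TO GROUP report_users;  -- group name is illustrative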
 
@@ -1584,37 +1560,6 @@ column-level security was by creating views that 
referenced particular sets of c
     </conbody>
   </concept>
 
-  <concept id="sentry_cm" audience="hidden">
-
-    <title>Managing Sentry for Impala through Cloudera Manager</title>
-
-    <conbody>
-
-      <p>
-        To enable the Sentry service for Impala and Hive, set 
<uicontrol>Hive/Impala &gt; Service-Wide &gt; Sentry
-        Service</uicontrol> parameter to the Sentry service. Then restart 
Impala and Hive. Simply adding Sentry
-        service as a dependency and restarting enables Impala and Hive to use 
the Sentry service.
-      </p>
-
-      <p>
-        To set the server name to use when granting server level privileges, 
set the <uicontrol>Hive &gt;
-        Service-Wide &gt; Advanced &gt; Server Name for Sentry 
Authorization</uicontrol> parameter. When using
-        Sentry with the Hive Metastore, you can specify the list of users that 
are allowed to bypass Sentry
-        Authorization in Hive Metastore using <uicontrol>Hive &gt; 
Service-Wide &gt; Security &gt; Bypass Sentry
-        Authorization Users</uicontrol>. These are usually service users that 
already ensure all activity has been
-        authorized.
-      </p>
-
-      <note>
-        The <uicontrol>Hive/Impala &gt; Service-Wide &gt; Policy File Based 
Sentry</uicontrol> tab contains
-        parameters only relevant to configuring Sentry using policy files. In 
particular, make sure that the
-        <uicontrol>Enable Sentry Authorization using Policy Files</uicontrol> 
parameter is unchecked when using the
-        Sentry service. Cloudera Manager throws a validation error if you 
attempt to configure the Sentry service
-        and policy file at the same time.
-      </note>
-    </conbody>
-  </concept>
-
   <concept id="sec_ex_default">
 
     <title>The DEFAULT Database in a Secure Deployment</title>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_breakpad.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_breakpad.xml b/docs/topics/impala_breakpad.xml
index e5e8cc6..5d755fd 100644
--- a/docs/topics/impala_breakpad.xml
+++ b/docs/topics/impala_breakpad.xml
@@ -85,14 +85,9 @@ under the License.
         <!-- Location stated in IMPALA-3581; overridden by different location 
from IMPALA-2686?
              
<filepath><varname>log_directory</varname>/minidumps/<varname>daemon_name</varname></filepath>
 -->
          <ul>
-          <li audience="hidden">
-            <p>
-              Clusters managed by Cloudera Manager: 
<filepath>/var/log/impala-minidumps/<varname>daemon_name</varname></filepath>
-            </p>
-          </li>
           <li>
             <p>
-              Clusters not managed by Cloudera Manager:
+              Clusters not managed by cluster management software:
               
<filepath><varname>impala_log_dir</varname>/<varname>daemon_name</varname>/minidumps/<varname>daemon_name</varname></filepath>
             </p>
           </li>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_cm_installation.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_cm_installation.xml 
b/docs/topics/impala_cm_installation.xml
deleted file mode 100644
index f1a3b04..0000000
--- a/docs/topics/impala_cm_installation.xml
+++ /dev/null
@@ -1,74 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
-<concept id="cm_installation" audience="hidden">
-
-  <title>Installing Impala with Cloudera Manager</title>
-  <prolog>
-    <metadata>
-      <data name="Category" value="Impala"/>
-      <data name="Category" value="Installing"/>
-      <data name="Category" value="Cloudera Manager"/>
-      <data name="Category" value="Administrators"/>
-    </metadata>
-  </prolog>
-
-  <conbody>
-
-    <p>
-      Before installing Impala through the Cloudera Manager interface, make 
sure all applicable nodes have the
-      appropriate hardware configuration and levels of operating system and 
CDH. See
-      <xref href="impala_prereqs.xml#prereqs"/> for details.
-    </p>
-
-    <note rev="1.2.0">
-      <p rev="1.2.0">
-        To install the latest Impala under CDH 4, upgrade Cloudera Manager to 
4.8 or higher. Cloudera Manager 4.8 is
-        the first release that can manage the Impala catalog service 
introduced in Impala 1.2. Cloudera Manager 4.8
-        requires this service to be present, so if you upgrade to Cloudera 
Manager 4.8, also upgrade Impala to the
-        most recent version at the same time.
-<!-- Not so relevant now for 1.1.1, but maybe someday we'll capture all this 
history in a compatibility grid.
-        Upgrade to Cloudera Manager 4.6.2 or higher to enable Cloudera Manager 
to
-        handle access control for the Impala web UI, available by default 
through
-        port 25000 on each Impala host.
-        -->
-      </p>
-    </note>
-
-    <p>
-      For information on installing Impala in a Cloudera Manager-managed 
environment, see
-      <xref audience="integrated" href="cm_ig_install_impala.xml"/><xref 
audience="standalone" 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_ig_install_impala.html";
 scope="external" format="html">Installing Impala</xref>.
-    </p>
-
-    <p>
-      Managing your Impala installation through Cloudera Manager has a number 
of advantages. For example, when you
-      make configuration changes to CDH components using Cloudera Manager, it 
automatically applies changes to the
-      copies of configuration files, such as <codeph>hive-site.xml</codeph>, 
that Impala keeps under
-      <filepath>/etc/impala/conf</filepath>. It also sets up the Hive 
Metastore service that is required for
-      Impala running under CDH 4.1.
-    </p>
-
-    <p>
-      In some cases, depending on the level of Impala, CDH, and Cloudera 
Manager, you might need to add particular
-      component configuration details in some of the free-form option fields 
on the Impala configuration pages
-      within Cloudera Manager. <ph 
conref="../shared/impala_common.xml#common/safety_valve"/>
-    </p>
-  </conbody>
-</concept>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_config_options.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_config_options.xml 
b/docs/topics/impala_config_options.xml
index 3260b75..3c1ae1f 100644
--- a/docs/topics/impala_config_options.xml
+++ b/docs/topics/impala_config_options.xml
@@ -66,35 +66,6 @@ under the License.
 
   </conbody>
 
-  <concept id="config_options_cm" audience="hidden">
-
-    <title>Configuring Impala Startup Options through Cloudera Manager</title>
-
-    <conbody>
-
-      <p>
-        If you manage your cluster through Cloudera Manager, configure the 
settings for all the
-        Impala-related daemons by navigating to this page:
-        
<menucascade><uicontrol>Clusters</uicontrol><uicontrol>Impala</uicontrol><uicontrol>Configuration</uicontrol><uicontrol>View
-        and Edit</uicontrol></menucascade>. See the Cloudera Manager 
documentation for
-        <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_mc_impala_service.html";
 scope="external" format="html">instructions
-        about how to configure Impala through Cloudera Manager</xref>.
-      </p>
-
-      <p>
-        If the Cloudera Manager interface does not yet have a form field for a 
newly added
-        option, or if you need to use special options for debugging and 
troubleshooting, the
-        <uicontrol>Advanced</uicontrol> option page for each daemon includes 
one or more fields
-        where you can enter option names directly.
-        <ph conref="../shared/impala_common.xml#common/safety_valve"/> There 
is also a free-form
-        field for query options, on the top-level <uicontrol>Impala 
Daemon</uicontrol> options
-        page.
-      </p>
-
-    </conbody>
-
-  </concept>
-
   <concept id="config_options_noncm">
 
     <title>Configuring Impala Startup Options through the Command Line</title>
@@ -237,10 +208,6 @@ Starting Impala Catalog Server:                            
[  OK  ]</codeblock>
             to:
           </p>
 <codeblock>export ENABLE_CORE_DUMPS=${ENABLE_COREDUMPS:-true}</codeblock>
-          <p audience="hidden">
-            On systems managed by Cloudera Manager, enable the 
<uicontrol>Enable Core
-            Dump</uicontrol> setting for the Impala service.
-          </p>
 
           <note 
conref="../shared/impala_common.xml#common/core_dump_considerations"/>
         </li>
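For the command-line style of configuration that the retained config_options_noncm topic covers, a minimal sketch of the startup-option file (the /etc/default/impala path, the IMPALA_SERVER_ARGS variable, and the specific flags are assumptions based on the rest of that topic rather than on the lines shown here):

    # /etc/default/impala (assumed standard location for the startup flags)
    IMPALA_SERVER_ARGS=" \
        -log_dir=/var/log/impala \
        -mem_limit=70%"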

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_hbase.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_hbase.xml b/docs/topics/impala_hbase.xml
index 0068e48..0b3754b 100644
--- a/docs/topics/impala_hbase.xml
+++ b/docs/topics/impala_hbase.xml
@@ -156,11 +156,6 @@ under the License.
 &lt;/property&gt;
 </codeblock>
 
-      <p audience="hidden">
-        Currently, Cloudera Manager does not have an Impala-only override for 
HBase settings, so any HBase
-        configuration change you make through Cloudera Manager would take 
effect for all HBase applications.
-        Therefore, this change is not recommended on systems managed by 
Cloudera Manager.
-      </p>
     </conbody>
   </concept>
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_howto_rm.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_howto_rm.xml b/docs/topics/impala_howto_rm.xml
deleted file mode 100644
index 9e4361a..0000000
--- a/docs/topics/impala_howto_rm.xml
+++ /dev/null
@@ -1,438 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-  http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing,
-software distributed under the License is distributed on an
-"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-KIND, either express or implied.  See the License for the
-specific language governing permissions and limitations
-under the License.
--->
-<!DOCTYPE concept PUBLIC "-//OASIS//DTD DITA Concept//EN" "concept.dtd">
-<concept id="howto_impala_rm">
-
- <title>How to Configure Resource Management for Impala</title>
- <prolog>
-  <metadata>
-   <data name="Category" value="Impala"/>
-   <data name="Category" value="Admission Control"/>
-   <data name="Category" value="Resource Management"/>
-  </metadata>
- </prolog>
- <conbody>
-  <p>Impala includes features that balance and maximize resources in your CDH 
cluster. This topic describes
-  how you can enhance a CDH cluster using Impala to improve efficiency.</p>
-  <p outputclass="toc inpage">A typical deployment uses the following.</p>
-  <ul audience="HTML">
-   <li audience="HTML">Creating Static Service Pools</li>
-   <li audience="HTML">Using Admission Control <ul>
-     <li audience="HTML">Setting Per-query Memory Limits</li>
-     <li audience="HTML">Creating Dynamic Resource Pools</li>
-    </ul></li>
-
-  </ul>
- </conbody>
- <concept id="static_service_pools">
-   <title>Creating Static Service Pools</title>
-  <conbody>
-  <p>Use Static Service Pools to allocate dedicated resources for Impala and 
other services to allow
-    for predictable resource availability. </p>
-    <p>Static service pools isolate services from one another, so that high 
load on one service has
-    bounded impact on other services. You can use Cloudera Manager to 
configure static service pools
-    that control memory, CPU and Disk I/O.</p>
-    <p >The following screenshot shows a sample configuration for Static 
Service Pools in
-    Cloudera Manager:</p>
-   <draft-comment author="ddawson">
-    <p>Need accurate numbers - or can we remove services other than HDFS, 
Impala, and YARN? Matt
-     Jacobs is going to run these numbers by someone in the field.</p>
-   </draft-comment>
-   <p>
-    <image href="../images/howto_static_server_pools_config.png" 
placement="break"
-     id="PDF_33" align="center" scale="33" audience="HTML"/>
-    <image href="../images/howto_static_server_pools_config.png" 
placement="break"
-     id="HTML_SCALEFIT" align="center" scalefit="yes" audience="PDF"/>
-   </p>
-   <p>
-    <ul id="ul_tkw_4rs_pw">
-     <li >
-      <p >HDFS always needs to have a minimum of 5-10% of the resources.</p>
-     </li>
-     <li >
-      Generally, YARN and Impala split the rest of the resources.
-     
-     <ul id="ul_ukw_4rs_pw">
-      <li >
-       <p >For mostly batch workloads, you might allocate YARN 60%, Impala 
30%, and HDFS
-        10%. </p>
-      </li>
-      <li >
-       <p >For mostly ad hoc query workloads, you might allocate Impala 60%, 
YARN 30%, and
-        HDFS 10%.</p>
-      </li>
-     </ul></li>
-    </ul>
-   </p>
-   </conbody>
-  </concept>
- <concept id="enable_admission_control">
-  <title>Using Admission Control</title>
-  <conbody>
-   <p>Within the constraints of the static service pool, you can further 
subdivide Impala's
-    resources using Admission Control. You configure Impala Admission Control 
pools in the Cloudera
-    Manager Dynamic Resource Pools page.</p>
-   <p>You use Admission Control to divide usage between Dynamic Resource Pools 
in multitenant use
-    cases. Allocating resources judiciously allows your most important queries 
to run faster and
-    more reliably.</p>
-   <p>
-    <note>In this context, Impala Dynamic Resource Pools are different than 
the default YARN Dynamic
-     Resource Pools. You can turn on Dynamic Resource Pools that are 
exclusively for use by
-     Impala.</note>
-   </p>
-   <p>Admission Control is enabled by default.</p>
-   <p>A Dynamic Resource Pool has the following properties:<ul 
id="ul_blk_jjg_sw">
-     <li><b>Max Running Queries</b>: Maximum number of concurrently executing 
queries in the pool
-      before incoming queries are queued.</li>
-     <li><b>Max Memory Resources</b>: Maximum memory used by queries in the 
pool before incoming
-      queries are queued. This value is used at the time of admission and is 
not enforced at query
-      runtime.</li>
-     <li><b>Default Query Memory Limit</b>: Defines the maximum amount of 
memory a query can
-      allocate on each node. This is enforced at runtime. If the query 
attempts to use more memory,
-      it is forced to spill, if possible. Otherwise, it is cancelled. The 
total memory that can be
-      used by a query is the <codeph>MEM_LIMIT</codeph> times the number of 
nodes.</li>
-     <li><b>Max Queued Queries</b>: Maximum number of queries that can be 
queued in the pool before
-      additional queries are rejected.</li>
-     <li><b>Queue Timeout</b>: Specifies how long queries can wait in the 
queue before they are
-      cancelled with a timeout error.</li>
-    </ul></p>
-  </conbody>
- </concept>
- <concept id="set_per_query_memory_limits">
-  <title>Setting Per-query Memory Limits</title>
-  <conbody>
-   <p>Use per-query memory limits to prevent queries from consuming excessive 
memory resources that
-    impact other queries. <ph rev="upstream">Cloudera</ph> recommends that you 
set the query memory limits whenever
-    possible.</p>
-   <p>If you set the <b>Pool Max Mem Resources</b> for a resource pool, Impala 
attempts to throttle
-    queries if there is not enough memory to run them within the specified 
resources.</p>
-   <p>Only use admission control with maximum memory resources if you can 
ensure there are query
-    memory limits. Set the pool <b>Default Query Memory Limit</b> to be 
certain. You can override
-    this setting with the <codeph>MEM_LIMIT</codeph> query option, if necessary.</p>
-   <p>Typically, you set query memory limits using the <codeph>set 
MEM_LIMIT=Xg;</codeph> query
-    option. When you find the right value for your business case, memory-based 
admission control
-    works well. The potential downside is that queries that attempt to use 
more memory might perform
-    poorly or even be cancelled.</p>
-   <p>To find a reasonable default query memory limit:<ol id="ol_ydt_xhy_pw">
-     <li>Run the workload.</li>
-     <li>In Cloudera Manager, go to <menucascade>
-       <uicontrol>Impala</uicontrol>
-       <uicontrol>Queries</uicontrol>
-      </menucascade>.</li>
-     <li>Click <uicontrol>Select Attributes</uicontrol>.</li>
-     <li>Select <uicontrol>Per Node Peak Memory Usage</uicontrol> and click
-       <uicontrol>Update</uicontrol>.</li>
-     <li>Allow the system time to gather information, then click the 
<uicontrol>Show
-       Histogram</uicontrol> icon to see the results.<image placement="break"
-       href="../images/howto_show_histogram.png" align="center" 
id="image_hmv_xky_pw"/></li>
-     <li>Use the histogram to find a value that accounts for most queries. 
Queries that require more
-      resources than this limit should explicitly set the memory limit to 
ensure they can run to
-       completion.<draft-comment author="ddawson">This chart uses bad sample 
data - we will change
-       the chart when we have real numbers from the sample use 
case.</draft-comment><image
-       placement="break" href="../images/howto_per_node_peak_memory_usage.png" 
align="center"
-       id="image_ehn_hly_pw" scalefit="yes"/></li>
-    </ol></p>
-  </conbody>
- </concept>
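A short usage sketch of the MEM_LIMIT query option mentioned in the section above (the 2g value and the query are arbitrary examples):

    SET MEM_LIMIT=2g;
    SELECT COUNT(*) FROM sales_fact;   -- each node enforces a 2 GiB limit for this query
    SET MEM_LIMIT=0;                   -- back to the default of no explicit per-query limit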
- <concept id="concept_en4_3sy_pw">
-  <title>Creating Dynamic Resource Pools</title>
-  <conbody>
-   <p>A dynamic resource pool is a named configuration of resources and a 
policy for scheduling the
-    resources among Impala queries running in the pool. Dynamic resource pools 
allow you to schedule
-    and allocate resources to Impala queries based on a user's access to 
specific pools and the
-    resources available to those pools. </p>
-   <p>This example creates both production and development resource pools or 
queues. It assumes you
-    have 3 worker nodes with 24 GiB of RAM each for an aggregate memory of 
72000 MiB. This pool
-    configuration allocates the Production queue twice the memory resources of 
the Development
-    queue, and a higher number of concurrent queries.</p>
-   <p>To create a Production dynamic resource pool for Impala:</p>
-    <ol>
-     <li>In Cloudera Manager, select <menucascade>
-      <uicontrol>Clusters</uicontrol>
-      <uicontrol>Dynamic Resource Pool Configuration</uicontrol>
-     </menucascade>.</li>
-     <li>Click the <uicontrol>Impala Admission Control</uicontrol> tab.</li>
-     <li>Click <b>Create Resource Pool</b>.</li>
-     <li>Specify a name and resource limits for the Production pool:<ul 
id="ul_rjt_wqv_2v">
-       <li>In the <b>Resource Pool Name</b> field, enter 
<userinput>Production</userinput>.</li>
-       <li>In the <uicontrol>Max Memory</uicontrol> field, enter 
<userinput>48000</userinput>.</li>
-      <li>In the <uicontrol>Default Query Memory Limit</uicontrol> field, enter
-        <userinput>1600</userinput>.</li>
-       <li>In the <uicontrol>Max Running Queries</uicontrol> field, enter
-       <userinput>10</userinput>.</li>
-       <li>In the <uicontrol>Max Queued Queries</uicontrol> field, enter
-       <userinput>200</userinput>.</li>
-      </ul></li>
-     <li>Click <uicontrol>Create</uicontrol>.</li>
-     <li>Click <uicontrol>Refresh Dynamic Resource Pools</uicontrol>.</li>
-    </ol>
-    <p>The Production queue runs up to 10 queries at once. If the total memory 
requested
-    by these queries exceeds 48000 MiB, it holds the next query in the queue 
until the memory is
-    released. It also prevents a query from running if it needs more memory 
than is currently
-    available. Admission Control holds the next query if either Max Running 
Queries is reached, or
-    the pool Max Memory limit is reached.</p>
-   <p>Here, Max Memory resources and Default Query Memory Limit throttle 
throughput to 10 queries,
-    so setting Max Running Queries might not be necessary, though it does not 
hurt to do so. Most
-    users set Max Running Queries when they cannot pick good numbers for 
memory. Since users can
-    override the query option <varname>mem_limit</varname>, setting the Max 
Running Queries property
-    might make sense.</p>
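The throttling arithmetic behind that statement, using the numbers given for this 3-node example:

    per-query reservation   = 1600 MiB x 3 nodes              = 4800 MiB
    concurrent queries      = 48000 MiB Max Memory / 4800 MiB = 10 queries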
-    <p>To create a Development dynamic resource pool for Impala:</p>
-
-    <ol>
-     <li>In Cloudera Manager, select <menucascade>
-      <uicontrol>Clusters</uicontrol>
-      <uicontrol>Dynamic Resource Pool Configuration</uicontrol>
-     </menucascade>.</li>
-     <li>Click the <uicontrol>Impala Admission Control</uicontrol> tab.</li>
-     <li>Click <b>Create Resource Pool</b>.</li>
-     <li>Specify a name and resource limits for the Development pool:<ul 
id="ul_j42_q3z_pw">
-       <li>In the <b>Resource Pool Name</b> field, enter 
<userinput>Development</userinput>.</li>
-       <li>In the <uicontrol>Max Memory</uicontrol> field, enter 
<userinput>24000</userinput>.</li>
-      <li>In the <uicontrol>Default Query Memory Limit</uicontrol> field, enter
-        <userinput>8000</userinput>.</li>
-       <li>In the <uicontrol>Max Running Queries</uicontrol> field, enter 
1.</li>
-       <li>In the <uicontrol>Max Queued Queries</uicontrol> field, enter 
100.</li>
-      </ul></li>
-     <li>Click <uicontrol>Create</uicontrol>.</li>
-     <li>Click <uicontrol>Refresh Dynamic Resource Pools</uicontrol>.<p>The 
Development queue runs
-      one query at a time. If the total memory required by the query exceeds 
24000 MiB, it holds the
-      query until memory is released.</p></li>
-    </ol>
-  </conbody>
-  <concept id="setting_placement_rules">
-   <title>Understanding Placement Rules</title>
-   <conbody>
-    <p>Placement rules determine how queries are mapped to resource pools. The 
standard settings are
-     to use a specified pool when specified; otherwise, use the default 
pool.</p>
-    <p>For example, you can use the SET statement to select the pool in which 
to run a
-     query.<codeblock>SET REQUEST_POOL=Production;</codeblock></p>
-    <p>If you do not use a <codeph>SET</codeph> statement, queries are run in 
the default pool.</p>
-   </conbody>
-  </concept>
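A brief usage sketch of the REQUEST_POOL option shown above (the table name is an arbitrary example):

    SET REQUEST_POOL=Production;
    SELECT COUNT(*) FROM web_logs;   -- admitted through the Production pool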
-  <concept id="setting_access_control_on_pools">
-  <title>Setting Access Control on Pools</title>
-  <conbody>
-   <p>You can specify that only certain users and groups are allowed to use the 
pools you define.</p>
-   <p>To set access control on the Production pool:</p>
-   <ol>
-    <li>In Cloudera Manager, select <menucascade>
-       <uicontrol>Clusters</uicontrol>
-       <uicontrol>Dynamic Resource Pool Configuration</uicontrol>
-      </menucascade>.</li>
-    <li>Click the <uicontrol>Impala Admission Control</uicontrol> tab.</li>
-    <li>Click the <uicontrol>Edit</uicontrol> button for the Production 
pool.</li>
-    <li>Click the <uicontrol>Submission Access Control</uicontrol> tab.</li>
-    <li>Select <uicontrol>Allow these users and groups to submit to this 
pool</uicontrol>.</li>
-    <li>Enter a comma-separated list of users who can use the pool.
-     <image placement="break"
-      href="../images/howto_access_control.png" align="center" scalefit="yes"/>
-     </li>
-    
-    <li>Click <uicontrol>Save</uicontrol>.</li>
-   </ol>
-  </conbody>
- </concept>
- </concept>
-<concept id="impala_resource_management_example">
- <title>Impala Resource Management Example</title>
- <conbody>
-   <p>Anne Chang is administrator for an enterprise data hub that runs a 
number of workloads,
-    including Impala. </p>
-   <p>Anne has a 20-node cluster that uses Cloudera Manager static 
partitioning. Because of the
-    heavy Impala workload, Anne needs to make sure Impala gets enough 
resources. While the best
-    configuration values might not be known in advance, she decides to start 
by allocating 50% of
-    resources to Impala. Each node has 128 GiB dedicated to its impalad daemon. 
Impala has 2560 GiB in
-    aggregate that can be shared across the resource pools she creates.</p>
-   <p>Next, Anne studies the workload in more detail. After some research, she 
might choose to
-    revisit these initial values for static partitioning. </p>
-   <p>To figure out how to further allocate Impala’s resources, Anne needs 
to consider the workloads
-    and users, and determine their requirements. There are a few main sources 
of Impala queries: <ul
-     id="ul_ml3_sf2_5w">
-     <li>Large reporting queries executed by an external process/tool. These 
are critical business
-      intelligence queries that are important for business decisions. It is 
important that they get
-      the resources they need to run. There typically are not many of these 
queries at a given
-      time.</li>
-     <li>Frequent, small queries generated by a web UI. These queries scan a 
limited amount of data
-      and do not require expensive joins or aggregations. These queries are 
important, but not as
-      critical; perhaps the client tries resending the query or the end user 
refreshes the
-      page.</li>
-     <li>Occasionally, expert users might run ad-hoc queries. The queries can 
vary significantly in
-      their resource requirements. While Anne wants a good experience for 
these users, it is hard to
-      control what they do (for example, submitting inefficient or incorrect 
queries by mistake).
-      Anne restricts these queries by default and tells users to reach out to 
her if they need more
-      resources. </li>
-    </ul></p>
-   <p>To set up admission control for this workload, Anne first runs the 
workloads independently, so
-    that she can observe the workload’s resource usage in Cloudera Manager. 
If they could not easily
-    be run manually, but had been run in the past, Anne uses the history 
information from Cloudera
-    Manager. It can be helpful to use other search criteria (for example, 
<i>user</i>) to isolate
-    queries by workload. Anne uses the Cloudera Manager chart for Per-Node 
Peak Memory usage to
-    identify the maximum memory requirements for the queries. </p>
-   <p>From this data, Anne observes the following about the queries in the 
groups above:<ul
-     id="ul_amq_ng2_5w">
-     <li> Large reporting queries use up to 32 GiB per node. There are 
typically 1 or 2 queries
-      running at a time. On one occasion, she observed that 3 of these queries 
were running
-      concurrently. Queries can take 3 minutes to complete.</li>
-     <li>Web UI-generated queries use from 100 MiB per node to usually less 
than 4 GiB per node
-      of memory, but occasionally as much as 10 GiB per node. Queries take, on 
average, 5 seconds,
-      and there can be as many as 140 incoming queries per minute.</li>
-     <li>Anne has little data on ad hoc queries, but some are trivial 
(approximately 100 MiB per
-      node), others join several tables (requiring a few GiB per node), and 
one user submitted a
-      huge cross join of all tables that used all system resources (that was 
likely a mistake).</li>
-    </ul></p>
-   <p>Based on these observations, Anne creates the admission control 
configuration with the
-    following pools: </p>
-   <section id="section_yjc_h32_5w">
-    <title>XL_Reporting</title>
-    <p>
-     <table frame="all" rowsep="1" colsep="1" id="XL_Reporting_Table">
-      <tgroup cols="2">
-       <colspec colname="c1" colnum="1" colwidth="1.0*"/>
-       <colspec colname="c2" colnum="2" colwidth="1.0*"/>
-       <thead>
-        <row>
-         <entry>Property</entry>
-         <entry>Value</entry>
-        </row>
-       </thead>
-       <tbody>
-        <row>
-         <entry>Max Memory</entry>
-         <entry>1280 GiB</entry>
-        </row>
-        <row>
-         <entry>Default Query Memory Limit</entry>
-         <entry>32 GiB</entry>
-        </row>
-        <row>
-         <entry>Max Running Queries</entry>
-         <entry>2</entry>
-        </row>
-        <row>
-         <entry>Queue Timeout</entry>
-         <entry>5 minutes</entry>
-        </row>
-       </tbody>
-      </tgroup>
-     </table>
-    </p>
-    <p>This pool is for large reporting queries. In order to support running 2 
queries at a time,
-     the pool memory resources are set to 1280 GiB (aggregate cluster memory). 
This is for 2
-     queries, each with 32 GiB per node, across 20 nodes. Anne sets the 
pool’s Default Query Memory
-     Limit to 32 GiB so that no query uses more than 32 GiB on any given node. 
She sets Max Running
-     Queries to 2 (though it is not necessary that she do so). She increases the 
pool’s queue timeout to
-     5 minutes in case a third query comes in and has to wait. She does not 
expect more than 3
-     concurrent queries, and she does not want them to wait that long anyway, 
so she does not
-     increase the queue timeout further. If the workload increases in the future, she 
might choose to adjust
-     the configuration or buy more hardware. </p>
-   </section>
-   <section id="section_xm3_j32_5w"><title>HighThroughput_UI</title>
-    <p>
-     <table frame="all" rowsep="1" colsep="1" id="High_Throughput_UI_Table">
-      <tgroup cols="2">
-       <colspec colname="c1" colnum="1" colwidth="1.0*"/>
-       <colspec colname="c2" colnum="2" colwidth="1.0*"/>
-       <thead>
-        <row>
-         <entry>Property</entry>
-         <entry>Value</entry>
-        </row>
-       </thead>
-       <tbody>
-        <row>
-         <entry>Max Memory</entry>
-         <entry>960 GiB (inferred)</entry>
-        </row>
-        <row>
-         <entry>Default Query Memory Limit</entry>
-         <entry>4 GiB</entry>
-        </row>
-        <row>
-         <entry>Max Running Queries</entry>
-         <entry>12</entry>
-        </row>
-        <row>
-         <entry>Queue Timeout</entry>
-         <entry>5 minutes</entry>
-        </row>
-       </tbody>
-      </tgroup>
-     </table>
-    </p>
-    <p>This pool is used for the small, high throughput queries generated by 
the web tool. Anne sets
-     the Default Query Memory Limit to 4 GiB per node, and sets Max Running 
Queries to 12. This
-     implies a maximum amount of memory per node used by the queries in this 
pool: 48 GiB per node
-     (12 queries * 4 GiB per node memory limit).</p><p>Notice that Anne does 
not set the pool memory resources, but does set the pool’s Default Query
-     Memory Limit. This is intentional: admission control processes queries 
faster when a pool uses
-     the Max Running Queries limit instead of the peak memory 
resources.</p><p>This should be enough memory for most queries, since only a 
few go over 4 GiB per node. For those
-     that do require more memory, they can probably still complete with less 
memory (spilling if
-     necessary). If, on occasion, a query cannot run with this much memory and 
it fails, Anne might
-     reconsider this configuration later, or perhaps she does not need to 
worry about a few rare
-     failures from this web UI.</p><p>With regard to throughput, since these 
queries take around 5 seconds and she is allowing 12
-     concurrent queries, the pool should be able to handle approximately 144 
queries per minute,
-     which is enough for the peak maximum expected of 140 queries per minute. 
In case there is a
-     large burst of queries, Anne wants them to queue. The default maximum 
size of the queue is
-     already 200, which should be more than large enough. Anne does not need 
to change it.</p></section>
-   <section id="section_asm_yj2_5w"><title>Default</title>
-    <p>
-     <table frame="all" rowsep="1" colsep="1" id="default_table">
-      <tgroup cols="2">
-       <colspec colname="c1" colnum="1" colwidth="1.0*"/>
-       <colspec colname="c2" colnum="2" colwidth="1.0*"/>
-       <thead>
-        <row>
-         <entry>Property</entry>
-         <entry>Value</entry>
-        </row>
-       </thead>
-       <tbody>
-        <row>
-         <entry>Max Memory</entry>
-         <entry>320 GiB</entry>
-        </row>
-        <row>
-         <entry>Default Query Memory Limit</entry>
-         <entry>4 GiB</entry>
-        </row>
-        <row>
-         <entry>Max Running Queries</entry>
-         <entry>Unlimited</entry>
-        </row>
-        <row>
-         <entry>Queue Timeout</entry>
-         <entry>60 Seconds</entry>
-        </row>
-       </tbody>
-      </tgroup>
-     </table>
-    </p><p>The default pool (which already exists) is a catch-all for ad-hoc 
queries. Anne wants to use the
-     remaining memory not used by the first two pools, 16 GiB per node 
(XL_Reporting uses 64 GiB per
-     node, HighThroughput_UI uses 48 GiB per node). For the other pools to 
get the resources they
-     expect, she must still set the Max Memory resources and the Default Query 
Memory Limit. She
-     sets the Max Memory resources to 320 GiB (16 * 20). She sets the Default 
Query Memory Limit to
-     4 GiB per node for now. That is somewhat arbitrary, but satisfies some of 
the ad hoc queries
-     she observed. If someone writes a bad query by mistake, she does not 
actually want it using all
-     the system resources. If an expert user has a large query to submit, they 
can override the
-     Default Query Memory Limit (up to 16 GiB per node, since that is bound by 
the pool Max Memory
-     resources). If that is still insufficient for this user’s workload, the 
user should work with
-     Anne to adjust the settings and perhaps create a dedicated pool for the 
workload.</p></section>
-  </conbody>
-</concept>
-</concept>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_incompatible_changes.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_incompatible_changes.xml 
b/docs/topics/impala_incompatible_changes.xml
index a25f327..897d973 100644
--- a/docs/topics/impala_incompatible_changes.xml
+++ b/docs/topics/impala_incompatible_changes.xml
@@ -1226,8 +1226,6 @@ select * from `cross`;</codeblock>
         to Impala 1.2.x in general.
       </p>
 
-      <p audience="hidden" 
conref="../shared/impala_common.xml#common/cm48_upgrade"/>
-
 <!-- <note conref="common.xml#common/cdh4_cdh5_upgrade"/> -->
     </conbody>
   </concept>
@@ -1268,9 +1266,6 @@ select * from `cross`;</codeblock>
         <li/>
       </ul>
 
-      <p audience="hidden" 
conref="../shared/impala_common.xml#common/cm48_upgrade"/>
-
-<!--  <note conref="common.xml#common/cdh4_cdh5_upgrade"/> -->
     </conbody>
   </concept>
 
@@ -1405,83 +1400,8 @@ ALTER TABLE <varname>table_name</varname> SET FILEFORMAT
           <codeph>impala-lzo-cdh4</codeph> to the latest level. See <xref 
href="impala_txtfile.xml#lzo"/> for
           details.
         </li>
-
-        <li>
-          Cloudera Manager 4.5.2 and higher only supports Impala 1.0 and 
higher, and vice versa. If you upgrade to
-          Impala 1.0 or higher managed by Cloudera Manager, you must also 
upgrade Cloudera Manager to version 4.5.2
-          or higher. If you upgrade from an earlier version of Cloudera 
Manager, and were using Impala, you must
-          also upgrade Impala to version 1.0 or higher. The beta versions of 
Impala are no longer supported as of
-          the release of Impala 1.0.
-        </li>
-      </ul>
-    </conbody>
-  </concept>
-
-  <concept id="incompatible_changes_07" audience="hidden">
-
-    <title>Incompatible Change Introduced in Version 0.7 of the Impala Beta 
Release</title>
-
-    <conbody>
-
-      <ul>
-        <li>
-          The defaults for the <codeph>-nn</codeph> and 
<codeph>-nn_port</codeph> flags have changed and are now
-          read from <codeph>core-site.xml</codeph>. Impala prints the values 
of <codeph>-nn</codeph> and
-          <codeph>-nn_port</codeph> to the log when it starts. The ability to 
set <codeph>-nn</codeph> and
-          <codeph>-nn_port</codeph> on the command line is deprecated in 0.7 
and may be removed in Impala 0.8.
-        </li>
-      </ul>
-    </conbody>
-  </concept>
-
-  <concept id="incompatible_changes_06" audience="hidden">
-
-    <title>Incompatible Change Introduced in Version 0.6 of the Impala Beta 
Release</title>
-
-    <conbody>
-
-      <ul>
-        <li>
-          Cloudera Manager 4.5 supports only version 0.6 of the Impala Beta 
Release. It does not support
-          the earlier beta versions. If you upgrade your Cloudera Manager 
installation, you must also upgrade
-          Impala to beta version 0.6. If you upgrade Impala to beta version 
0.6, you must upgrade Cloudera Manager
-          to 4.5.
-        </li>
-      </ul>
-    </conbody>
-  </concept>
-
-  <concept id="incompatible_changes_04" audience="hidden">
-
-    <title>Incompatible Change Introduced in Version 0.4 of the Impala Beta 
Release</title>
-
-    <conbody>
-
-      <ul>
-        <li>
-          Cloudera Manager 4.1.3 supports only version 0.4 of the Impala Beta 
Release. It does not support
-          the earlier beta versions. If you upgrade your Cloudera Manager 
installation, you must also upgrade
-          Impala to beta version 0.4. If you upgrade Impala to beta version 
0.4, you must upgrade Cloudera Manager
-          to 4.1.3.
-        </li>
       </ul>
     </conbody>
   </concept>
 
-  <concept id="incompatible_changes_03" audience="hidden">
-
-    <title>Incompatible Change Introduced in Version 0.3 of the Impala Beta 
Release</title>
-
-    <conbody>
-
-      <ul>
-        <li>
-          Cloudera Manager 4.1.2 supports only version 0.3 of the Impala Beta 
Release. It does not support
-          the earlier beta versions. If you upgrade your Cloudera Manager 
installation, you must also upgrade
-          Impala to beta version 0.3. If you upgrade Impala to beta version 
0.3, you must upgrade Cloudera Manager
-          to 4.1.2.
-        </li>
-      </ul>
-    </conbody>
-  </concept>
 </concept>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_isilon.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_isilon.xml b/docs/topics/impala_isilon.xml
index bf93b39..12e6343 100644
--- a/docs/topics/impala_isilon.xml
+++ b/docs/topics/impala_isilon.xml
@@ -102,30 +102,7 @@ CREATE TABLE d1.t2 (a TINYINT, b BOOLEAN);
       for the <cmdname>impalad</cmdname> daemon on clusters not using Cloudera 
Manager.
     </p>
 
-    <p audience="hidden">
-<!--
-      For information about tasks performed on
-      Isilon OneFS, see the information hub for Cloudera on the EMC Community 
Network:
-      <xref href="https://community.emc.com/docs/DOC-39522"; format="html" 
scope="external">https://community.emc.com/docs/DOC-39522</xref>.
--->
-      <!-- This is a little bit of a circular loop when this topic is conrefed 
into the main Isilon page,
-           consider if there's a way to conditionalize it out in that case. -->
-      For information about managing Isilon storage devices through Cloudera 
Manager, see
-      <xref audience="integrated" href="cm_mc_isilon_service.xml"/><xref 
audience="standalone" 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_mc_isilon_service.html";
 scope="external" format="html"/>.
-    </p>
-
     <!-- <p outputclass="toc inpage"/> -->
   </conbody>
-<concept id="isilon_cm_configs" audience="hidden">
-<title>Required Configurations</title>
-<conbody>
-<p>Specify the following configurations in Cloudera Manager on the 
<menucascade><uicontrol>Clusters</uicontrol><uicontrol><varname>Isilon 
Service</varname></uicontrol><uicontrol>Configuration</uicontrol></menucascade> 
tab:<ul id="ul_vpx_bw5_vv">
-<li>In <uicontrol>HDFS Client Advanced Configuration Snippet (Safety Valve) 
for hdfs-site.xml</uicontrol> <codeph>hdfs-site.xml</codeph> and the 
<uicontrol>Cluster-wide Advanced Configuration Snippet (Safety Valve) for 
core-site.xml</uicontrol> properties for the Isilon service, set the value of 
the <codeph>dfs.client.file-block-storage-locations.timeout.millis</codeph> 
property to <codeph>10000</codeph>.</li>
-<li>In the Isilon <uicontrol>Cluster-wide Advanced Configuration Snippet 
(Safety Valve) for core-site.xml</uicontrol> property for the Isilon service, 
set the value of the <codeph>hadoop.security.token.service.use_ip</codeph> 
property to <codeph>FALSE</codeph>. </li>
-<li>If you see errors that reference the <codeph>.Trash</codeph> directory, 
make sure that the <uicontrol>Use Trash</uicontrol> property is selected.</li>
-</ul></p>
-
-</conbody>
-</concept>
 
 </concept>
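For clusters configured without a management console, the same Isilon-related settings described in the removed list above, expressed directly as configuration properties (the property names and values are taken from that text; which file each belongs in follows it):

    <!-- hdfs-site.xml and core-site.xml -->
    <property>
      <name>dfs.client.file-block-storage-locations.timeout.millis</name>
      <value>10000</value>
    </property>

    <!-- core-site.xml -->
    <property>
      <name>hadoop.security.token.service.use_ip</name>
      <value>false</value>
    </property>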

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_ldap.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_ldap.xml b/docs/topics/impala_ldap.xml
index 757a4e2..b01eb64 100644
--- a/docs/topics/impala_ldap.xml
+++ b/docs/topics/impala_ldap.xml
@@ -185,16 +185,6 @@ under the License.
         </li>
       </ul>
 
-      <p audience="hidden">
-        For clusters managed by Cloudera Manager 5.4.0 and higher,
-        search for the configuration field names <codeph>ldap_domain</codeph>,
-        <codeph>ldap_basedn</codeph>, or <codeph>ldap_bind_pattern</codeph>,
-        fill in and save the appropriate field values, and restart the Impala 
service.
-        Prior to Cloudera Manager 5.4.0, these values were filled in using the
-        <uicontrol>Impala Daemon Command Line Argument Advanced Configuration 
Snippet (Safety Valve)</uicontrol>
-        field.
-      </p>
-
       <p>
         These options are mutually exclusive; Impala does not start if more 
than one of these options is specified.
       </p>
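For reference, a minimal sketch of the command-line form of the settings named above; remember that only one of ldap_domain, ldap_basedn, or ldap_bind_pattern may be given. The enable_ldap_auth and ldap_uri flags and all example values are assumptions drawn from the wider LDAP topic, not from these lines:

    # impalad startup options (pick ONE of --ldap_domain / --ldap_basedn / --ldap_bind_pattern)
    --enable_ldap_auth=true \
    --ldap_uri=ldaps://ldap.example.com \
    --ldap_basedn="ou=people,dc=example,dc=com"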
@@ -299,26 +289,6 @@ username, for example <codeph>[email protected]</codeph>.
       </metadata>
     </prolog>
     <conbody>
-      <section id="ldap_impala_hue_cm" audience="hidden">
-        <title>Enabling LDAP for Impala in Hue Using Cloudera Manager</title>
-        <p>
-          <ol>
-            <li>Go to the Hue service.</li>
-            <li>Click the Configuration tab.</li>
-            <li>Select <menucascade><uicontrol>Scope</uicontrol><uicontrol>Hue
-                  Server</uicontrol></menucascade>.</li>
-            <li>Select
-              
<menucascade><uicontrol>Category</uicontrol><uicontrol>Advanced</uicontrol></menucascade>.</li>
-            <li>Add the following properties to the <b>Hue Server Advanced
-                Configuration Snippet (Safety Valve) for
-                hue_safety_valve_server.ini</b>
-              property.<codeblock>[impala]
-auth_username=&lt;LDAP username of Hue user to be authenticated>
-auth_password=&lt;LDAP password of Hue user to be 
authenticated></codeblock></li>
-            <li>Click <b>Save Changes</b>.</li>
-          </ol>
-        </p>
-      </section>
       <section id="ldap_impala_hue_cmdline">
         <title>Enabling LDAP for Impala in Hue Using the Command Line</title>
         <p>LDAP authentication for the Impala app in Hue can be enabled by

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_logging.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_logging.xml b/docs/topics/impala_logging.xml
index 92b9c88..79bc112 100644
--- a/docs/topics/impala_logging.xml
+++ b/docs/topics/impala_logging.xml
@@ -166,11 +166,6 @@ under the License.
           important messages such as informational ones are buffered in memory 
rather than being flushed to disk
           immediately.
         </li>
-
-        <li audience="hidden">
-          Cloudera Manager has an Impala configuration setting that sets the 
<codeph>-logbuflevel</codeph> startup
-          option.
-        </li>
       </ul>
 
     </conbody>
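
The <codeph>-logbuflevel</codeph> behavior described above can be adjusted at daemon startup; a minimal sketch, assuming a direct command-line launch rather than any management tooling:

    # Flush every log message to disk immediately instead of buffering
    # informational messages in memory (-1 disables buffering).
    impalad -logbuflevel=-1
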
@@ -189,15 +184,6 @@ under the License.
 
     <conbody>
 
-      <p rev="upstream" audience="hidden"><!-- Whole paragraph can probably 
go. -->
-        <ph rev="upstream">Cloudera</ph> recommends installing Impala through 
the Cloudera Manager administration interface. To assist with
-        troubleshooting, Cloudera Manager collects front-end and back-end logs 
together into a single view, and let
-        you do a search across log data for all the managed nodes rather than 
examining the logs on each node
-        separately. If you installed Impala using Cloudera Manager, refer to 
the topics on Monitoring Services
-        (<xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_dg_service_monitoring.html";
 scope="external" format="html">CDH 5</xref>)
-        or Logs (<xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_dg_logs.html";
 scope="external" format="html">CDH 5</xref>).
-      </p>
-
       <p>
         If you are using Impala in an environment not managed by Cloudera 
Manager, review Impala log files on each
         host, when you have traced an issue back to a specific system.
@@ -250,15 +236,6 @@ under the License.
         Linux tool or technique of choice. A value of 1 preserves only the 
very latest log file.
       </p>
 
-      <p audience="hidden">
-        To set up log rotation on a system managed by Cloudera Manager 5.4.0 
and higher, search for the
-        <codeph>max_log_files</codeph> option name and set the appropriate 
value for the <userinput>Maximum Log
-        Files</userinput> field for each Impala configuration category 
(Impala, Catalog Server, and StateStore).
-        Then restart the Impala service. In earlier Cloudera Manager releases, 
specify the
-        <codeph>-max_log_files=<varname>maximum</varname></codeph> option in 
the <uicontrol>Command Line Argument
-        Advanced Configuration Snippet (Safety Valve)</uicontrol> field for 
each Impala configuration category.
-      </p>
-
     </conbody>
 
   </concept>
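
As a rough sketch of the log-rotation setting discussed above, assuming a package-based install that reads daemon flags from /etc/default/impala (that path and the IMPALA_SERVER_ARGS variable come from the non-CM setup topics, not from this diff):

    # Keep at most 10 log files per severity level for the impalad daemon.
    IMPALA_SERVER_ARGS="${IMPALA_SERVER_ARGS} -max_log_files=10"
    # Restart for the change to take effect.
    sudo service impala-server restart
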

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_perf_hdfs_caching.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_perf_hdfs_caching.xml b/docs/topics/impala_perf_hdfs_caching.xml
index 79d700a..9b16153 100644
--- a/docs/topics/impala_perf_hdfs_caching.xml
+++ b/docs/topics/impala_perf_hdfs_caching.xml
@@ -469,24 +469,6 @@ Found 122 entries
         </li>
       </ul>
 
-      <p audience="hidden">
-        <b>Cloudera Manager:</b>
-      </p>
-
-      <ul audience="hidden">
-        <li>
-          You can enable or disable HDFS caching through Cloudera Manager, 
using the configuration setting
-          <uicontrol>Maximum Memory Used for Caching</uicontrol> for the HDFS 
service. This control sets the HDFS
-          configuration parameter 
<codeph>dfs_datanode_max_locked_memory</codeph>, which specifies the upper limit
-          of HDFS cache size on each node.
-        </li>
-
-        <li>
-          All the other manipulation of the HDFS caching settings, such as 
what files are cached, is done through
-          the command line, either Impala DDL statements or the Linux 
<cmdname>hdfs cacheadmin</cmdname> command.
-        </li>
-      </ul>
-
       <p>
         <b>Impala memory limits:</b>
       </p>
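
The command-line route to HDFS caching that remains documented here combines <cmdname>hdfs cacheadmin</cmdname> with Impala DDL; a minimal sketch, with the pool name, size limit, and table made up purely for illustration:

    # Create a cache pool, then cache an Impala table's data in it.
    hdfs cacheadmin -addPool four_gig_pool -limit 4000000000
    impala-shell -q "CREATE TABLE census (name STRING) CACHED IN 'four_gig_pool'"
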

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_processes.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_processes.xml b/docs/topics/impala_processes.xml
index a97a644..71986d3 100644
--- a/docs/topics/impala_processes.xml
+++ b/docs/topics/impala_processes.xml
@@ -69,28 +69,6 @@ under the License.
     <p outputclass="toc inpage"/>
   </conbody>
 
-  <concept id="starting_via_cm" audience="hidden">
-
-    <title>Starting Impala through Cloudera Manager</title>
-
-    <conbody>
-
-      <p>
-        If you installed Impala with Cloudera Manager, use Cloudera Manager to 
start and stop services. The
-        Cloudera Manager GUI is a convenient way to check that all services 
are running, to set configuration
-        options using form fields in a browser, and to spot potential issues 
such as low disk space before they
-        become serious. Cloudera Manager automatically starts all the 
Impala-related services as a group, in the
-        correct order. See
-        <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_mc_start_stop_service.html";
 scope="external" format="html">the
-        Cloudera Manager Documentation</xref> for details.
-      </p>
-
-      <note>
-        <p 
conref="../shared/impala_common.xml#common/udf_persistence_restriction"/>
-      </note>
-    </conbody>
-  </concept>
-
   <concept id="starting_via_cmdline">
 
     <title>Starting Impala from the Command Line</title>
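
For the command-line startup covered by the remaining section, a sketch of the usual sequence on a package-based install (service names assumed from the non-CM installation topics): start the statestore first, then the catalog service, then the impalad daemons.

    sudo service impala-state-store start
    sudo service impala-catalog start
    sudo service impala-server start
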

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_proxy.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_proxy.xml b/docs/topics/impala_proxy.xml
index d0474c5..bb8d029 100644
--- a/docs/topics/impala_proxy.xml
+++ b/docs/topics/impala_proxy.xml
@@ -143,15 +143,6 @@ under the License.
           Run the load-balancing proxy server, pointing it at the 
configuration file that you set up.
         </li>
 
-        <li audience="hidden">
-          On systems managed by Cloudera Manager, on the page
-          
<menucascade><uicontrol>Impala</uicontrol><uicontrol>Configuration</uicontrol><uicontrol>Impala
 Daemon
-          Default Group</uicontrol></menucascade>, specify a value for the 
<uicontrol>Impala Daemons Load
-          Balancer</uicontrol> field. Specify the address of the load balancer 
in
-          <codeph><varname>host</varname>:<varname>port</varname></codeph> 
format. This setting lets Cloudera
-          Manager route all appropriate Impala-related operations through the 
proxy server.
-        </li>
-
         <li>
           For any scripts, jobs, or configuration settings for applications 
that formerly connected to a specific
           datanode to run Impala SQL statements, change the connection 
information (such as the <codeph>-i</codeph>
@@ -269,13 +260,6 @@ under the License.
           running the <cmdname>impalad</cmdname> daemon.
         </li>
 
-        <li rev="CDH-40363" audience="hidden">
-          For a cluster managed by Cloudera Manager (5.4.2 or higher), fill in 
the Impala configuration setting
-          <uicontrol>Impala Daemons Load Balancer</uicontrol> with the 
appropriate host:port combination.
-          Then restart the Impala service.
-          For systems using a recent level of Cloudera Manager, this is all 
the configuration you need; you can skip the remaining steps in this procedure.
-        </li>
-
         <li>
           On systems not managed by Cloudera Manager, or systems using 
Cloudera Manager earlier than 5.4.2:
 
@@ -320,24 +304,6 @@ under the License.
                 configuration snippet, add: 
<codeblock>--principal=impala/<varname>proxy_host</varname>@<varname>realm</varname>
   --be_principal=impala/<varname>actual_host</varname>@<varname>realm</varname>
   --keytab_file=<varname>path_to_merged_keytab</varname></codeblock>
-                <note audience="hidden">
-                  <p>On a cluster managed by Cloudera Manager 5.1 (or higher),
-                    when you set up Kerberos authentication using the wizard, 
you
-                    can choose to allow Cloudera Manager to deploy the
-                      <systemoutput>krb5.conf</systemoutput> on your cluster. 
In
-                    such a case, you do not need to explicitly modify safety 
valve
-                    parameters as directed above. </p>
-                  <p>Every host has a different <codeph>--be_principal</codeph>
-                    because the actual hostname is different on each host. </p>
-                  <p> Specify the fully qualified domain name (FQDN) for the 
proxy
-                    host, not the IP address. Use the exact FQDN as returned 
by a
-                    reverse DNS lookup for the associated IP address. </p>
-                </note>
-              </li>
-
-              <li audience="hidden">
-                On a cluster managed by Cloudera Manager, create a role group 
to set the configuration values from
-                the preceding step on a per-host basis.
               </li>
 
               <li>
@@ -352,10 +318,6 @@ under the License.
             Restart Impala to make the changes take effect. Follow the 
appropriate steps depending on whether you use
             Cloudera Manager or not:
             <ul>
-              <li audience="hidden">
-                On a cluster managed by Cloudera Manager, restart the Impala 
service.
-              </li>
-
               <li>
                 On a cluster not managed by Cloudera Manager, restart the 
<cmdname>impalad</cmdname> daemons on all
                 hosts in the cluster, as well as the 
<cmdname>statestored</cmdname> and <cmdname>catalogd</cmdname>
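
Once a load balancer fronts the impalad daemons, clients point at the proxy rather than an individual host via the <codeph>-i</codeph> option mentioned above; a minimal sketch with a placeholder proxy address:

    # Connect through the load balancer instead of a specific impalad host.
    impala-shell -i impala-proxy.example.com:21000
    # With Kerberos, add -k so the shell authenticates through the proxy.
    impala-shell -k -i impala-proxy.example.com:21000
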

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_scalability.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_scalability.xml b/docs/topics/impala_scalability.xml
index 9f7ce3c..2362110 100644
--- a/docs/topics/impala_scalability.xml
+++ b/docs/topics/impala_scalability.xml
@@ -169,7 +169,7 @@ Memory Usage: Additional Notes
 
       <p audience="hidden">
         Details to fill in in future: Impact of <q>load catalog in 
background</q> option.
-        Changing timeouts. Related Cloudera Manager settings.
+        Changing timeouts.
       </p>
 
       <p>
@@ -469,11 +469,6 @@ Memory Usage: Additional Notes
               query.
             </li>
 
-            <li audience="hidden">
-              The <uicontrol>Impala Queries</uicontrol> dialog in Cloudera 
Manager. You can see the peak memory
-              usage for a query, combined across all nodes in the cluster.
-            </li>
-
             <li>
               The <uicontrol>Queries</uicontrol> tab in the Impala debug web 
user interface. Select the query to
               examine and click the corresponding 
<uicontrol>Profile</uicontrol> link. This data breaks down the

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_ssl.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_ssl.xml b/docs/topics/impala_ssl.xml
index 4e0b722..726e040 100644
--- a/docs/topics/impala_ssl.xml
+++ b/docs/topics/impala_ssl.xml
@@ -42,156 +42,10 @@ under the License.
       in the cluster. This feature is important when you also use other 
features such as Kerberos
       authentication or Sentry authorization, where credentials are being
       transmitted back and forth.
-      <note type="important" id="CMCDH_EitherOK" audience="hidden">
-        <ul id="ul_e2s_bcd_np">
-          <li>You can use either Cloudera Manager or the following command-line
-            instructions to complete this configuration.</li>
-          <!-- Took out another too-specific conref, to the CDH minor version 
also in CDHVariables.xml. -->
-          <li>This information applies specifically to the version of Impala
-            shown in the HTML page header or on the PDF title page. If you use
-            an earlier version of CDH, see the documentation for that version
-            located at <xref
-              
href="http://www.cloudera.com/content/support/en/documentation.html";
-              format="html" scope="external">Cloudera 
Documentation</xref>.</li>
-        </ul>
-      </note>
     </p>
 
   </conbody>
 
-  <concept id="concept_gnk_2tt_qp" audience="hidden">
-
-    <title>Using Cloudera Manager</title>
-
-    <prolog>
-      <metadata>
-        <data name="Category" value="Cloudera Manager"/>
-      </metadata>
-    </prolog>
-
-    <conbody>
-
-      <p>
-        To configure Impala to listen for Beeswax and HiveServer2 requests on 
TLS/SSL-secured ports:
-        <ol id="ol_rnf_ftt_qp">
-          <li>
-            Open the Cloudera Manager Admin Console and go to the 
<uicontrol>Impala</uicontrol> service.
-          </li>
-
-          <!-- Formerly: 
conref="../shared/cm_common_elements.xml#cm/config_edit" -->
-          <li>Click the <uicontrol>Configuration</uicontrol> tab.</li>
-
-          <li>
-            Select <menucascade><uicontrol>Scope</uicontrol><uicontrol>Impala 
(Service-Wide)</uicontrol></menucascade>.
-          </li>
-
-          <li>
-            Select 
<menucascade><uicontrol>Category</uicontrol><uicontrol>Security</uicontrol></menucascade>.
-          </li>
-
-          <li>
-            Edit the following properties:
-            <table frame="all"
-              id="table_drf_ftt_qp">
-              <title>Impala SSL Properties</title>
-              <tgroup cols="2">
-                <colspec colname="c1" colnum="1" colwidth="1*"/>
-                <colspec colname="c2" colnum="2" colwidth="2.5*"/>
-                <thead>
-                  <row>
-                    <entry>
-                      Property
-                    </entry>
-                    <entry>
-                      Description
-                    </entry>
-                  </row>
-                </thead>
-                <tbody>
-                  <row>
-                    <entry>
-                      <b>Enable TLS/SSL for Impala Client Services</b>
-                    </entry>
-                    <entry>
-                      Encrypt communication between clients (like ODBC, JDBC, 
and the Impala shell) and the Impala daemon using Transport
-                      Layer Security (TLS) (formerly known as Secure Socket 
Layer (SSL)).
-                    </entry>
-                  </row>
-                  <row>
-                    <entry>
-                      <b>SSL/TLS Certificate for Clients</b>
-                    </entry>
-                    <entry>
-                      Local path to the X509 certificate that identifies the 
Impala daemon to clients during TLS/SSL connections. This
-                      file must be in PEM format.
-                    </entry>
-                  </row>
-                  <row>
-                    <entry>
-                      <b>SSL/TLS Private Key for Clients</b>
-                    </entry>
-                    <entry>
-                      Local path to the private key that matches the 
certificate specified in the Certificate for Clients. This file must be
-                      in PEM format.
-                    </entry>
-                  </row>
-                  <row>
-                    <entry>
-                      <b>SSL/TLS Private Key Password for Clients</b>
-                    </entry>
-                    <entry>
-                      A shell command for Impala to run on startup to retrieve 
the password for a password-protected private key file.
-                      The output of the command is truncated to a maximum of 
1024 bytes, and any trailing whitespace (such as spaces
-                      or newline characters) is trimmed. If the command exits 
with an error, Impala does not start. If the password
-                      is incorrect, clients cannot connect to the server 
regardless of whether the public key is correct.
-                    </entry>
-                  </row>
-                  <row>
-                    <entry>
-                      <b>SSL/TLS CA Certificate</b>
-                    </entry>
-                    <entry>
-                      Must be specified for TLS/SSL encryption to be enabled 
for communication
-                      between internal Impala components.
-                    </entry>
-                  </row>
-                  <row>
-                    <entry>
-                      <b>SSL/TLS Certificate for <varname>Impala 
component</varname> Webserver</b>
-                    </entry>
-                    <entry>
-                      There are three of these configuration settings, one 
each for <q>Impala Daemon</q>,
-                      <q>Catalog Server</q>, and <q>Statestore</q>.
-                      Each of these Impala components has its own internal web 
server that powers the
-                      associated web UI with diagnostic information.
-                      The configuration setting represents the local path to 
the X509 certificate that
-                      identifies the web server to clients during TLS/SSL 
connections. This
-                      file must be in PEM format.
-                    </entry>
-                  </row>
-                </tbody>
-              </tgroup>
-            </table>
-          </li>
-
-          <!-- Formerly: 
conref="../shared/cm_common_elements.xml#cm/save_changes_short" -->
-          <li>Click <uicontrol>Save Changes</uicontrol> to commit the 
changes.</li>
-
-          <li>
-            Restart the Impala service.
-          </li>
-        </ol>
-      </p>
-
-      <p>
-        For information on configuring TLS/SSL communication with the 
<codeph>impala-shell</codeph> interpreter, see
-        <xref href="#concept_q1p_j2d_rp/secref"/>.
-      </p>
-
-    </conbody>
-
-  </concept>
-
   <concept id="concept_q1p_j2d_rp">
 
     <title>Using the Command Line</title>
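
As a sketch of the command-line configuration the surviving section covers, the impalad daemon can present a PEM certificate and key, and impala-shell can verify it; the file paths and hostname below are placeholders:

    # Server side: serve client connections over TLS/SSL.
    impalad --ssl_server_certificate=/path/to/impala-cert.pem \
            --ssl_private_key=/path/to/impala-key.pem
    # Client side: require TLS and verify against the CA certificate.
    impala-shell --ssl --ca_cert=/path/to/ca-cert.pem -i impalad-host.example.com
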

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_txtfile.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_txtfile.xml b/docs/topics/impala_txtfile.xml
index 5d3f315..659d9f0 100644
--- a/docs/topics/impala_txtfile.xml
+++ b/docs/topics/impala_txtfile.xml
@@ -483,15 +483,6 @@ INSERT INTO csv SELECT * FROM other_file_format_table;</codeblock>
         <ol>
           <li>
             <b>Prepare your systems to work with LZO by downloading and 
installing the appropriate libraries:</b>
-            <p audience="hidden">
-              <b>On systems managed by Cloudera Manager using parcels:</b>
-            </p>
-
-            <p audience="hidden">
-              See the setup instructions for the LZO parcel in the Cloudera 
Manager documentation for
-              <xref 
href="http://www.cloudera.com/documentation/enterprise/latest/topics/cm_ig_install_gpl_extras.html";
 scope="external" format="html">Cloudera
-              Manager 5</xref>.
-            </p>
 
             <p>
               <b>On systems managed by Cloudera Manager using packages, or not 
managed by Cloudera Manager:</b>

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/8b417c69/docs/topics/impala_webui.xml
----------------------------------------------------------------------
diff --git a/docs/topics/impala_webui.xml b/docs/topics/impala_webui.xml
index 57d247c..176822a 100644
--- a/docs/topics/impala_webui.xml
+++ b/docs/topics/impala_webui.xml
@@ -104,11 +104,6 @@ under the License.
         suspect is having problems.
       </p>
 
-      <note audience="hidden">
-        To get a convenient picture of the health of all Impala nodes in a 
cluster, use the Cloudera Manager
-        interface, which collects the low-level operational information from 
all Impala nodes, and presents a
-        unified view of the entire cluster.
-      </note>
     </conbody>
 
     <concept audience="hidden" id="webui_impalad_disabling">
