Modified: knox/trunk/books/0.7.0/config_ldap_group_lookup.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/config_ldap_group_lookup.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/config_ldap_group_lookup.md (original)
+++ knox/trunk/books/0.7.0/config_ldap_group_lookup.md Fri Jan 15 15:24:45 2016
@@ -15,214 +15,214 @@
    limitations under the License.
 --->
 
-### LDAPGroupLookup ###
+### LDAP Group Lookup ###
 
 Knox can be configured to look up the LDAP groups that the authenticated user belongs to.
 Knox can look up both Static LDAP Groups and Dynamic LDAP Groups.
 The looked-up groups are populated as Principal(s) in the Java Subject of the authenticated user.
-Therefore service authorization rules can be defined in terms of LDAPGroups 
looked up from LDAP directory.
+Therefore, service authorization rules can be defined in terms of LDAP groups looked up from an LDAP directory.
 
-To look up LDAPGroups of authenticated user from LDAP, you have to use 
org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm in Shiro configuration.
+To look up the LDAP groups of an authenticated user, you have to use `org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm` in the Shiro configuration.
 
-Please see below a sample Shiro configuration snippet from a topology file 
that was tested looking LDAPGroups.
+Below is a sample Shiro configuration snippet from a topology file that was tested for looking up LDAP groups.
 
-        <provider>
-            <role>authentication</role>
-            <name>ShiroProvider</name>
-            <enabled>true</enabled>
-            <!-- 
-            session timeout in minutes,  this is really idle timeout,
-            defaults to 30mins, if the property value is not defined,, 
-            current client authentication would expire if client idles 
continuosly for more than this value
-            -->
-            <!-- defaults to: 30 minutes
-            <param>
-                <name>sessionTimeout</name>
-                <value>30</value>
-            </param>
-            -->
-
-            <!--
-              Use single KnoxLdapRealm to do authentication and ldap group 
look up
-            -->
-            <param>
-              <name>main.ldapRealm</name>
-              <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>
-            </param>
-            <param>
-              <name>main.ldapGroupContextFactory</name>
-              
<value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>
-            </param>
-            <param>
-              <name>main.ldapRealm.contextFactory</name>
-              <value>$ldapGroupContextFactory</value>
-            </param>
-            <!-- defaults to: simple
-            <param>
-              
<name>main.ldapRealm.contextFactory.authenticationMechanism</name>
-              <value>simple</value>
-            </param>
-            -->
-            <param>
-              <name>main.ldapRealm.contextFactory.url</name>
-              <value>ldap://localhost:33389</value>
-            </param>
-            <param>
-              <name>main.ldapRealm.userDnTemplate</name>
-              <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
-            </param>
-
-            <param>
-              <name>main.ldapRealm.authorizationEnabled</name>
-              <!-- defaults to: false -->
-              <value>true</value>
-            </param>
-            <!-- defaults to: simple
-            <param>
-              
<name>main.ldapRealm.contextFactory.systemAuthenticationMechanism</name>
-              <value>simple</value>
-            </param>
-            -->
-            <param>
-              <name>main.ldapRealm.searchBase</name>
-              <value>ou=groups,dc=hadoop,dc=apache,dc=org</value>
-            </param>
-            <!-- defaults to: groupOfNames
-            <param>
-              <name>main.ldapRealm.groupObjectClass</name>
-              <value>groupOfNames</value>
-            </param>
-            -->
-            <!-- defaults to: member
-            <param>
-              <name>main.ldapRealm.memberAttribute</name>
-              <value>member</value>
-            </param>
-            -->
-            <param>
-              <name>main.cacheManager</name>
-              
<value>org.apache.shiro.cache.MemoryConstrainedCacheManager</value>
-            </param>
-            <param>
-              <name>main.securityManager.cacheManager</name>
-              <value>$cacheManager</value>
-            </param>
-            <param>
-              <name>main.ldapRealm.memberAttributeValueTemplate</name>
-              <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
-            </param>
-            <!-- the above element is the template for most ldap servers 
-                for active directory use the following instead and
-                remove the above configuration.
-            <param>
-              <name>main.ldapRealm.memberAttributeValueTemplate</name>
-              <value>cn={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
-            </param>
-            -->
-            <param>
-              <name>main.ldapRealm.contextFactory.systemUsername</name>
-              <value>uid=guest,ou=people,dc=hadoop,dc=apache,dc=org</value>
-            </param>
-            <param>
-              <name>main.ldapRealm.contextFactory.systemPassword</name>
-              <value>${ALIAS=ldcSystemPassword}</value>
-            </param>
-
-            <param>
-              <name>urls./**</name> 
-              <value>authcBasic</value>
-            </param>
+    <provider>
+        <role>authentication</role>
+        <name>ShiroProvider</name>
+        <enabled>true</enabled>
+        <!-- 
+        Session timeout in minutes. This is really an idle timeout.
+        It defaults to 30 minutes if the property value is not defined.
+        The current client authentication will expire if the client idles
+        continuously for longer than this value.
+        -->
+        <!-- defaults to: 30 minutes
+        <param>
+            <name>sessionTimeout</name>
+            <value>30</value>
+        </param>
+        -->
+
+        <!--
+          Use a single KnoxLdapRealm to do authentication and LDAP group lookup
+        -->
+        <param>
+            <name>main.ldapRealm</name>
+            <value>org.apache.hadoop.gateway.shirorealm.KnoxLdapRealm</value>
+        </param>
+        <param>
+            <name>main.ldapGroupContextFactory</name>
+            
<value>org.apache.hadoop.gateway.shirorealm.KnoxLdapContextFactory</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory</name>
+            <value>$ldapGroupContextFactory</value>
+        </param>
+        <!-- defaults to: simple
+        <param>
+            <name>main.ldapRealm.contextFactory.authenticationMechanism</name>
+            <value>simple</value>
+        </param>
+        -->
+        <param>
+            <name>main.ldapRealm.contextFactory.url</name>
+            <value>ldap://localhost:33389</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.userDnTemplate</name>
+            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+
+        <param>
+            <name>main.ldapRealm.authorizationEnabled</name>
+            <!-- defaults to: false -->
+            <value>true</value>
+        </param>
+        <!-- defaults to: simple
+        <param>
+            
<name>main.ldapRealm.contextFactory.systemAuthenticationMechanism</name>
+            <value>simple</value>
+        </param>
+        -->
+        <param>
+            <name>main.ldapRealm.searchBase</name>
+            <value>ou=groups,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        <!-- defaults to: groupOfNames
+        <param>
+            <name>main.ldapRealm.groupObjectClass</name>
+            <value>groupOfNames</value>
+        </param>
+        -->
+        <!-- defaults to: member
+        <param>
+            <name>main.ldapRealm.memberAttribute</name>
+            <value>member</value>
+        </param>
+        -->
+        <param>
+            <name>main.cacheManager</name>
+            <value>org.apache.shiro.cache.MemoryConstrainedCacheManager</value>
+        </param>
+        <param>
+            <name>main.securityManager.cacheManager</name>
+            <value>$cacheManager</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.memberAttributeValueTemplate</name>
+            <value>uid={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        <!-- the above element is the template for most ldap servers 
+            for active directory use the following instead and
+            remove the above configuration.
+        <param>
+            <name>main.ldapRealm.memberAttributeValueTemplate</name>
+            <value>cn={0},ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        -->
+        <param>
+            <name>main.ldapRealm.contextFactory.systemUsername</name>
+            <value>uid=guest,ou=people,dc=hadoop,dc=apache,dc=org</value>
+        </param>
+        <param>
+            <name>main.ldapRealm.contextFactory.systemPassword</name>
+            <value>${ALIAS=ldcSystemPassword}</value>
+        </param>
+
+        <param>
+            <name>urls./**</name> 
+            <value>authcBasic</value>
+        </param>
 
-        </provider>
+    </provider>
 
 The configuration shown above looks up the Static LDAP Groups of the authenticated user and populates the group principals in the Java Subject corresponding to that user.
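+
+The `${ALIAS=ldcSystemPassword}` value above refers to a password alias. As a hedged illustration, one way to provision such an alias is with the Knox CLI described later in this guide; the topology name `sandbox` and the password value below are assumptions:
+
+    bin/knoxcli.sh create-alias ldcSystemPassword --cluster sandbox --value guest-password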
 
 If you want to look up Dynamic LDAP Groups instead of Static LDAP Groups, you have to specify the `groupObjectClass` and `memberAttribute` params as shown below:
 
-           <param>
-              <name>main.ldapRealm.groupObjectClass</name>
-              <value>groupOfUrls</value>
-            </param>
-            <param>
-              <name>main.ldapRealm.memberAttribute</name>
-              <value>memberUrl</value>
-            </param>
+    <param>
+        <name>main.ldapRealm.groupObjectClass</name>
+        <value>groupOfUrls</value>
+    </param>
+    <param>
+        <name>main.ldapRealm.memberAttribute</name>
+        <value>memberUrl</value>
+    </param>
 
 ### Template topology files and LDIF files to try out LDAP Group Lookup ###
 
-Knox bundles some template topology files and ldif files that you can use to 
try and test LDAP Group Lookup and associated authorization acls.
+Knox bundles some template topology files and LDIF files that you can use to try out and test LDAP Group Lookup and the associated authorization ACLs.
 All these template files are located under `{GATEWAY_HOME}/templates`.
 
 
 #### LDAP Static Group Lookup Templates, authentication and group lookup from 
the same directory ####
 
-topology file: sandbox.knoxrealm1.xml
-ldif file    : users.ldapgroups.ldif
+* topology file: sandbox.knoxrealm1.xml
+* ldif file: users.ldapgroups.ldif
 
 To try this out:
 
-cd {GATEWAY_HOME}
-cp templates/sandbox.knoxrealm1.xml conf/topologies/sandbox.xml
-cp templates/users.ldapgroups.ldif conf/users.ldif
-java -jar bin/ldap.jar conf
-java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
+    cd {GATEWAY_HOME}
+    cp templates/sandbox.knoxrealm1.xml conf/topologies/sandbox.xml
+    cp templates/users.ldapgroups.ldif conf/users.ldif
+    java -jar bin/ldap.jar conf
+    java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
 
 The following call to WebHDFS should report HTTP/1.1 401 Unauthorized, as guest is not a member of group "analyst" and the authorization rule requires membership in group "analyst":
 
-curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 
 The following call to WebHDFS should report {"Path":"/user/sam"}, as sam is a member of group "analyst" and therefore satisfies the authorization rule:
 
-curl  -i -v  -k -u sam:sam-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u sam:sam-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 
 
 #### LDAP Static Group Lookup Templates, authentication and group lookup from different directories ####
 
-topology file: sandbox.knoxrealm2.xml
-ldif file    : users.ldapgroups.ldif
+* topology file: sandbox.knoxrealm2.xml
+* ldif file: users.ldapgroups.ldif
 
 To try this out:
 
-cd {GATEWAY_HOME}
-cp templates/sandbox.knoxrealm2.xml conf/topologies/sandbox.xml
-cp templates/users.ldapgroups.ldif conf/users.ldif
-java -jar bin/ldap.jar conf
-java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
+    cd {GATEWAY_HOME}
+    cp templates/sandbox.knoxrealm2.xml conf/topologies/sandbox.xml
+    cp templates/users.ldapgroups.ldif conf/users.ldif
+    java -jar bin/ldap.jar conf
+    java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
 
 The following call to WebHDFS should report HTTP/1.1 401 Unauthorized, as guest is not a member of group "analyst" and the authorization rule requires membership in group "analyst":
 
-curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 
 The following call to WebHDFS should report {"Path":"/user/sam"}.
-As sam is a member of group "analyst", authorization provxider states user should be member of group "analyst"
+As sam is a member of group "analyst", it satisfies the authorization rule requiring membership in group "analyst":
 
-curl  -i -v  -k -u sam:sam-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u sam:sam-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 
 #### LDAP Dynamic Group Lookup Templates, authentication and dynamic group lookup from the same directory ####
 
-topology file: sandbox.knoxrealmdg.xml
-ldif file    : users.ldapdynamicgroups.ldif
+* topology file: sandbox.knoxrealmdg.xml
+* ldif file: users.ldapdynamicgroups.ldif
 
 To try this out:
 
-cd {GATEWAY_HOME}
-cp templates/sandbox.knoxrealmdg.xml conf/topologies/sandbox.xml
-cp templates/users.ldapdynamicgroups.ldif conf/users.ldif
-java -jar bin/ldap.jar conf
-java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
+    cd {GATEWAY_HOME}
+    cp templates/sandbox.knoxrealmdg.xml conf/topologies/sandbox.xml
+    cp templates/users.ldapdynamicgroups.ldif conf/users.ldif
+    java -jar bin/ldap.jar conf
+    java -Dsandbox.ldcSystemPassword=guest-password -jar bin/gateway.jar 
-persist-master
 
 Please note that users.ldapdynamicgroups.ldif also loads the schema necessary to create dynamic groups in Apache DS.
 
 The following call to WebHDFS should report HTTP/1.1 401 Unauthorized, as guest is not a member of the dynamic group "directors" and the authorization rule requires membership in group "directors":
 
-curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u guest:guest-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 
 The following call to WebHDFS should report {"Path":"/user/bob"}, as bob is a member of the dynamic group "directors" and therefore satisfies the authorization rule:
 
-curl  -i -v  -k -u sam:sam-password  -X GET 
https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
+    curl  -i -v  -k -u bob:bob-password  -X GET https://localhost:8443/gateway/sandbox/webhdfs/v1?op=GETHOMEDIRECTORY
 

Modified: knox/trunk/books/0.7.0/config_mutual_authentication_ssl.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/config_mutual_authentication_ssl.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/config_mutual_authentication_ssl.md (original)
+++ knox/trunk/books/0.7.0/config_mutual_authentication_ssl.md Fri Jan 15 
15:24:45 2016
@@ -30,10 +30,10 @@ The following table describes the config
 | gateway.truststore.type                        | Keystore type of the trust 
store. Default is JKS.         |
 | gateway.trust.all.certs                        | Allows for all certificates 
to be trusted. Default is false.|
 
-By only indicating that it is needed with gateway.client.auth.needed, the 
{GATEWAY_HOME}/data/security/keystores/gateway.jks keystore is used. This is 
the identity keystore for the server and can also be used as the truststore.
-In addition, we can specify the path to a dedicated truststore via 
gateway.truststore.path. If the truststore password is different from the 
gateway master secret then it can be set using the 
+By only indicating that it is needed with `gateway.client.auth.needed`, the 
`{GATEWAY_HOME}/data/security/keystores/gateway.jks` keystore is used. This is 
the identity keystore for the server and can also be used as the truststore.
+We can specify the path to a dedicated truststore via `gateway.truststore.path`. If the truststore password is different from the gateway master secret, it can be set using:
 
-  knoxcli.sh create-alias gateway-truststore-password --value {pwd} 
+    knoxcli.sh create-alias gateway-truststore-password --value {pwd} 
   
 Otherwise, the master secret will be used.
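+
+As a rough sketch of how such a dedicated truststore might be populated, the JDK `keytool` can import a trusted client certificate; the certificate file, alias, and truststore path below are hypothetical:
+
+    keytool -importcert -alias trusted-client -file client.pem \
+        -keystore {GATEWAY_HOME}/data/security/keystores/truststore.jks \
+        -storepass {pwd}
+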
-If the truststore is not a JKS type then it can be set via 
gateway.truststore.type.
+If the truststore is not a JKS type then it can be set via 
`gateway.truststore.type`.
\ No newline at end of file

Modified: knox/trunk/books/0.7.0/config_preauth_sso_provider.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/config_preauth_sso_provider.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/config_preauth_sso_provider.md (original)
+++ knox/trunk/books/0.7.0/config_preauth_sso_provider.md Fri Jan 15 15:24:45 
2016
@@ -29,11 +29,11 @@ This provider was designed for use with
 
 The HeaderPreAuth provider is configured within the topology file and has a 
minimal configuration that assumes SM_USER for CA SiteMinder. The following 
example is the bare minimum configuration for SiteMinder (with no IP address 
validation).
 
-       <provider>
-      <role>federation</role>
-      <name>HeaderPreAuth</name>
-      <enabled>true</enabled>
-     </provider>
+    <provider>
+        <role>federation</role>
+        <name>HeaderPreAuth</name>
+        <enabled>true</enabled>
+    </provider>
 
 The following table describes the configuration options for the preauthenticated SSO provider:
 
@@ -46,42 +46,42 @@ preauth.ip.addresses|Optional parameter
 preauth.custom.header|Required parameter for indicating a custom header to use 
for extracting the preauthenticated principal. The value extracted from this 
header is utilized as the PrimaryPrincipal within the established Subject. An 
incoming request that is missing the configured header will be refused with a 
401 unauthorized HTTP status.|SM_USER for SiteMinder usecase
 preauth.custom.group.header|Optional parameter for indicating an HTTP header name that contains a comma-separated list of groups. These are added to the authenticated Subject as group principals. A missing group header will result in no groups being extracted from the incoming request and a log entry, but processing will continue.|null - which means that there will be no group principals extracted from the request and added to the established Subject.
 
-##### NOTE: Mutual authentication can be used to establish a strong trust 
relationship between clients and servers while using the Preauthenticated SSO 
provider. See the configuration for Mutual Authentication with SSL in this 
document
+NOTE: Mutual authentication can be used to establish a strong trust 
relationship between clients and servers while using the Preauthenticated SSO 
provider. See the configuration for Mutual Authentication with SSL in this 
document.
 
 ##### Configuration for SiteMinder
 The following is an example of a configuration of the preauthenticated SSO provider that leverages the default SM_USER header name, assuming use with CA SiteMinder. It further configures validation based on the IP address of the incoming request.
 
-       <provider>
-      <role>federation</role>
-      <name>HeaderPreAuth</name>
-      <enabled>true</enabled>
-      
<param><name>preauth.validation.method</name><value>preauth.ip.validation</value></param>
-      
<param><name>preauth.ip.addresses</name><value>127.0.0.2,127.0.0.1</value></param>
+    <provider>
+        <role>federation</role>
+        <name>HeaderPreAuth</name>
+        <enabled>true</enabled>
+        
<param><name>preauth.validation.method</name><value>preauth.ip.validation</value></param>
+        
<param><name>preauth.ip.addresses</name><value>127.0.0.2,127.0.0.1</value></param>
     </provider>
 
 ##### REST Invocation for SiteMinder
 The following curl command can be used to request a directory listing from 
HDFS while passing in the expected header SM_USER.
 
-       curl -k -i --header "SM_USER: guest" -v 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
+    curl -k -i --header "SM_USER: guest" -v 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
 
 Omitting the --header "SM_USER: guest" above will result in a rejected request.
 
 ##### Configuration for IBM Tivoli AM
-As an example for configuring the preauthenticated sso provider for another 
SSO provider, the following illustrates the values used for IBM's Tivoli Access 
Manager:
+As an example of configuring the preauthenticated SSO provider for another SSO product, the following illustrates the values used for IBM's Tivoli Access Manager:
 
-       <provider>
-      <role>federation</role>
-      <name>HeaderPreAuth</name>
-      <enabled>true</enabled>
-      <param><name>preauth.custom.header</name><value>iv_user</value></param>
-      
<param><name>preauth.custom.group.header</name><value>iv_group</value></param>
-      
<param><name>preauth.validation.method</name><value>preauth.ip.validation</value></param>
-      
<param><name>preauth.ip.addresses</name><value>127.0.0.2,127.0.0.1</value></param>
+    <provider>
+        <role>federation</role>
+        <name>HeaderPreAuth</name>
+        <enabled>true</enabled>
+        <param><name>preauth.custom.header</name><value>iv_user</value></param>
+        
<param><name>preauth.custom.group.header</name><value>iv_group</value></param>
+        
<param><name>preauth.validation.method</name><value>preauth.ip.validation</value></param>
+        
<param><name>preauth.ip.addresses</name><value>127.0.0.2,127.0.0.1</value></param>
     </provider>
 
 ##### REST Invocation for Tivoli AM
 The following curl command can be used to request a directory listing from 
HDFS while passing in the expected headers of iv_user and iv_group. Note that 
the iv_group value in this command matches the expected ACL for webhdfs in the 
above topology file. Changing this from "admin" to "admin2" should result in a 
401 unauthorized response.
 
-       curl -k -i --header "iv_user: guest" --header "iv_group: admin" -v 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
+    curl -k -i --header "iv_user: guest" --header "iv_group: admin" -v 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
 
 Omitting the --header "iv_user: guest" above will result in a rejected request.
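+
+Changing the group value as described above should produce the 401 unauthorized response; a sketch of that negative case:
+
+    curl -k -i --header "iv_user: guest" --header "iv_group: admin2" -v https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS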

Modified: knox/trunk/books/0.7.0/config_webappsec_provider.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/config_webappsec_provider.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/config_webappsec_provider.md (original)
+++ knox/trunk/books/0.7.0/config_webappsec_provider.md Fri Jan 15 15:24:45 2016
@@ -43,15 +43,15 @@ As with all providers in the Knox gatewa
 
 Because of this one-to-many provider/filter relationship, there is an extra 
configuration element for this provider per filter. As you can see in the 
sample below, the actual filter configuration is defined entirely within the 
params of the WebAppSec provider.
 
-       <provider>
-         <role>webappsec</role>
-         <name>WebAppSec</name>
-         <enabled>true</enabled>
-         <param><name>csrf.enabled</name><value>true</value></param>
-         
<param><name>csrf.customHeader</name><value>X-XSRF-Header</value></param>
-         
<param><name>csrf.methodsToIgnore</name><value>GET,OPTIONS,HEAD</value></param>
-         <param><name>cors.enabled</name><value>true</value></param>
-       </provider>
+    <provider>
+        <role>webappsec</role>
+        <name>WebAppSec</name>
+        <enabled>true</enabled>
+        <param><name>csrf.enabled</name><value>true</value></param>
+        
<param><name>csrf.customHeader</name><value>X-XSRF-Header</value></param>
+        
<param><name>csrf.methodsToIgnore</name><value>GET,OPTIONS,HEAD</value></param>
+        <param><name>cors.enabled</name><value>true</value></param>
+    </provider>
 
 #### Descriptions ####
 The following tables describe the configuration options for the web app security provider:
@@ -69,7 +69,7 @@ csrf.methodsToIgnore|This is also an opt
 ###### REST Invocation
 The following curl command can be used to request a directory listing from 
HDFS while passing in the expected header X-XSRF-Header.
 
-       curl -k -i --header "X-XSRF-Header: valid" -v -u guest:guest-password 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
+    curl -k -i --header "X-XSRF-Header: valid" -v -u guest:guest-password 
https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS
 
 Omitting the --header "X-XSRF-Header: valid" above should result in an HTTP 
400 bad_request.
 
@@ -79,16 +79,16 @@ Disabling the provider will then allow a
 
 ###### Config
 
-Name | Description | Default
----------|-----------
-cors.enabled|This param enables the CORS capabilities|false
-cors.allowGenericHttpRequests|{true\|false} defaults to true. If true generic 
HTTP requests will be allowed to pass through the filter, else only valid and 
accepted CORS requests will be allowed (strict CORS filtering).|true
-cors.allowOrigin|{"\*"\|origin-list} defaults to "\*". Whitespace-separated 
list of origins that the CORS filter must allow. Requests from origins not 
included here will be refused with an HTTP 403 "Forbidden" response. If set to 
\* (asterisk) any origin will be allowed.|"\*"
-cors.allowSubdomains|{true\|false} defaults to false. If true the CORS filter 
will allow requests from any origin which is a subdomain origin of the allowed 
origins. A subdomain is matched by comparing its scheme and suffix (host name / 
IP address and optional port number).|false
-cors.supportedMethods|{method-list} defaults to GET, POST, HEAD, OPTIONS. List 
of the supported HTTP methods. These are advertised through the 
Access-Control-Allow-Methods header and must also be implemented by the actual 
CORS web service. Requests for methods not included here will be refused by the 
CORS filter with an HTTP 405 "Method not allowed" response.| GET, POST, HEAD, 
OPTIONS
-cors.supportedHeaders|{"\*"\|header-list} defaults to \*. The names of the 
supported author request headers. These are advertised through the 
Access-Control-Allow-Headers header. If the configuration property value is set 
to \* (asterisk) any author request header will be allowed. The CORS Filter 
implements this by simply echoing the requested value back to the browser.|\*
-cors.exposedHeaders | {header-list} defaults to empty list. List of the 
response headers other than simple response headers that the browser should 
expose to the author of the cross-domain request through the 
XMLHttpRequest.getResponseHeader() method. The CORS filter supplies this 
information through the Access-Control-Expose-Headers header.| empty
-cors.supportsCredentials | {true\|false} defaults to true. Indicates whether 
user credentials, such as cookies, HTTP authentication or client-side 
certificates, are supported. The CORS filter uses this value in constructing 
the Access-Control-Allow-Credentials header.|true
-cors.maxAge | {int} defaults to -1 (unspecified). Indicates how long the 
results of a preflight request can be cached by the web browser, in seconds. If 
-1 unspecified. This information is passed to the browser via the 
Access-Control-Max-Age header.| -1
-cors.tagRequests| {true\|false} defaults to false (no tagging). Enables HTTP 
servlet request tagging to provide CORS information to downstream handlers 
(filters and/or servlets).| false
+Name                         | Description | Default
+-----------------------------|-------------|---------
+cors.enabled                 | This param enables the CORS capabilities|false
+cors.allowGenericHttpRequests| {true\|false} defaults to true. If true generic 
HTTP requests will be allowed to pass through the filter, else only valid and 
accepted CORS requests will be allowed (strict CORS filtering).|true
+cors.allowOrigin             | {"\*"\|origin-list} defaults to "\*". 
Whitespace-separated list of origins that the CORS filter must allow. Requests 
from origins not included here will be refused with an HTTP 403 "Forbidden" 
response. If set to \* (asterisk) any origin will be allowed.|"\*"
+cors.allowSubdomains         | {true\|false} defaults to false. If true the 
CORS filter will allow requests from any origin which is a subdomain origin of 
the allowed origins. A subdomain is matched by comparing its scheme and suffix 
(host name / IP address and optional port number).|false
+cors.supportedMethods        | {method-list} defaults to GET, POST, HEAD, 
OPTIONS. List of the supported HTTP methods. These are advertised through the 
Access-Control-Allow-Methods header and must also be implemented by the actual 
CORS web service. Requests for methods not included here will be refused by the 
CORS filter with an HTTP 405 "Method not allowed" response.| GET, POST, HEAD, 
OPTIONS
+cors.supportedHeaders        | {"\*"\|header-list} defaults to \*. The names 
of the supported author request headers. These are advertised through the 
Access-Control-Allow-Headers header. If the configuration property value is set 
to \* (asterisk) any author request header will be allowed. The CORS Filter 
implements this by simply echoing the requested value back to the browser.|\*
+cors.exposedHeaders          | {header-list} defaults to empty list. List of 
the response headers other than simple response headers that the browser should 
expose to the author of the cross-domain request through the 
XMLHttpRequest.getResponseHeader() method. The CORS filter supplies this 
information through the Access-Control-Expose-Headers header.| empty
+cors.supportsCredentials     | {true\|false} defaults to true. Indicates 
whether user credentials, such as cookies, HTTP authentication or client-side 
certificates, are supported. The CORS filter uses this value in constructing 
the Access-Control-Allow-Credentials header.|true
+cors.maxAge                  | {int} defaults to -1 (unspecified). Indicates 
how long the results of a preflight request can be cached by the web browser, 
in seconds. If -1 unspecified. This information is passed to the browser via 
the Access-Control-Max-Age header.| -1
+cors.tagRequests             | {true\|false} defaults to false (no tagging). 
Enables HTTP servlet request tagging to provide CORS information to downstream 
handlers (filters and/or servlets).| false
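+
+As an illustrative sketch, a CORS preflight request can be simulated with curl against the sandbox WebHDFS URL used elsewhere in this guide; the origin below is hypothetical and, depending on the other configured providers, authentication may also apply:
+
+    curl -k -i -X OPTIONS \
+        -H "Origin: http://example.com" \
+        -H "Access-Control-Request-Method: GET" \
+        'https://localhost:8443/gateway/sandbox/webhdfs/v1/tmp?op=LISTSTATUS'
+
+With `cors.enabled` set to true, the response should carry the Access-Control-Allow-* headers described above.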
 

Modified: knox/trunk/books/0.7.0/knox_cli.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/knox_cli.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/knox_cli.md (original)
+++ knox/trunk/books/0.7.0/knox_cli.md Fri Jan 15 15:24:45 2016
@@ -16,11 +16,11 @@
 --->
 
 ### Knox CLI ###
-The Knox CLI is a command line utility for management of various aspects of 
the Knox deployment. It is primarily concerned with the management of the 
security artifacts for the gateway instance and each of the deployed topologies 
or hadoop clusters that are gated by the Knox Gateway instance.
+The Knox CLI is a command line utility for the management of various aspects 
of the Knox deployment. It is primarily concerned with the management of the 
security artifacts for the gateway instance and each of the deployed topologies 
or Hadoop clusters that are gated by the Knox Gateway instance.
 
 The various security artifacts are also generated and populated automatically by the Knox Gateway runtime when they are not found at startup. The assumptions made in those cases are appropriate for a test or development gateway instance and assume 'localhost' for hostname-specific activities. For production deployments, the CLI may aid in managing these artifacts.
 
-The knoxcli.sh script is located in the {GATEWAY_HOME}/bin directory.
+The knoxcli.sh script is located in the `{GATEWAY_HOME}/bin` directory.
 
 #### Help ####
 ##### `bin/knoxcli.sh [--help]` #####
@@ -32,13 +32,13 @@ Displays Knox version information.
 
 #### Master secret persistence ####
 ##### `bin/knoxcli.sh create-master [--force][--help]` #####
-Creates and persists an encrypted master secret in a file within 
{GATEWAY_HOME}/data/security/master. 
+Creates and persists an encrypted master secret in a file within 
`{GATEWAY_HOME}/data/security/master`. 
 
 NOTE: This command fails when there is an existing master file in the expected location. You may force it to overwrite the master file with the \-\-force switch. NOTE: this will require you to change the passwords protecting the gateway identity keystore and all credential stores.
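+
+For example, to overwrite an existing master secret in place (subject to the password caveats above):
+
+    bin/knoxcli.sh create-master --force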
 
 #### Alias creation ####
 ##### `bin/knoxcli.sh create-alias name [--cluster c] [--value v] [--generate] 
[--help]` #####
-Creates a password alias and stores it in a credential store within the 
{GATEWAY_HOME}/data/security/keystores dir. 
+Creates a password alias and stores it in a credential store within the 
`{GATEWAY_HOME}/data/security/keystores` dir. 
 
 argument    | description
 ------------|-----------
@@ -49,7 +49,7 @@ name|name of the alias to create
 
 #### Alias deletion ####
 ##### `bin/knoxcli.sh delete-alias name [--cluster c] [--help]` #####
-Deletes a password and alias mapping from a credential store within 
{GATEWAY_HOME}/data/security/keystores.  
+Deletes a password and alias mapping from a credential store within 
`{GATEWAY_HOME}/data/security/keystores`.
 
 argument | description
 ---------|-----------
@@ -58,7 +58,7 @@ name | name of the alias to delete
 
 #### Alias listing ####
 ##### `bin/knoxcli.sh list-alias [--cluster c] [--help]` #####
-Lists the alias names for the credential store within 
{GATEWAY_HOME}/data/security/keystores.  
+Lists the alias names for the credential store within 
`{GATEWAY_HOME}/data/security/keystores`.
 
 NOTE: This command will list the aliases in lowercase, which is a result of the underlying credential store implementation. Lookup of credentials is a case-insensitive operation, so this is not an issue.
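+
+For example, to list the aliases in the credential store for a topology named `sandbox` (the cluster name here is an assumption):
+
+    bin/knoxcli.sh list-alias --cluster sandbox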
 
@@ -68,7 +68,7 @@ argument | description
 
 #### Self-signed cert creation ####
 ##### `bin/knoxcli.sh create-cert [--hostname n] [--help]` #####
-Creates and stores a self-signed certificate to represent the identity of the 
gateway instance. This is stored within the 
{GATEWAY_HOME}/data/security/keystores/gateway.jks keystore.  
+Creates and stores a self-signed certificate to represent the identity of the 
gateway instance. This is stored within the 
`{GATEWAY_HOME}/data/security/keystores/gateway.jks` keystore.  
 
 argument | description
 ---------|-----------

Modified: knox/trunk/books/0.7.0/likeised
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/likeised?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/likeised (original)
+++ knox/trunk/books/0.7.0/likeised Fri Jan 15 15:24:45 2016
@@ -18,7 +18,7 @@ s@<h2><a id="Gateway+Details@</div><div
 s@<h3><a id="Configuration@</div><div id="Configuration"><h3><a 
id="Configuration@
 s@<h3><a id="Knox+CLI@</div><div id="Knox+CLI"><h3><a id="Knox+CLI@
 s@<h3><a id="Authentication@</div><div id="Authentication"><h3><a 
id="Authentication@
-s@<h3><a id="LDAPGroupLookup@</div><div id="LDAPGroupLookup"><h3><a 
id="LDAPGroupLookup@
+s@<h3><a id="LDAP+Group+Lookup@</div><div id="LDAP+Group+Lookup"><h3><a 
id="LDAP+Group+Lookup@
 s@<h3><a id="Identity+Assertion@</div><div id="Identity+Assertion"><h3><a 
id="Identity+Assertion@
 s@<h3><a id="Authorization@</div><div id="Authorization"><h3><a 
id="Authorization@
 s@<h2><a id="Configuration@</div><div id="Configuration"><h2><a 
id="Configuration@
@@ -26,6 +26,7 @@ s@<h3><a id="Secure+Clusters@</div><div
 s@<h3><a id="High+Availability@</div><div id="High+Availability"><h3><a 
id="High+Availability@
 s@<h3><a id="Web+App+Security+Provider@</div><div 
id="Web+App+Security+Provider"><h3><a id="Web+App+Security+Provider@
 s@<h3><a id="Preauthenticated+SSO+Provider@</div><div 
id="Preauthenticated+SSO+Provider"><h3><a id="Preauthenticated+SSO+Provider@
+s@<h3><a id="Mutual+Authentication+with+SSL@</div><div 
id="Mutual+Authentication+with+SSL"><h3><a id="Mutual+Authentication+with+SSL@
 s@<h3><a id="Audit@</div><div id="Audit"><h3><a id="Audit@
 s@<h2><a id="Client+Details@</div><div id="Client+Details"><h2><a 
id="Client+Details@
 s@<h2><a id="Service+Details@</div><div id="Service+Details"><h2><a 
id="Service+Details@
@@ -34,6 +35,8 @@ s@<h3><a id="WebHCat@</div><div id="WebH
 s@<h3><a id="Oozie@</div><div id="Oozie"><h3><a id="Oozie@
 s@<h3><a id="HBase@</div><div id="HBase"><h3><a id="HBase@
 s@<h3><a id="Hive@</div><div id="Hive"><h3><a id="Hive@
+s@<h3><a id="Storm@</div><div id="Storm"><h3><a id="Storm@
+s@<h3><a id="Default+Service+HA+support@</div><div 
id="Default+Service+HA+support"><h3><a id="Default+Service+HA+support@
 s@<h2><a id="Limitations@</div><div id="Limitations"><h2><a id="Limitations@
 s@<h2><a id="Troubleshooting@</div><div id="Troubleshooting"><h2><a 
id="Troubleshooting@
 s@<h2><a id="Export+Controls@</div><div id="Export+Controls"><h2><a 
id="Export+Controls@

Modified: knox/trunk/books/0.7.0/quick_start.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/quick_start.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/quick_start.md (original)
+++ knox/trunk/books/0.7.0/quick_start.md Fri Jan 15 15:24:45 2016
@@ -45,7 +45,7 @@ Knox 0.7.0 supports Hadoop 2.x, the quic
 
 
 ### 2 - Download Hadoop 2.x VM ###
-The quick start provides a link to download Hadoop 2.0 based Hortonworks 
virtual machine [Sandbox](http://hortonworks.com/products/hdp-2/#install). 
Please note Knox supports other Hadoop distributions and is configurable 
against a full blown Hadoop cluster.
+The quick start provides a link to download Hadoop 2.0 based Hortonworks 
virtual machine [Sandbox](http://hortonworks.com/products/hdp-2/#install). 
Please note Knox supports other Hadoop distributions and is configurable 
against a full-blown Hadoop cluster.
 Configuring Knox for a Hadoop 2.x version, Hadoop deployed in EC2, or a custom Hadoop cluster is documented in the advanced deployment guide.
 
 
@@ -204,4 +204,4 @@ To validate that see the sections for th
 
     curl -i -k -u guest:guest-password -X GET \
         '{Value of Location header from command response above}'
-
+        

Modified: knox/trunk/books/0.7.0/service_default_ha.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_default_ha.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_default_ha.md (original)
+++ knox/trunk/books/0.7.0/service_default_ha.md Fri Jan 15 15:24:45 2016
@@ -25,7 +25,7 @@ The default HA functionality works on a
 to route all of a service's REST calls until a connection error occurs. The 
top URL is then put at the bottom of the
 list and the next URL is attempted. This goes on until the setting of 
'maxFailoverAttempts' is reached.
 
-At present the following services can avail this default High Availability 
functionality and have been tested for the
+At present the following services can use this default High Availability 
functionality and have been tested for the
 same:
 
 * WEBHCAT
@@ -35,40 +35,39 @@ same:
 To enable HA functionality for a service in Knox the following configuration 
has to be added to the topology file.
 
     <provider>
-       <role>ha</role>
-       <name>HaProvider</name>
-       <enabled>true</enabled>
-       <param>
-           <name>{SERVICE}</name>
-           <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
-       </param>
+        <role>ha</role>
+        <name>HaProvider</name>
+        <enabled>true</enabled>
+        <param>
+            <name>{SERVICE}</name>
+            <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
+        </param>
     </provider>
 
-The role and name of the provider above must be as shown. The name in the 
'param' section i.e. {SERVICE} must match
+The role and name of the provider above must be as shown. The name in the 'param' section, i.e. `{SERVICE}`, must match
 that of the service role name that is being configured for HA and the value in 
the 'param' section is the configuration
-for that particular service in HA mode. For example, the value of {SERVICE} 
can be 'WEBHCAT', 'HBASE' or 'OOZIE'.
+for that particular service in HA mode. For example, the value of `{SERVICE}` 
can be 'WEBHCAT', 'HBASE' or 'OOZIE'.
 
 To configure multiple services in HA mode, additional 'param' sections can be 
added.
 
 For example,
 
-
     <provider>
-       <role>ha</role>
-       <name>HaProvider</name>
-       <enabled>true</enabled>
-       <param>
-           <name>OOZIE</name>
-           <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
-       </param>
-       <param>
-           <name>HBASE</name>
-           <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
-       </param>
-       <param>
-           <name>WEBHCAT</name>
-           <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
-       </param>
+        <role>ha</role>
+        <name>HaProvider</name>
+        <enabled>true</enabled>
+        <param>
+            <name>OOZIE</name>
+            <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
+        </param>
+        <param>
+            <name>HBASE</name>
+            <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
+        </param>
+        <param>
+            <name>WEBHCAT</name>
+            <value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true</value>
+        </param>
     </provider>
 
 The various configuration parameters are described below:

Modified: knox/trunk/books/0.7.0/service_hbase.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_hbase.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_hbase.md (original)
+++ knox/trunk/books/0.7.0/service_hbase.md Fri Jan 15 15:24:45 2016
@@ -17,38 +17,40 @@
 
 ### HBase ###
 
-The HBase REST API is provided by the Stargate service for HBase.
-See the HBase Stargate Setup section below for getting started with stargate 
and Knox with the Hortonworks Sandbox environment.
+HBase provides an optional REST API (previously called Stargate).
+See the HBase REST Setup section below for getting started with the HBase REST 
API and Knox with the Hortonworks Sandbox environment.
 
 #### HBase URL Mapping ####
 
 | ------- | 
----------------------------------------------------------------------------- |
 | Gateway | 
`https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/hbase` |
-| Cluster | `http://{stargate-host}:60080/`                                    
     |
+| Cluster | `http://{hbase-rest-host}:8080/`                                   
      |
 
 #### HBase Examples ####
 
-The examples below illustrate the set of basic operations with HBase instance 
using Stargate REST API.
-Use following link to get more details about HBase/Stargate API: 
http://wiki.apache.org/hadoop/Hbase/Stargate.
+The examples below illustrate a set of basic operations with an HBase instance using the REST API.
+Use the following link to get more details about the HBase REST API: http://hbase.apache.org/book.html#_rest.
 
-Note: Some HBase examples may not work due to enabled [Access 
Control](https://hbase.apache.org/book/hbase.accesscontrol.configuration.html). 
User may not be granted for performing operations in samples. In order to check 
if Access Control is configured in the HBase instance verify hbase-site.xml for 
a presence of `org.apache.hadoop.hbase.security.access.AccessController` in 
`hbase.coprocessor.master.classes` and `hbase.coprocessor.region.classes` 
properties.  
+Note: Some HBase examples may not work if [Access Control](http://hbase.apache.org/book.html#_securing_access_to_your_data) is enabled; the user may not be granted permission to perform the operations in the samples. To check whether Access Control is configured in the HBase instance, verify `hbase-site.xml` for the presence of `org.apache.hadoop.hbase.security.access.AccessController` in the `hbase.coprocessor.master.classes` and `hbase.coprocessor.region.classes` properties.
 To grant the Read, Write, Create permissions to `guest` user execute the 
following command:
 
     echo grant 'guest', 'RWC' | hbase shell
 
-If you are using a cluster secured with Kerberos you will need to have used 
`kinit` to authenticate to the KDC    
+If you are using a cluster secured with Kerberos, you will need to have used `kinit` to authenticate to the KDC.
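+
+For example (the principal name here is hypothetical):
+
+    kinit guest@EXAMPLE.COM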
 
-#### HBase Stargate Setup ####
+#### HBase REST API Setup ####
 
-#### Launch Stargate ####
+#### Launch REST API ####
 
-The command below launches the Stargate daemon on port 60080
+The command below launches the REST daemon on port 8080 (the default):
 
-    sudo {HBASE_BIN}/hbase-daemon.sh start rest -p 60080
+    sudo {HBASE_BIN}/hbase-daemon.sh start rest
+
+Where `{HBASE_BIN}` is `/usr/hdp/current/hbase-master/bin/` in the case of an HDP install.
 
-Where {HBASE_BIN} is /usr/hdp/current/hbase-master/bin/ in the case of a HDP 
install.
+To use a different port use the `-p` option:
 
-Port 60080 is used because it was specified in sample Hadoop cluster 
deployment `{GATEWAY_HOME}/conf/topologies/sandbox.xml`.
+    sudo {HBASE_BIN}/hbase-daemon.sh start rest -p 60080
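+
+To verify that the daemon is up, the REST service can be queried directly; this sketch assumes the non-default port 60080 chosen above:
+
+    curl -H "Accept: application/json" http://localhost:60080/version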
 
 #### Configure Sandbox port mapping for VirtualBox ####
 
@@ -57,17 +59,15 @@ Port 60080 is used because it was specif
 3. Select tab Network
 4. Select Adapter 1
 5. Press Port Forwarding button
-6. Press Plus button to insert new rule: Name=Stargate, Host Port=60080, Guest 
Port=60080
+6. Press Plus button to insert new rule: Name=HBase REST, Host Port=60080, Guest Port=60080
 7. Press OK to close the rule window
 8. Press OK in the Network window to save the changes
 
-60080 port is used because it was specified in sample Hadoop cluster 
deployment `{GATEWAY_HOME}/conf/topologies/sandbox.xml`.
-
 #### HBase Restart ####
 
 If it becomes necessary to restart HBase you can log into the hosts running 
HBase and use these steps.
 
-    sudo {HBASE_BIN}hbase-daemon.sh stop rest
+    sudo {HBASE_BIN}/hbase-daemon.sh stop rest
     sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop regionserver
     sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop master
     sudo -u hbase {HBASE_BIN}/hbase-daemon.sh stop zookeeper
@@ -77,11 +77,11 @@ If it becomes necessary to restart HBase
     sudo -u hbase {HBASE_BIN}/hbase-daemon.sh start zookeeper
     sudo {HBASE_BIN}/hbase-daemon.sh start rest -p 60080
 
-Where {HBASE_BIN} is /usr/hdp/current/hbase-master/bin/ in the case of a HDP 
install.
+Where `{HBASE_BIN}` is `/usr/hdp/current/hbase-master/bin/` in the case of an HDP Sandbox install.
  
-#### HBase/Stargate client DSL ####
+#### HBase client DSL ####
 
-For more details about client DSL usage please follow this 
[page|https://cwiki.apache.org/confluence/display/KNOX/Client+Usage].
+For more details about client DSL usage, please see the chapter on the client DSL in this guide.
 
 After launching the shell, execute the following command to be able to use the 
snippets below.
 `import org.apache.hadoop.gateway.shell.hbase.HBase;`
@@ -313,7 +313,7 @@ After launching the shell, execute the f
 * Example
     * 
`HBase.session(session).table(tableName).scanner(scannerId).delete().now()`
 
-### HBase/Stargate via Client DSL ###
+### HBase via Client DSL ###
 
 This example illustrates the sequence of all basic HBase operations: 
 1. get system version
@@ -520,7 +520,7 @@ Each line from the file below will need
 
     session.shutdown(10, SECONDS)
 
-### HBase/Stargate via cURL
+### HBase via cURL
 
 #### Get software version
 
@@ -530,7 +530,7 @@ Set Accept Header to "text/plain", "text
      -H "Accept:  application/json"\
      -X GET 'https://localhost:8443/gateway/sandbox/hbase/version'
 
-#### Get version information regarding the HBase cluster backing the Stargate 
instance
+#### Get version information regarding the HBase cluster backing the REST API 
instance
 
 Set Accept Header to "text/plain", "text/xml" or "application/x-protobuf"
 
@@ -538,7 +538,7 @@ Set Accept Header to "text/plain", "text
      -H "Accept: text/xml"\
      -X GET 'https://localhost:8443/gateway/sandbox/hbase/version/cluster'
 
-#### Get detailed status on the HBase cluster backing the Stargate instance.
+#### Get detailed status on the HBase cluster backing the REST API instance.
 
 Set Accept Header to "text/plain", "text/xml", "application/json" or 
"application/x-protobuf"
 

Modified: knox/trunk/books/0.7.0/service_hive.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_hive.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_hive.md (original)
+++ knox/trunk/books/0.7.0/service_hive.md Fri Jan 15 15:24:45 2016
@@ -18,7 +18,7 @@
 ### Hive ###
 
 The [Hive wiki pages](https://cwiki.apache.org/confluence/display/Hive/Home) 
describe Hive installation and configuration processes.
-In sandbox configuration file for Hive is located at /etc/hive/hive-site.xml.
+In the Sandbox, the configuration file for Hive is located at `/etc/hive/hive-site.xml`.
 Hive Server has to be started in HTTP mode.
 Note the properties shown below as they are related to configuration required 
by the gateway.
 
@@ -58,7 +58,7 @@ By default the gateway is configured to
 #### Hive JDBC URL Mapping ####
 
 | ------- | 
------------------------------------------------------------------------------- 
|
-| Gateway | 
jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password}?hive.server2.transport.mode=http;hive.server2.thrift.http.path={gateway-path}/{cluster-name}/hive|
+| Gateway | 
jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive|
 | Cluster |`http://{hive-host}:{hive-port}/{hive-path}`|
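+
+As a hedged sketch, the gateway mapping above can be exercised with Beeline using the same placeholder values; the `guest` credentials match the sandbox examples used elsewhere in this guide:
+
+    beeline -u "jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive" -n guest -p guest-password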
 
 #### Hive Examples ####
@@ -74,7 +74,7 @@ This guide provides detailed examples fo
      1. Hive JDBC in HTTP mode depends on the following minimal set of libraries to run successfully (they must be in the classpath):
          * hive-jdbc-0.14.0-standalone.jar;
          * commons-logging-1.1.3.jar;
-     2. Connection URL has to be following:     
jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password}?hive.server2.transport.mode=http;hive.server2.thrift.http.path={gateway-path}/{cluster-name}/hive
+     2. Connection URL has to be following: 
`jdbc:hive2://{gateway-host}:{gateway-port}/;ssl=true;sslTrustStore={gateway-trust-store-path};trustStorePassword={gateway-trust-store-password};transportMode=http;httpPath={gateway-path}/{cluster-name}/hive`
      3. Look at 
https://cwiki.apache.org/confluence/display/Hive/GettingStarted#GettingStarted-DDLOperations
 for examples.
        Hint: For testing it would be better to execute `set 
hive.security.authorization.enabled=false` as the first statement.
        Hint: Good examples of Hive DDL/DML can be found here 
http://gettingstarted.hadooponazure.com/hw/hive.html
@@ -129,7 +129,7 @@ Sample example for creating new table, l
 
           statement = connection.createStatement();
 
-          // disable Hive authorization - it could be ommited if Hive 
authorization
+          // disable Hive authorization - it could be omitted if Hive 
authorization
           // was configured properly
           statement.execute( "set hive.security.authorization.enabled=false" );
 
@@ -176,7 +176,7 @@ Sample example for creating new table, l
 
 ###### Groovy ######
 
-Make sure that GATEWAY_HOME/ext directory contains following libraries for 
successful execution:
+Make sure that the `{GATEWAY_HOME}/ext` directory contains the following libraries for successful execution:
 
 - hive-jdbc-0.14.0-standalone.jar;
 - commons-logging-1.1.3.jar;
@@ -212,7 +212,7 @@ Each line from the file below will need
 
     statement = connection.createStatement();
 
-    // Disable Hive authorization - This can be ommited if Hive authorization 
is configured properly
+    // Disable Hive authorization - This can be omitted if Hive authorization 
is configured properly
     statement.execute( "set hive.security.authorization.enabled=false" );
 
     // Create sample table
@@ -277,12 +277,12 @@ configuration and not the Hive connectio
 To enable HA functionality for Hive in Knox the following configuration has to 
be added to the topology file.
 
     <provider>
-       <role>ha</role>
-       <name>HaProvider</name>
-       <enabled>true</enabled>
-       <param>
-           <name>HIVE</name>
-           
<value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true;zookeeperEnsemble=machine1:2181,machine2:2181,machine3:2181;
+        <role>ha</role>
+        <name>HaProvider</name>
+        <enabled>true</enabled>
+        <param>
+            <name>HIVE</name>
+            
<value>maxFailoverAttempts=3;failoverSleep=1000;enabled=true;zookeeperEnsemble=machine1:2181,machine2:2181,machine3:2181;
            zookeeperNamespace=hiveserver2</value>
        </param>
     </provider>
@@ -321,5 +321,5 @@ And for the service configuration itself
         <role>HIVE</role>
     </service>
 
-Please note that there is no <url> tag specified here as the URLs for the Hive 
servers are obtained from Zookeeper.
+Please note that there is no `<url>` tag specified here, as the URLs for the Hive servers are obtained from ZooKeeper.
 

Modified: knox/trunk/books/0.7.0/service_oozie.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_oozie.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_oozie.md (original)
+++ knox/trunk/books/0.7.0/service_oozie.md Fri Jan 15 15:24:45 2016
@@ -18,49 +18,48 @@
 ### Oozie ###
 
 
-Oozie is a Hadoop component provides complex job workflows to be submitted and 
managed.
-Please refer to the latest [Oozie 
documentation](http://oozie.apache.org/docs/4.0.0/) for details.
+Oozie is a Hadoop component that allows complex job workflows to be submitted and managed.
+Please refer to the latest [Oozie 
documentation](http://oozie.apache.org/docs/4.2.0/) for details.
 
 In order to make Oozie accessible via the gateway there are several important 
Hadoop configuration settings.
 These all relate to the network endpoint exposed by various Hadoop services.
 
-The HTTP endpoint at which Oozie is running can be found via the 
oozie.base.url property in the oozie-site.xml file.
-In a Sandbox installation this can typically be found in 
/etc/oozie/conf/oozie-site.xml.
+The HTTP endpoint at which Oozie is running can be found via the `oozie.base.url` property in the `oozie-site.xml` file.
+In a Sandbox installation this can typically be found in 
`/etc/oozie/conf/oozie-site.xml`.
 
     <property>
         <name>oozie.base.url</name>
         <value>http://sandbox.hortonworks.com:11000/oozie</value>
     </property>
 
-The RPC address at which the Resource Manager exposes the JOBTRACKER endpoint 
can be found via the yarn.resourcemanager.address in the yarn-site.xml file.
-In a Sandbox installation this can typically be found in 
/etc/hadoop/conf/yarn-site.xml.
+The RPC address at which the Resource Manager exposes the JOBTRACKER endpoint can be found via the `yarn.resourcemanager.address` property in the `yarn-site.xml` file.
+In a Sandbox installation this can typically be found in 
`/etc/hadoop/conf/yarn-site.xml`.
 
     <property>
         <name>yarn.resourcemanager.address</name>
         <value>sandbox.hortonworks.com:8050</value>
     </property>
 
-The RPC address at which the Name Node exposes its RPC endpoint can be found 
via the dfs.namenode.rpc-address in the hdfs-site.xml file.
-In a Sandbox installation this can typically be found in 
/etc/hadoop/conf/hdfs-site.xml.
+The RPC address at which the Name Node exposes its RPC endpoint can be found via the `dfs.namenode.rpc-address` property in the `hdfs-site.xml` file.
+In a Sandbox installation this can typically be found in 
`/etc/hadoop/conf/hdfs-site.xml`.
 
     <property>
         <name>dfs.namenode.rpc-address</name>
         <value>sandbox.hortonworks.com:8020</value>
     </property>
 
-If HDFS has been configured to be in High Availability mode (HA), then instead 
of the RPC address mentioned above for the Name Node, look up
-and use the logical name of the service found via dfs.nameservices in 
hdfs-site.xml. For example,
+If HDFS has been configured to be in High Availability mode (HA), then instead 
of the RPC address mentioned above for the Name Node, look up and use the 
logical name of the service found via `dfs.nameservices` in `hdfs-site.xml`. 
For example,
 
     <property>
         <name>dfs.nameservices</name>
         <value>ha-service</value>
     </property>
 
-Please note, only one of the URL's, either the RPC endpoint or the HA service 
name should be used as the NAMENODE hdfs URL in the gateway topology file.
+Please note, only one of the URLs, either the RPC endpoint or the HA service name, should be used as the NAMENODE HDFS URL in the gateway topology file.
 
 The information above must be provided to the gateway via a topology 
descriptor file.
 These topology descriptor files are placed in `{GATEWAY_HOME}/deployments`.
-An example that is setup for the default configuration of the Sandbox is 
{GATEWAY_HOME}/deployments/sandbox.xml.
+An example that is set up for the default configuration of the Sandbox is `{GATEWAY_HOME}/deployments/sandbox.xml`.
 These values will need to be changed for non-default Sandbox or other Hadoop 
cluster configuration.
 
     <service>
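
Once a topology containing the values above has been deployed, a simple smoke test is to call Oozie's admin status API through the gateway. The `sandbox` topology name and the `guest` credentials below are assumptions taken from the default Sandbox setup:

    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/oozie/v1/admin/status'

A healthy server should answer with a small JSON body such as `{"systemMode":"NORMAL"}`.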

Modified: knox/trunk/books/0.7.0/service_storm.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_storm.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_storm.md (original)
+++ knox/trunk/books/0.7.0/service_storm.md Fri Jan 15 15:24:45 2016
@@ -22,26 +22,25 @@ retrieving metrics data and configuratio
 
 The docs for this can be found here
 
-https://github.com/apache/storm/blob/master/STORM-UI-REST-API.md
+https://github.com/apache/storm/blob/master/docs/documentation/ui-rest-api.md
 
 To enable this functionality, a topology file needs to have the following 
configuration:
 
-
     <service>
-            <role>STORM</role>
-            <url>http://<hostname>:<port></url>
+        <role>STORM</role>
+        <url>http://<hostname>:<port></url>
     </service>
 
 The default UI daemon port is 8744. If it is configured to some other port, 
that configuration can be
-found in storm.yaml as the value for the property 'ui.port'.
+found in `storm.yaml` as the value for the property `ui.port`.
 
 In addition to the storm service configuration above, a STORM-LOGVIEWER service must be configured if the log files are to be retrieved through Knox. The value of the port for the logviewer can be found via the property `logviewer.port`, also in the file `storm.yaml`.
 
     <service>
-            <role>STORM-LOGVIEWER</role>
-            <url>http://<hostname>:<port></url>
+        <role>STORM-LOGVIEWER</role>
+        <url>http://<hostname>:<port></url>
     </service>
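
With both services in place, the Storm UI REST API can be probed through the gateway with cURL. The `sandbox` topology name and `guest` credentials below are assumptions, and the `/api/v1/cluster/summary` resource comes from the Storm REST API documentation linked above:

    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/storm/api/v1/cluster/summary'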
 
 

Modified: knox/trunk/books/0.7.0/service_webhcat.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_webhcat.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_webhcat.md (original)
+++ knox/trunk/books/0.7.0/service_webhcat.md Fri Jan 15 15:24:45 2016
@@ -17,10 +17,10 @@
 
 ### WebHCat ###
 
-WebHCat is a related but separate service from Hive.
+WebHCat (also called _Templeton_) is a related but separate service from 
HiveServer2.
 As such it is installed and configured independently.
 The [WebHCat wiki pages](https://cwiki.apache.org/confluence/display/Hive/WebHCat) describe this process.
-In sandbox this configuration file for WebHCat is located at 
/etc/hadoop/hcatalog/webhcat-site.xml.
+In sandbox this configuration file for WebHCat is located at 
`/etc/hadoop/hcatalog/webhcat-site.xml`.
 Note the properties shown below as they are related to configuration required 
by the gateway.
 
     <property>
@@ -29,9 +29,9 @@ Note the properties shown below as they
     </property>
 
 Also important is the configuration of the JOBTRACKER RPC endpoint.
-For Hadoop 2 this can be found in the yarn-site.xml file.
-In Sandbox this file can be found at /etc/hadoop/conf/yarn-site.xml.
-The property yarn.resourcemanager.address within that file is relevant for the 
gateway's configuration.
+For Hadoop 2 this can be found in the `yarn-site.xml` file.
+In Sandbox this file can be found at `/etc/hadoop/conf/yarn-site.xml`.
+The property `yarn.resourcemanager.address` within that file is relevant for 
the gateway's configuration.
 
     <property>
         <name>yarn.resourcemanager.address</name>
@@ -73,6 +73,13 @@ For WebHCat URLs, the mapping of Knox Ga
 | Cluster | `http://{webhcat-host}:{webhcat-port}/templeton` |
 
 
+#### WebHCat via cURL ####
+
+You can use cURL to directly invoke the REST APIs via the gateway. For the full list of available REST calls refer to the WebHCat documentation. This is a simple curl command to test the connection:
+
+    curl -i -k -u guest:guest-password 
'https://localhost:8443/gateway/sandbox/templeton/v1/status'
+
+
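+Along the same lines, other read-only WebHCat resources can be probed through the gateway. For example, this sketch lists the IDs of jobs known to WebHCat, assuming the same topology and credentials as above:
+
+    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/templeton/v1/jobs'
+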
 #### WebHCat Example ####
 
 This example will submit the familiar WordCount Java MapReduce job to the 
Hadoop cluster via the gateway using the KnoxShell DSL.
@@ -88,14 +95,13 @@ You can manually type in the KnoxShell D
 
 Each line from the file `samples/ExampleWebHCatJob.groovy` would then need to 
be typed or copied into the interactive shell.
 
-
 #### WebHCat Client DSL ####
 
 ##### submitJava() - Submit a Java MapReduce job.
 
 * Request
     * jar (String) - The remote file name of the JAR containing the app to 
execute.
-    * app (String) - The app name to execute.  This is wordcount for example 
not the class name.
+    * app (String) - The app name to execute. This is _wordcount_ for example, not the class name.
     * input (String) - The remote directory name to use as input for the job.
     * output (String) - The remote directory name to store output from the job.
 * Response

Modified: knox/trunk/books/0.7.0/service_webhdfs.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_webhdfs.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_webhdfs.md (original)
+++ knox/trunk/books/0.7.0/service_webhdfs.md Fri Jan 15 15:24:45 2016
@@ -20,7 +20,7 @@
 REST API access to HDFS in a Hadoop cluster is provided by WebHDFS.
 The [WebHDFS REST 
API](http://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/WebHDFS.html)
 documentation is available online.
 WebHDFS must be enabled in the hdfs-site.xml configuration file.
-In sandbox this configuration file is located at 
/etc/hadoop/conf/hdfs-site.xml.
+In the sandbox this configuration file is located at 
`/etc/hadoop/conf/hdfs-site.xml`.
 Note the properties shown below as they are related to configuration required 
by the gateway.
 Some of these represent the default values and may not actually be present in 
hdfs-site.xml.
 
@@ -70,14 +70,14 @@ For Name Node URLs, the mapping of Knox
 | Cluster | `http://{webhdfs-host}:50070/webhdfs`                              
           |
 
 However, there is a subtle difference to URLs that are returned by WebHDFS in 
the Location header of many requests.
-Direct WebHDFS requests may return Location headers that contain the address 
of a particular Data Node.
+Direct WebHDFS requests may return Location headers that contain the address 
of a particular DataNode.
 The gateway will rewrite these URLs to ensure subsequent requests come back 
through the gateway and internal cluster details are protected.
 
-A WebHDFS request to the Node Node to retrieve a file will return a URL of the 
form below in the Location header.
+A WebHDFS request to the NameNode to retrieve a file will return a URL of the 
form below in the Location header.
 
     http://{datanode-host}:{data-node-port}/webhdfs/v1/{path}?...
 
-Note that this URL contains the network location of a Data Node.
+Note that this URL contains the network location of a DataNode.
 The gateway will rewrite this URL to look like the URL below.
 
     
https://{gateway-host}:{gateway-port}/{gateway-path}/{cluster-name}/webhdfs/data/v1/{path}?_={encrypted-query-parameters}
@@ -185,7 +185,7 @@ Use can use cURL to directly invoke the
 
 * Request
     * from( String name ) - The full name of the file in HDFS.
-    * file( String name ) - The name name of a local file to create with the 
content.
+    * file( String name ) - The name of a local file to create with the 
content.
     If this isn't specified the file content must be read from the response.
 * Response
     * BasicResponse
@@ -264,7 +264,7 @@ of the list. If the list is exhausted an
 will be tried again (the list will start again from the original top entry).
 
 * failoverSleep - 
-The amount of time in millis that the process will wait or sleep before 
attempting to failover.
+The amount of time in milliseconds that the process will wait or sleep before 
attempting to failover.
 
 * maxRetryAttempts - 
This is the maximum number of times that a retry request will be attempted. Unlike failover, the retry is done on the
@@ -272,12 +272,12 @@ same URL that failed. This is a special
 come out of safe mode so a retry is desirable here as opposed to a failover.
 
 * retrySleep - 
-The amount of time in millis that the process will wait or sleep before a 
retry is issued.
+The amount of time in milliseconds that the process will wait or sleep before 
a retry is issued.
 
 * enabled - 
 Flag to turn the particular service on or off for HA.
 
-And for the service configuration itself the additional URLs that standby 
nodes should be added to the list. The active 
+And for the service configuration itself, the URLs of the additional standby nodes should be added to the list. The active
 URL (at the time of configuration) should ideally be added to the top of the 
list.
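
As an end-to-end check of the HA setup, a simple WebHDFS call can be issued against the gateway; after a NameNode failover the same command should keep working unchanged. The `sandbox` topology name and `guest` credentials below are Sandbox assumptions:

    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/webhdfs/v1/?op=LISTSTATUS'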
 
 

Modified: knox/trunk/books/0.7.0/service_yarn.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/service_yarn.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/service_yarn.md (original)
+++ knox/trunk/books/0.7.0/service_yarn.md Fri Jan 15 15:24:45 2016
@@ -17,9 +17,9 @@
 
 ### Yarn ###
 
-Knox provides gateway functionality for the REST APIs of the ResourceManager. 
The ResourceManager REST API's allow the
+Knox provides gateway functionality for the REST APIs of the ResourceManager. 
The ResourceManager REST APIs allow the
 user to get information about the cluster - status on the cluster, metrics on 
the cluster, scheduler information,
-information about nodes in the cluster, and information about applications on 
the cluster. Also as of hadoop version
+information about nodes in the cluster, and information about applications on 
the cluster. Also as of Hadoop version
 2.5.0, the user can submit a new application as well as kill it (or get state) 
using the 'Writable' APIs.
 
 The docs for this can be found here
@@ -28,14 +28,13 @@ http://hadoop.apache.org/docs/current/ha
 
 To enable this functionality, a topology file needs to have the following 
configuration:
 
-
     <service>
-            <role>RESOURCEMANAGER</role>
-            <url>http://<hostname>:<port>/ws</url>
+        <role>RESOURCEMANAGER</role>
+        <url>http://<hostname>:<port>/ws</url>
     </service>
 
 The default Resource Manager HTTP port is 8088. If it is configured to some other port, that configuration can be
-found in yarn-site.xml under the property 
'yarn.resourcemanager.webapp.address'.
+found in `yarn-site.xml` under the property 
`yarn.resourcemanager.webapp.address`.
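
A quick way to verify this configuration is to request the ResourceManager's cluster information through the gateway. The `sandbox` topology and `guest` credentials are assumptions; note that because the service URL above ends in `/ws`, the REST resources appear directly under `resourcemanager`:

    curl -i -k -u guest:guest-password 'https://localhost:8443/gateway/sandbox/resourcemanager/v1/cluster/info'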
 
 #### Yarn URL Mapping ####
 

Modified: knox/trunk/books/0.7.0/x-forwarded-headers.md
URL: 
http://svn.apache.org/viewvc/knox/trunk/books/0.7.0/x-forwarded-headers.md?rev=1724836&r1=1724835&r2=1724836&view=diff
==============================================================================
--- knox/trunk/books/0.7.0/x-forwarded-headers.md (original)
+++ knox/trunk/books/0.7.0/x-forwarded-headers.md Fri Jan 15 15:24:45 2016
@@ -19,22 +19,22 @@
 Out-of-the-box Knox provides support for some X-Forwarded-* headers through 
the use of a Servlet Filter. Specifically the
 headers handled/populated by Knox are:
 
-- X-Forwarded-For
-- X-Forwarded-Proto
-- X-Forwarded-Port
-- X-Forwarded-Host
-- X-Forwarded-Server
-- X-Forwarded-Context
+* X-Forwarded-For
+* X-Forwarded-Proto
+* X-Forwarded-Port
+* X-Forwarded-Host
+* X-Forwarded-Server
+* X-Forwarded-Context
 
 This functionality can be turned off by a configuration setting in the file `gateway-site.xml` and by redeploying the necessary topology/topologies.
 
 The setting is (under the 'configuration' tag):
 
-       <property>
-            <name>gateway.xforwarded.enabled</name>
-            <value>false</value>
-        </property>
+    <property>
+        <name>gateway.xforwarded.enabled</name>
+        <value>false</value>
+    </property>
 
 If this setting is absent, the default behavior is that the X-Forwarded-* header support is on; in other words, 'gateway.xforwarded.enabled' is set to 'true' by default.
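
As an illustration of the behavior described below, a client can send its own X-Forwarded-For value through the gateway; the service behind Knox should then receive that value with the client's address appended to the list. The topology, credentials, and example address below are assumptions:

    curl -i -k -u guest:guest-password -H 'X-Forwarded-For: 203.0.113.7' 'https://localhost:8443/gateway/sandbox/webhdfs/v1/?op=GETHOMEDIRECTORY'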
@@ -51,12 +51,12 @@ to the list. The value added is the clie
 
 ##### X-Forwarded-Proto #####
 
-The protocol used in the client request. If this header is passed into Knox 
it's value is maintained, otherwise Knox will
+The protocol used in the client request. If this header is passed into Knox 
its value is maintained, otherwise Knox will
 populate the header with the value 'https' if the request is a secure one or 
'http' otherwise.
 
 ##### X-Forwarded-Port #####
 
-The port used in the client request. If this header is passed into Knox it's 
value is maintained, otherwise Knox will
+The port used in the client request. If this header is passed into Knox its 
value is maintained, otherwise Knox will
 populate the header with the port on which the request came into Knox.
 
 ##### X-Forwarded-Host #####

