http://git-wip-us.apache.org/repos/asf/hadoop/blob/875062b5/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
index 028ba2d..87a8f36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.7.2.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
 <!-- Generated by the JDiff Javadoc doclet -->
 <!-- (http://www.jdiff.org) -->
-<!-- on Thu Aug 18 16:02:32 PDT 2016 -->
+<!-- on Wed Aug 24 13:54:04 PDT 2016 -->
 
 <api
   xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
@@ -9,7 +9,7 @@
   name="Apache Hadoop HDFS 2.7.2"
   jdversion="1.0.9">
 
-<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar
 -verbose -classpath 
/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_92.jdk/Contents/Home/lib/tools.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/http
 
core-4.2.5.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/wtan/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/wtan/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/wtan/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/
 
wtan/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/wtan/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/wtan/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/wtan/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/wtan/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/wtan/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/wtan/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/wtan/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/wtan/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/wtan/.m2/repos
 
itory/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/wtan/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/wtan/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/wtan/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/wtan/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/wtan/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/wtan/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/wtan/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/wtan/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/wtan/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/wtan/.m2/repository/org/apache/curator/curat
 
or-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/wtan/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/wtan/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/wtan/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/wtan/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/wtan/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/wtan/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/wtan/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/wtan/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/wtan/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/wtan/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/wtan/.m2/repository/commons-lang/common
 
s-lang/2.6/commons-lang-2.6.jar:/Users/wtan/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/wtan/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/wtan/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/wtan/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/wtan/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/wtan/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/wtan/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/wtan/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/wtan/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/wtan/.m2/repository/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/Users/wtan/.m2/repository/xerces/xercesIm
 
pl/2.9.1/xercesImpl-2.9.1.jar:/Users/wtan/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/wtan/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/wtan/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar
 -sourcepath 
/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java
 -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar
 -apidir 
/Users/wtan/project/github/hadoop-common-trunk/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml
 -apiname Apache Hadoop HDFS 2.7.2 -->
+<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet 
-docletpath 
/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar
 -verbose -classpath 
/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.7.2.jar:/Library/Java/JavaVirtualMachines/jdk1.8.0_40.jdk/Contents/Home/lib/tools.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.7.2.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.j
 
ar:/Users/vinodkv/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-kerberos-codec-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/vinodkv/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/vinodkv/.m2/repository/jline/jline/0.9.94/jline-0.9.94.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-common-project/hadoop-common/target/hadoop-common-2.7.2.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-mat
 
h3-3.1.1.jar:/Users/vinodkv/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/vinodkv/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/vinodkv/.m2/repository/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/Users/vinodkv/.m2/repository/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/vinodkv/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/vinodkv/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/vinodkv/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/vinodkv/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/vinodkv/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/vinodkv/.m2/repository/org/cod
 
ehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/vinodkv/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/vinodkv/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/vinodkv/.m2/repository/commons-configuration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/vinodkv/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/vinodkv/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/vinodkv/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/vinodkv/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/vinodkv/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/vinodkv/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/vinodkv/.m2/reposito
 
ry/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/Users/vinodkv/.m2/repository/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/Users/vinodkv/.m2/repository/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/Users/vinodkv/.m2/repository/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/Users/vinodkv/.m2/repository/org/tukaani/xz/1.0/xz-1.0.jar:/Users/vinodkv/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/vinodkv/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/vinodkv/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/vinodkv/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/vinodkv/.m2/repository/commons-cli/commons-cli/1.
 
2/commons-cli-1.2.jar:/Users/vinodkv/.m2/repository/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/vinodkv/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/vinodkv/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/vinodkv/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/vinodkv/.m2/repository/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/Users/vinodkv/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/vinodkv/.m2/repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/Users/vinodkv/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/vinodkv/.m2/repository/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/vinodkv/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/vinod
 
kv/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/vinodkv/.m2/repository/io/netty/netty/3.6.2.Final/netty-3.6.2.Final.jar:/Users/vinodkv/.m2/repository/io/netty/netty-all/4.0.23.Final/netty-all-4.0.23.Final.jar:/Users/vinodkv/.m2/repository/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/Users/vinodkv/.m2/repository/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/Users/vinodkv/.m2/repository/org/apache/htrace/htrace-core/3.1.0-incubating/htrace-core-3.1.0-incubating.jar:/Users/vinodkv/.m2/repository/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar
 -sourcepath 
/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/src/main/java
 -doclet 
org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet 
-docletpath 
/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/
 hadoop-hdfs/target/jdiff.jar -apidir 
/Users/vinodkv/Workspace/eclipse-workspace/apache-git/hadoop/hadoop-hdfs-project/hadoop-hdfs/target/site/jdiff/xml
 -apiname Apache Hadoop HDFS 2.7.2 -->
 <package name="org.apache.hadoop.fs">
   <!-- start class org.apache.hadoop.fs.BlockStorageLocation -->
   <class name="BlockStorageLocation" 
extends="org.apache.hadoop.fs.BlockLocation"
@@ -126,354 +126,8 @@
     </doc>
   </interface>
   <!-- end interface org.apache.hadoop.fs.VolumeId -->
-  <!-- start class org.apache.hadoop.fs.XAttr.Builder -->
-  <class name="XAttr.Builder" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Builder"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="setNameSpace" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="ns" type="org.apache.hadoop.fs.XAttr.NameSpace"/>
-    </method>
-    <method name="setName" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-    <method name="setValue" return="org.apache.hadoop.fs.XAttr.Builder"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="value" type="byte[]"/>
-    </method>
-    <method name="build" return="org.apache.hadoop.fs.XAttr"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.fs.XAttr.Builder -->
-  <!-- start class org.apache.hadoop.fs.XAttr.NameSpace -->
-  <class name="XAttr.NameSpace" extends="java.lang.Enum"
-    abstract="false"
-    static="true" final="true" visibility="public"
-    deprecated="not deprecated">
-    <method name="values" return="org.apache.hadoop.fs.XAttr.NameSpace[]"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="valueOf" return="org.apache.hadoop.fs.XAttr.NameSpace"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="name" type="java.lang.String"/>
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.fs.XAttr.NameSpace -->
 </package>
 <package name="org.apache.hadoop.hdfs">
-  <!-- start interface org.apache.hadoop.hdfs.BlockReader -->
-  <interface name="BlockReader"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.fs.ByteBufferReadable"/>
-    <method name="read" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="off" type="int"/>
-      <param name="len" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="skip" return="long"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="n" type="long"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Skip the given number of bytes]]>
-      </doc>
-    </method>
-    <method name="available" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Returns an estimate of the number of bytes that can be read
- (or skipped over) from this input stream without performing
- network I/O.
- This may return more than what is actually present in the block.]]>
-      </doc>
-    </method>
-    <method name="close"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Close the block reader.
-
- @throws IOException]]>
-      </doc>
-    </method>
-    <method name="readFully"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="readOffset" type="int"/>
-      <param name="amtToRead" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Read exactly the given amount of data, throwing an exception
- if EOF is reached before that amount]]>
-      </doc>
-    </method>
-    <method name="readAll" return="int"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="buf" type="byte[]"/>
-      <param name="offset" type="int"/>
-      <param name="len" type="int"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Similar to {@link #readFully(byte[], int, int)} except that it 
will
- not throw an exception on EOF. However, it differs from the simple
- {@link #read(byte[], int, int)} call in that it is guaranteed to
- read the data if it is available. In other words, if this call
- does not throw an exception, then either the buffer has been
- filled or the next call will return EOF.]]>
-      </doc>
-    </method>
-    <method name="isLocal" return="boolean"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return              true only if this is a local read.]]>
-      </doc>
-    </method>
-    <method name="isShortCircuit" return="boolean"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return              true only if this is a short-circuit read.
-                      All short-circuit reads are also local.]]>
-      </doc>
-    </method>
-    <method name="getClientMmap" 
return="org.apache.hadoop.hdfs.shortcircuit.ClientMmap"
-      abstract="true" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="opts" type="java.util.EnumSet"/>
-      <doc>
-      <![CDATA[Get a ClientMmap object for this BlockReader.
-
- @param opts          The read options to use.
- @return              The ClientMmap object, or null if mmap is not
-                      supported.]]>
-      </doc>
-    </method>
-    <doc>
-    <![CDATA[A BlockReader is responsible for reading a single block
- from a single datanode.]]>
-    </doc>
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.BlockReader -->
-  <!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer 
-->
-  <class name="BlockReaderFactory.BlockReaderPeer" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.BlockReaderPeer -->
-  <!-- start class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector 
-->
-  <class name="BlockReaderFactory.FailureInjector" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="FailureInjector"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="injectRequestFileDescriptorsFailure"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <method name="getSupportsReceiptVerification" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector -->
-  <!-- start class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
-  <class name="CorruptFileBlockIterator" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <implements name="org.apache.hadoop.fs.RemoteIterator"/>
-    <constructor name="CorruptFileBlockIterator" 
type="org.apache.hadoop.hdfs.DFSClient, org.apache.hadoop.fs.Path"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <method name="getCallsMade" return="int"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return the number of calls made to the DFSClient.
- This is for debugging and testing purposes.]]>
-      </doc>
-    </method>
-    <method name="hasNext" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="next" return="org.apache.hadoop.fs.Path"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <doc>
-    <![CDATA[Provides an iterator interface for listCorruptFileBlocks.
- This class is used by DistributedFileSystem and Hdfs.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.CorruptFileBlockIterator -->
-  <!-- start class org.apache.hadoop.hdfs.DFSClient.Conf -->
-  <class name="DFSClient.Conf" extends="java.lang.Object"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="Conf" type="org.apache.hadoop.conf.Configuration"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="isUseLegacyBlockReaderLocal" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getDomainSocketPath" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="isShortCircuitLocalReads" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="isDomainSocketDataTraffic" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="brfFailureInjector" 
type="org.apache.hadoop.hdfs.BlockReaderFactory.FailureInjector"
-      transient="false" volatile="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[DFSClient configuration]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSClient.Conf -->
-  <!-- start class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
-  <class name="DFSClient.DFSDataInputStream" 
extends="org.apache.hadoop.hdfs.client.HdfsDataInputStream"
-    abstract="false"
-    static="true" final="false" visibility="public"
-    deprecated="use {@link HdfsDataInputStream} instead.">
-    <constructor name="DFSDataInputStream" 
type="org.apache.hadoop.hdfs.DFSInputStream"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <exception name="IOException" type="java.io.IOException"/>
-    </constructor>
-    <doc>
-    <![CDATA[@deprecated use {@link HdfsDataInputStream} instead.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream -->
-  <!-- start class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
-  <class name="DFSHedgedReadMetrics" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="DFSHedgedReadMetrics"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </constructor>
-    <method name="incHedgedReadOps"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="incHedgedReadOpsInCurThread"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="incHedgedReadWins"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadOps" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadOpsInCurThread" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getHedgedReadWins" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <field name="hedgedReadOps" type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="hedgedReadOpsWin" 
type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <field name="hedgedReadOpsInCurThread" 
type="java.util.concurrent.atomic.AtomicLong"
-      transient="false" volatile="false"
-      static="false" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-    <doc>
-    <![CDATA[The client-side metrics for hedged read feature.
- This class has a number of metrics variables that are publicly accessible,
- we can grab them from client side, like HBase.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSHedgedReadMetrics -->
   <!-- start class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
   <class name="DFSInotifyEventInputStream" extends="java.lang.Object"
     abstract="false"
@@ -571,80 +225,55 @@
     </doc>
   </class>
   <!-- end class org.apache.hadoop.hdfs.DFSInotifyEventInputStream -->
-  <!-- start class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
-  <class name="DFSInputStream.ReadStatistics" extends="java.lang.Object"
+  <!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
+  <class name="UnknownCipherSuiteException" extends="java.io.IOException"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ReadStatistics"
+    <constructor name="UnknownCipherSuiteException" type="java.lang.String"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="ReadStatistics" 
type="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
+    <doc>
+    <![CDATA[Thrown when an unknown cipher suite is encountered.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
+  <!-- start class 
org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
+  <class name="UnknownCryptoProtocolVersionException" 
extends="java.io.IOException"
+    abstract="false"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="UnknownCryptoProtocolVersionException"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="getTotalBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total bytes read.  This will always be at least as
- high as the other numbers, since it includes all of them.]]>
-      </doc>
-    </method>
-    <method name="getTotalLocalBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total local bytes read.  This will always be at 
least
- as high as totalShortCircuitBytesRead, since all short-circuit
- reads are also local.]]>
-      </doc>
-    </method>
-    <method name="getTotalShortCircuitBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total short-circuit local bytes read.]]>
-      </doc>
-    </method>
-    <method name="getTotalZeroCopyBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="UnknownCryptoProtocolVersionException" 
type="java.lang.String"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total number of zero-copy bytes read.]]>
-      </doc>
-    </method>
-    <method name="getRemoteBytesRead" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <doc>
-      <![CDATA[@return The total number of bytes read which were not local.]]>
-      </doc>
-    </method>
+    </constructor>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics -->
-  <!-- start class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
-  <class name="DFSUtil.ConfiguredNNAddress" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException 
-->
+  <doc>
+  <![CDATA[<p>A distributed implementation of {@link
+org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
+Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p>
+
+<p>The most important difference is that unlike GFS, Hadoop DFS files
+have strictly one writer at any one time.  Bytes are always appended
+to the end of the writer's stream.  There is no notion of "record appends"
+or "mutations" that are then checked or reordered.  Writers simply emit
+a byte stream.  That byte stream is guaranteed to be stored in the
+order written.</p>]]>
+  </doc>
+</package>
+<package name="org.apache.hadoop.hdfs.client">
+  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
+  <class name="BlockReportOptions" extends="java.lang.Object"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="true" visibility="public"
     deprecated="not deprecated">
-    <method name="getNameserviceId" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getNamenodeId" return="java.lang.String"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getAddress" return="java.net.InetSocketAddress"
+    <method name="isIncremental" return="boolean"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
@@ -655,20851 +284,1830 @@
       deprecated="not deprecated">
     </method>
     <doc>
-    <![CDATA[Represent one of the NameNodes configured in the cluster.]]>
+    <![CDATA[Options that can be specified when manually triggering a block 
report.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress -->
-  <!-- start class org.apache.hadoop.hdfs.ExtendedBlockId -->
-  <class name="ExtendedBlockId" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsAdmin -->
+  <class name="HdfsAdmin" extends="java.lang.Object"
     abstract="false"
-    static="false" final="true" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ExtendedBlockId" type="long, java.lang.String"
+    <constructor name="HdfsAdmin" type="java.net.URI, 
org.apache.hadoop.conf.Configuration"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Create a new HdfsAdmin client.
+
+ @param uri the unique URI of the HDFS file system to administer
+ @param conf configuration
+ @throws IOException in the event the file system could not be created]]>
+      </doc>
     </constructor>
-    <method name="fromExtendedBlock" 
return="org.apache.hadoop.hdfs.ExtendedBlockId"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="block" 
type="org.apache.hadoop.hdfs.protocol.ExtendedBlock"/>
-    </method>
-    <method name="getBlockId" return="long"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-    </method>
-    <method name="getBlockPoolId" return="java.lang.String"
+    <method name="setQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="quota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the namespace quota (count of files, directories, and sym 
links) for a
+ directory.
+
+ @param src the path to set the quota for
+ @param quota the value to set for the quota
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="equals" return="boolean"
+    <method name="clearQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="o" type="java.lang.Object"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Clear the namespace quota (count of files, directories and sym 
links) for a
+ directory.
+
+ @param src the path to clear the quota of
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="hashCode" return="int"
+    <method name="setSpaceQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="spaceQuota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Set the storage space quota (size of files) for a directory. 
Note that
+ directories and sym links do not occupy storage space.
+
+ @param src the path to set the space quota of
+ @param spaceQuota the value to set for the space quota
+ @throws IOException in the event of error]]>
+      </doc>
     </method>
-    <method name="toString" return="java.lang.String"
+    <method name="clearSpaceQuota"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[An immutable key which identifies a block.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.ExtendedBlockId -->
-  <!-- start class org.apache.hadoop.hdfs.HAUtil -->
-  <class name="HAUtil" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="isHAEnabled" return="boolean"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Returns true if HA for namenode is configured for the given 
nameservice
+      <![CDATA[Clear the storage space quota (size of files) for a directory. 
Note that
+ directories and sym links do not occupy storage space.
 
- @param conf Configuration
- @param nsId nameservice, or null if no federated NS is configured
- @return true if HA is configured in the configuration; else false.]]>
+ @param src the path to clear the space quota of
+ @throws IOException in the event of error]]>
       </doc>
     </method>
-    <method name="usesSharedEditsDir" return="boolean"
+    <method name="setQuotaByStorageType"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <param name="quota" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Returns true if HA is using a shared edits directory.
+      <![CDATA[Set the quota by storage type for a directory. Note that
+ directories and sym links do not occupy storage type quota.
 
- @param conf Configuration
- @return true if HA config is using a shared edits dir, false otherwise.]]>
+ @param src the target directory to set the quota by storage type
+ @param type the storage type to set for quota by storage type
+ @param quota the value to set for quota by storage type
+ @throws IOException in the event of error]]>
       </doc>
     </method>
-    <method name="getNameNodeId" return="java.lang.String"
+    <method name="clearQuotaByStorageType"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="type" type="org.apache.hadoop.fs.StorageType"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the namenode Id by matching the {@code addressKey}
- with the the address of the local node.
-
- If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
- configured, this method determines the namenode Id by matching the local
- node's address with the configured addresses. When a match is found, it
- returns the namenode Id from the corresponding configuration key.
+      <![CDATA[Clear the space quota by storage type for a directory. Note that
+ directories and sym links do not occupy storage type quota.
 
- @param conf Configuration
- @return namenode Id on success, null on failure.
- @throws HadoopIllegalArgumentException on error]]>
+ @param src the target directory to clear the quota by storage type
+ @param type the storage type to clear for quota by storage type
+ @throws IOException in the event of error]]>
       </doc>
     </method>
-    <method name="getNameNodeIdFromAddress" return="java.lang.String"
+    <method name="allowSnapshot"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="address" type="java.net.InetSocketAddress"/>
-      <param name="keys" type="java.lang.String[]"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Similar to
- {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
- InetSocketAddress, String...)}]]>
+      <![CDATA[Allow snapshot on a directory.
+ @param path The path of the directory where snapshots will be taken.]]>
       </doc>
     </method>
-    <method name="getNameNodeIdOfOtherNode" return="java.lang.String"
+    <method name="disallowSnapshot"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the NN ID of the other node in an HA setup.
-
- @param conf the configuration of this node
- @return the NN ID of the other node in this nameservice]]>
+      <![CDATA[Disallow snapshot on a directory.
+ @param path The path of the snapshottable directory.]]>
       </doc>
     </method>
-    <method name="getConfForOtherNode" 
return="org.apache.hadoop.conf.Configuration"
+    <method name="addCacheDirective" return="long"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="myConf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="info" 
type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Given the configuration for this node, return a Configuration 
object for
- the other node in an HA setup.
+      <![CDATA[Add a new CacheDirectiveInfo.
 
- @param myConf the configuration of this node
- @return the configuration of the other node in an HA setup]]>
+ @param info Information about a directive to add.
+ @param flags {@link CacheFlag}s to use for this operation.
+ @return the ID of the directive that was created.
+ @throws IOException if the directive could not be added]]>
       </doc>
     </method>
-    <method name="shouldAllowStandbyReads" return="boolean"
+    <method name="modifyCacheDirective"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
+      <param name="info" 
type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <param name="flags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[This is used only by tests at the moment.
- @return true if the NN should allow read operations while in standby mode.]]>
+      <![CDATA[Modify a CacheDirective.
+
+ @param info Information about the directive to modify. You must set the ID
+          to indicate which CacheDirective you want to modify.
+ @param flags {@link CacheFlag}s to use for this operation.
+ @throws IOException if the directive could not be modified]]>
       </doc>
     </method>
-    <method name="setAllowStandbyReads"
-      abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="val" type="boolean"/>
-    </method>
-    <method name="isLogicalUri" return="boolean"
+    <method name="removeCacheDirective"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="id" type="long"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return true if the given nameNodeUri appears to be a logical 
URI.]]>
+      <![CDATA[Remove a CacheDirective.
+
+ @param id identifier of the CacheDirectiveInfo to remove
+ @throws IOException if the directive could not be removed]]>
       </doc>
     </method>
-    <method name="isClientFailoverConfigured" return="boolean"
+    <method name="listCacheDirectives" 
return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="filter" 
type="org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Check whether the client has a failover proxy provider 
configured
- for the namenode/nameservice.
+      <![CDATA[List cache directives. Incrementally fetches results from the 
server.
 
- @param conf Configuration
- @param nameNodeUri The URI of namenode
- @return true if failover is configured.]]>
+ @param filter Filter parameters to use when listing the directives, null to
+               list all directives visible to us.
+ @return A RemoteIterator which returns CacheDirectiveInfo objects.]]>
       </doc>
     </method>
-    <method name="useLogicalUri" return="boolean"
+    <method name="addCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Check whether logical URI is needed for the namenode and
- the corresponding failover proxy provider in the config.
+      <![CDATA[Add a cache pool.
 
- @param conf Configuration
- @param nameNodeUri The URI of namenode
- @return true if logical URI is needed. false, if not needed.
- @throws IOException most likely due to misconfiguration.]]>
+ @param info
+          The request to add a cache pool.
+ @throws IOException
+          If the request could not be completed.]]>
       </doc>
     </method>
-    <method name="getServiceUriFromToken" return="java.net.URI"
+    <method name="modifyCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="scheme" type="java.lang.String"/>
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <param name="info" type="org.apache.hadoop.hdfs.protocol.CachePoolInfo"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Parse the file system URI out of the provided token.]]>
+      <![CDATA[Modify an existing cache pool.
+
+ @param info
+          The request to modify a cache pool.
+ @throws IOException
+          If the request could not be completed.]]>
       </doc>
     </method>
-    <method name="buildTokenServiceForLogicalUri" 
return="org.apache.hadoop.io.Text"
+    <method name="removeCachePool"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="uri" type="java.net.URI"/>
-      <param name="scheme" type="java.lang.String"/>
+      <param name="poolName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the service name used in the delegation token for the given 
logical
- HA service.
- @param uri the logical URI of the cluster
- @param scheme the scheme of the corresponding FileSystem
- @return the service name]]>
+      <![CDATA[Remove a cache pool.
+
+ @param poolName
+          Name of the cache pool to remove.
+ @throws IOException
+          if the cache pool did not exist, or could not be removed.]]>
       </doc>
     </method>
-    <method name="isTokenForLogicalUri" return="boolean"
+    <method name="listCachePools" return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="token" type="org.apache.hadoop.security.token.Token"/>
+      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@return true if this token corresponds to a logical nameservice
- rather than a specific namenode.]]>
+      <![CDATA[List all cache pools.
+
+ @return A remote iterator from which you can get CachePoolEntry objects.
+          Requests will be made as needed.
+ @throws IOException
+          If there was an error listing cache pools.]]>
       </doc>
     </method>
-    <method name="buildTokenServicePrefixForLogicalUri" 
return="java.lang.String"
+    <method name="createEncryptionZone"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="scheme" type="java.lang.String"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <param name="keyName" type="java.lang.String"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AccessControlException" 
type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" 
type="java.io.FileNotFoundException"/>
+      <doc>
+      <![CDATA[Create an encryption zone rooted at an empty existing 
directory, using the
+ specified encryption key. An encryption zone has an associated encryption
+ key used when reading and writing files within the zone.
+
+ @param path    The path of the root of the encryption zone. Must refer to
+                an empty, existing directory.
+ @param keyName Name of key available at the KeyProvider.
+ @throws IOException            if there was a general IO exception
+ @throws AccessControlException if the caller does not have access to path
+ @throws FileNotFoundException  if the path does not exist]]>
+      </doc>
     </method>
-    <method name="cloneDelegationTokenForLogicalUri"
+    <method name="getEncryptionZoneForPath" 
return="org.apache.hadoop.hdfs.protocol.EncryptionZone"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="ugi" 
type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="haUri" type="java.net.URI"/>
-      <param name="nnAddrs" type="java.util.Collection"/>
+      <param name="path" type="org.apache.hadoop.fs.Path"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <exception name="AccessControlException" 
type="org.apache.hadoop.security.AccessControlException"/>
+      <exception name="FileNotFoundException" 
type="java.io.FileNotFoundException"/>
       <doc>
-      <![CDATA[Locate a delegation token associated with the given HA cluster 
URI, and if
- one is found, clone it to also represent the underlying namenode address.
- @param ugi the UGI to modify
- @param haUri the logical URI for the cluster
- @param nnAddrs collection of NNs in the cluster to which the token
- applies]]>
+      <![CDATA[Get the path of the encryption zone for a given file or 
directory.
+
+ @param path The path to get the ez for.
+
+ @return The EncryptionZone of the ez, or null if path is not in an ez.
+ @throws IOException            if there was a general IO exception
+ @throws AccessControlException if the caller does not have access to path
+ @throws FileNotFoundException  if the path does not exist]]>
       </doc>
     </method>
-    <method name="getAddressOfActive" return="java.net.InetSocketAddress"
+    <method name="listEncryptionZones" 
return="org.apache.hadoop.fs.RemoteIterator"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="fs" type="org.apache.hadoop.fs.FileSystem"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get the internet address of the currently-active NN. This 
should rarely be
- used, since callers of this method who connect directly to the NN using the
- resulting InetSocketAddress will not be able to connect to the active NN if
- a failover were to occur after this method has been called.
-
- @param fs the file system to get the active address of.
- @return the internet address of the currently-active NN.
- @throws IOException if an error occurs while resolving the active NN.]]>
+      <![CDATA[Returns a RemoteIterator which can be used to list the 
encryption zones
+ in HDFS. For large numbers of encryption zones, the iterator will fetch
+ the list of zones in a number of small batches.
+ <p/>
+ Since the list is fetched in batches, it does not represent a
+ consistent snapshot of the entire list of encryption zones.
+ <p/>
+ This method can only be called by HDFS superusers.]]>
       </doc>
     </method>
-    <method name="getProxiesForAllNameNodesInNameservice" 
return="java.util.List"
+    <method name="getInotifyEventStream" 
return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a 
given RPC
- call should be made on every NN in an HA nameservice, not just the active.
+      <![CDATA[Exposes a stream of namesystem events. Only events occurring 
after the
+ stream is created are available.
+ See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
+ for information on stream usage.
+ See {@link org.apache.hadoop.hdfs.inotify.Event}
+ for information on the available events.
+ <p/>
+ Inotify users may want to tune the following HDFS parameters to
+ ensure that enough extra HDFS edits are saved to support inotify clients
+ that fall behind the current state of the namespace while reading events.
+ The default parameter values should generally be reasonable. If edits are
+ deleted before their corresponding events can be read, clients will see a
+ {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
+ {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
 
- @param conf configuration
- @param nsId the nameservice to get all of the proxies for.
- @return a list of RPC proxies for each NN in the nameservice.
- @throws IOException in the event of error.]]>
+ It should generally be sufficient to tune these parameters:
+ dfs.namenode.num.extra.edits.retained
+ dfs.namenode.max.extra.edits.segments.retained
+
+ Parameters that affect the number of created segments and the number of
+ edits that are considered necessary (i.e. do not count towards the
+ dfs.namenode.num.extra.edits.retained quota):
+ dfs.namenode.checkpoint.period
+ dfs.namenode.checkpoint.txns
+ dfs.namenode.num.checkpoints.retained
+ dfs.ha.log-roll.period
+ <p/>
+ It is recommended that local journaling be configured
+ (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
+ so that edit transfers from the shared journal can be avoided.
+
+ @throws IOException If there was an error obtaining the stream.]]>
       </doc>
     </method>
-    <method name="getProxiesForAllNameNodesInNameservice" 
return="java.util.List"
+    <method name="getInotifyEventStream" 
return="org.apache.hadoop.hdfs.DFSInotifyEventInputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nsId" type="java.lang.String"/>
-      <param name="xface" type="java.lang.Class"/>
+      <param name="lastReadTxid" type="long"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Get an RPC proxy for each NN in an HA nameservice. Used when a 
given RPC
- call should be made on every NN in an HA nameservice, not just the active.
-
- @param conf configuration
- @param nsId the nameservice to get all of the proxies for.
- @param xface the protocol class.
- @return a list of RPC proxies for each NN in the nameservice.
- @throws IOException in the event of error.]]>
+      <![CDATA[A version of {@link HdfsAdmin#getInotifyEventStream()} meant 
for advanced
+ users who are aware of HDFS edits up to lastReadTxid (e.g. because they
+ have access to an FSImage inclusive of lastReadTxid) and only want to read
+ events after this point.]]>
       </doc>
     </method>
-    <method name="isAtLeastOneActive" return="boolean"
+    <method name="setStoragePolicy"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="namenodes" type="java.util.List"/>
+      <param name="src" type="org.apache.hadoop.fs.Path"/>
+      <param name="policyName" type="java.lang.String"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Used to ensure that at least one of the given HA NNs is 
currently in the
- active state..
+      <![CDATA[Set the source path to the specified storage policy.
 
- @param namenodes list of RPC proxies for each NN to check.
- @return true if at least one NN is active, false if all are in the standby 
state.
- @throws IOException in the event of error.]]>
+ @param src The source path referring to either a directory or a file.
+ @param policyName The name of the storage policy.]]>
       </doc>
     </method>
+    <doc>
+    <![CDATA[The public API for performing administrative functions on HDFS. 
Those writing
+ applications against HDFS should prefer this interface to directly accessing
+ functionality in DistributedFileSystem or DFSClient.
+
+ Note that this is distinct from the similarly-named {@link DFSAdmin}, which
+ is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
+ commands.]]>
+    </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.HAUtil -->
-  <!-- start class org.apache.hadoop.hdfs.KeyProviderCache -->
-  <class name="KeyProviderCache" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsAdmin -->
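The hunk above records the public HdfsAdmin administrative API. As a rough,
illustrative sketch (not part of the generated XML), a client might drive the
documented cache-pool, encryption-zone and storage-policy calls like this; the
cluster URI, pool name, paths, key name and policy name are all hypothetical:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

    public class HdfsAdminSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical cluster URI; normally taken from fs.defaultFS.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);

        // List all cache pools; the remote iterator fetches entries as needed.
        RemoteIterator<CachePoolEntry> pools = admin.listCachePools();
        while (pools.hasNext()) {
          System.out.println(pools.next().getInfo().getPoolName());
        }

        // Remove a (hypothetical) cache pool; throws IOException if it does not exist.
        admin.removeCachePool("old-pool");

        // Create an encryption zone rooted at an empty, existing directory,
        // using a key that already exists at the configured KeyProvider.
        admin.createEncryptionZone(new Path("/secure"), "myKey");

        // Assign a storage policy to a directory or file.
        admin.setStoragePolicy(new Path("/archive"), "COLD");
      }
    }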
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
+  <class name="HdfsDataInputStream" 
extends="org.apache.hadoop.fs.FSDataInputStream"
     abstract="false"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="KeyProviderCache" type="long"
+    <constructor name="HdfsDataInputStream" 
type="org.apache.hadoop.hdfs.DFSInputStream"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <method name="get" return="org.apache.hadoop.crypto.key.KeyProvider"
-      abstract="false" native="false" synchronized="false"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-    </method>
-    <method name="setKeyProvider"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="HdfsDataInputStream" 
type="org.apache.hadoop.crypto.CryptoInputStream"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="keyProvider" 
type="org.apache.hadoop.crypto.key.KeyProvider"/>
       <exception name="IOException" type="java.io.IOException"/>
-    </method>
-    <field name="LOG" type="org.apache.commons.logging.Log"
-      transient="false" volatile="false"
-      static="true" final="true" visibility="public"
-      deprecated="not deprecated">
-    </field>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.KeyProviderCache -->
-  <!-- start class org.apache.hadoop.hdfs.NameNodeProxies -->
-  <class name="NameNodeProxies" extends="java.lang.Object"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="NameNodeProxies"
-      static="false" final="false" visibility="public"
-      deprecated="not deprecated">
     </constructor>
-    <method name="createProxy" 
return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getWrappedStream" return="java.io.InputStream"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates the namenode proxy with the passed protocol. This will 
handle
- creation of either HA- or non-HA-enabled proxy objects, depending upon
- if the provided URI is a configured logical URI.
+      <![CDATA[Get a reference to the wrapped input stream. We always want to 
return the
+ actual underlying InputStream, even when we're using a CryptoStream. e.g.
+ in the delegated methods below.
 
- @param conf the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException if there is an error creating the proxy]]>
+ @return the underlying input stream]]>
       </doc>
     </method>
-    <method name="createProxy" 
return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getCurrentDatanode" 
return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="fallbackToSimpleAuth" 
type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates the namenode proxy with the passed protocol. This will 
handle
- creation of either HA- or non-HA-enabled proxy objects, depending upon
- if the provided URI is a configured logical URI.
-
- @param conf the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @param fallbackToSimpleAuth set to true or false during calls to indicate if
-   a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException if there is an error creating the proxy]]>
+      <![CDATA[Get the datanode from which the stream is currently reading.]]>
       </doc>
     </method>
-    <method name="createProxyWithLossyRetryHandler" 
return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getCurrentBlock" 
return="org.apache.hadoop.hdfs.protocol.ExtendedBlock"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="config" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="numResponseToDrop" type="int"/>
-      <param name="fallbackToSimpleAuth" 
type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Generate a dummy namenode proxy instance that utilizes our 
hacked
- {@link LossyRetryInvocationHandler}. Proxy instance generated using this
- method will proactively drop RPC responses. Currently this method only
- support HA setup. null will be returned if the given configuration is not
- for HA.
-
- @param config the configuration containing the required IPC
-        properties, client failover configurations, etc.
- @param nameNodeUri the URI pointing either to a specific NameNode
-        or to a logical nameservice.
- @param xface the IPC interface which should be created
- @param numResponseToDrop The number of responses to drop for each RPC call
- @param fallbackToSimpleAuth set to true or false during calls to indicate if
-   a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to. Will return null of the
-         given configuration does not support HA.
- @throws IOException if there is an error creating the proxy]]>
+      <![CDATA[Get the block containing the target position.]]>
       </doc>
     </method>
-    <method name="createNonHAProxy" 
return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getAllBlocks" return="java.util.List"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nnAddr" type="java.net.InetSocketAddress"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="ugi" 
type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="withRetries" type="boolean"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the 
time you
- don't want to use this, and should instead use {@link 
NameNodeProxies#createProxy}.
-
- @param conf the configuration object
- @param nnAddr address of the remote NN to connect to
- @param xface the IPC interface which should be created
- @param ugi the user who is making the calls on the proxy object
- @param withRetries certain interfaces have a non-standard retry policy
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException]]>
+      <![CDATA[Get the collection of blocks that has already been located.]]>
       </doc>
     </method>
-    <method name="createNonHAProxy" 
return="org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo"
+    <method name="getVisibleLength" return="long"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nnAddr" type="java.net.InetSocketAddress"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="ugi" 
type="org.apache.hadoop.security.UserGroupInformation"/>
-      <param name="withRetries" type="boolean"/>
-      <param name="fallbackToSimpleAuth" 
type="java.util.concurrent.atomic.AtomicBoolean"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Creates an explicitly non-HA-enabled proxy object. Most of the 
time you
- don't want to use this, and should instead use {@link 
NameNodeProxies#createProxy}.
+      <![CDATA[Get the visible length of the file. It will include the length 
of the last
+ block even if that is in UnderConstruction state.
 
- @param conf the configuration object
- @param nnAddr address of the remote NN to connect to
- @param xface the IPC interface which should be created
- @param ugi the user who is making the calls on the proxy object
- @param withRetries certain interfaces have a non-standard retry policy
- @param fallbackToSimpleAuth - set to true or false during this method to
-   indicate if a secure client falls back to simple auth
- @return an object containing both the proxy and the associated
-         delegation token service it corresponds to
- @throws IOException]]>
+ @return The visible length of the file.]]>
       </doc>
     </method>
-    <method name="getFailoverProxyProviderClass" return="java.lang.Class"
+    <method name="getReadStatistics" 
return="org.apache.hadoop.hdfs.DFSInputStream.ReadStatistics"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[Gets the configured Failover proxy provider's class]]>
+      <![CDATA[Get statistics about the reads which this DFSInputStream has 
done.
+ Note that because HdfsDataInputStream is buffered, these stats may
+ be higher than you would expect just by adding up the number of
+ bytes read through HdfsDataInputStream.]]>
       </doc>
     </method>
-    <method name="createFailoverProxyProvider" 
return="org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider"
+    <method name="clearReadStatistics"
       abstract="false" native="false" synchronized="false"
-      static="true" final="false" visibility="public"
+      static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
-      <param name="nameNodeUri" type="java.net.URI"/>
-      <param name="xface" type="java.lang.Class"/>
-      <param name="checkPort" type="boolean"/>
-      <param name="fallbackToSimpleAuth" 
type="java.util.concurrent.atomic.AtomicBoolean"/>
-      <exception name="IOException" type="java.io.IOException"/>
-      <doc>
-      <![CDATA[Creates the Failover proxy provider instance]]>
-      </doc>
     </method>
     <doc>
-    <![CDATA[Create proxy objects to communicate with a remote NN. All remote 
access to an
- NN should be funneled through this class. Most of the time you'll want to use
- {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
- create either an HA- or non-HA-enabled client proxy as appropriate.]]>
+    <![CDATA[The Hdfs implementation of {@link FSDataInputStream}.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.NameNodeProxies -->
-  <!-- start class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
-  <class name="NameNodeProxies.ProxyAndInfo" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsDataInputStream -->
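The HdfsDataInputStream entries above describe the HDFS-specific read-side
accessors. A minimal sketch (not part of the generated XML), assuming an HDFS
FileSystem whose open() hands back an HdfsDataInputStream, a hypothetical file
path, and ReadStatistics#getTotalBytesRead for the byte count:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

    public class ReadStatsSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // On an HDFS FileSystem, open() returns a stream that can be treated
        // as an HdfsDataInputStream; the path below is hypothetical.
        try (HdfsDataInputStream in =
                 (HdfsDataInputStream) fs.open(new Path("/data/file.bin"))) {
          byte[] buf = new byte[4096];
          while (in.read(buf) > 0) {
            // consume the data
          }
          System.out.println("visible length: " + in.getVisibleLength());
          System.out.println("current datanode: " + in.getCurrentDatanode());
          System.out.println("bytes read: "
              + in.getReadStatistics().getTotalBytesRead());
        }
      }
    }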
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
+  <class name="HdfsDataOutputStream" 
extends="org.apache.hadoop.fs.FSDataOutputStream"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="ProxyAndInfo" type="PROXYTYPE, 
org.apache.hadoop.io.Text, java.net.InetSocketAddress"
+    <constructor name="HdfsDataOutputStream" 
type="org.apache.hadoop.hdfs.DFSOutputStream, 
org.apache.hadoop.fs.FileSystem.Statistics, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
+      <exception name="IOException" type="java.io.IOException"/>
     </constructor>
-    <method name="getProxy" return="PROXYTYPE"
-      abstract="false" native="false" synchronized="false"
+    <constructor name="HdfsDataOutputStream" 
type="org.apache.hadoop.hdfs.DFSOutputStream, 
org.apache.hadoop.fs.FileSystem.Statistics"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <method name="getDelegationTokenService" return="org.apache.hadoop.io.Text"
-      abstract="false" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="HdfsDataOutputStream" 
type="org.apache.hadoop.crypto.CryptoOutputStream, 
org.apache.hadoop.fs.FileSystem.Statistics, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <method name="getAddress" return="java.net.InetSocketAddress"
-      abstract="false" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <constructor name="HdfsDataOutputStream" 
type="org.apache.hadoop.crypto.CryptoOutputStream, 
org.apache.hadoop.fs.FileSystem.Statistics"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </method>
-    <doc>
-    <![CDATA[Wrapper for a client proxy as well as its associated service ID.
- This is simply used as a tuple-like return type for
- {@link NameNodeProxies#createProxy} and
- {@link NameNodeProxies#createNonHAProxy}.]]>
-    </doc>
-  </class>
-  <!-- end class org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo -->
-  <!-- start interface org.apache.hadoop.hdfs.RemotePeerFactory -->
-  <interface name="RemotePeerFactory"    abstract="true"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <method name="newConnectedPeer" return="org.apache.hadoop.hdfs.net.Peer"
-      abstract="true" native="false" synchronized="false"
+      <exception name="IOException" type="java.io.IOException"/>
+    </constructor>
+    <method name="getCurrentBlockReplication" return="int"
+      abstract="false" native="false" synchronized="true"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-      <param name="addr" type="java.net.InetSocketAddress"/>
-      <param name="blockToken" type="org.apache.hadoop.security.token.Token"/>
-      <param name="datanodeId" 
type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
       <exception name="IOException" type="java.io.IOException"/>
       <doc>
-      <![CDATA[@param addr          The address to connect to.
- @param blockToken    Token used during optional SASL negotiation
- @param datanodeId    ID of destination DataNode
- @return              A new Peer connected to the address.
+      <![CDATA[Get the actual number of replicas of the current block.
+
+ This can be different from the designated replication factor of the file
+ because the namenode does not maintain replication for the blocks which are
+ currently being written to. Depending on the configuration, the client may
+ continue to write to a block even if a few datanodes in the write pipeline
+ have failed, or the client may add a new datanode once a datanode has
+ failed.
 
- @throws IOException  If there was an error connecting or creating
-                      the remote socket, encrypted stream, etc.]]>
+ @return the number of valid replicas of the current block]]>
       </doc>
     </method>
-  </interface>
-  <!-- end interface org.apache.hadoop.hdfs.RemotePeerFactory -->
-  <!-- start class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
-  <class name="UnknownCipherSuiteException" extends="java.io.IOException"
-    abstract="false"
-    static="false" final="false" visibility="public"
-    deprecated="not deprecated">
-    <constructor name="UnknownCipherSuiteException" type="java.lang.String"
+    <method name="hsync"
+      abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
-    </constructor>
+      <param name="syncFlags" type="java.util.EnumSet"/>
+      <exception name="IOException" type="java.io.IOException"/>
+      <doc>
+      <![CDATA[Sync buffered data to DataNodes (flush to disk devices).
+
+ @param syncFlags
+          Indicates the detailed semantics and actions of the hsync.
+ @throws IOException
+ @see FSDataOutputStream#hsync()]]>
+      </doc>
+    </method>
     <doc>
-    <![CDATA[Thrown when an unknown cipher suite is encountered.]]>
+    <![CDATA[The Hdfs implementation of {@link FSDataOutputStream}.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.UnknownCipherSuiteException -->
-  <!-- start class 
org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException -->
-  <class name="UnknownCryptoProtocolVersionException" 
extends="java.io.IOException"
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsDataOutputStream -->
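The HdfsDataOutputStream entries above cover the flag-taking hsync and
getCurrentBlockReplication. A minimal sketch (not part of the generated XML),
assuming an HDFS FileSystem whose create() hands back an HdfsDataOutputStream
and a hypothetical output path:

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

    public class HsyncSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // On an HDFS FileSystem, create() returns a stream that can be treated
        // as an HdfsDataOutputStream; the path below is hypothetical.
        try (HdfsDataOutputStream out =
                 (HdfsDataOutputStream) fs.create(new Path("/data/out.log"))) {
          out.writeBytes("a line of data\n");
          // Flush buffered data to the datanodes' disks and also update the
          // visible file length while the last block is under construction.
          out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
          System.out.println("replicas of current block: "
              + out.getCurrentBlockReplication());
        }
      }
    }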
+  <!-- start class org.apache.hadoop.hdfs.client.HdfsUtils -->
+  <class name="HdfsUtils" extends="java.lang.Object"
     abstract="false"
     static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="UnknownCryptoProtocolVersionException"
+    <constructor name="HdfsUtils"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <constructor name="UnknownCryptoProtocolVersionException" 
type="java.lang.String"
+    <method name="isHealthy" return="boolean"
+      abstract="false" native="false" synchronized="false"
+      static="true" final="false" visibility="public"
+      deprecated="not deprecated">
+      <param name="uri" type="java.net.URI"/>
+      <doc>
+      <![CDATA[Is the HDFS healthy?
+ HDFS is considered healthy if it is up and not in safemode.
+
+ @param uri the HDFS URI.  Note that the URI path is ignored.
+ @return true if HDFS is healthy; false, otherwise.]]>
+      </doc>
+    </method>
+    <doc>
+    <![CDATA[The public utility API for HDFS.]]>
+    </doc>
+  </class>
+  <!-- end class org.apache.hadoop.hdfs.client.HdfsUtils -->
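HdfsUtils.isHealthy is a one-call health probe; a minimal sketch (not part of
the generated XML), with a hypothetical cluster URI whose path is ignored:

    import java.net.URI;
    import org.apache.hadoop.hdfs.client.HdfsUtils;

    public class HealthCheckSketch {
      public static void main(String[] args) {
        // Hypothetical cluster URI; only the authority matters to isHealthy().
        boolean up = HdfsUtils.isHealthy(URI.create("hdfs://namenode:8020"));
        System.out.println(up
            ? "HDFS is up and out of safemode"
            : "HDFS is unavailable or in safemode");
      }
    }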
+</package>
+<package name="org.apache.hadoop.hdfs.inotify">
+  <!-- start class org.apache.hadoop.hdfs.inotify.Event -->
+  <class name="Event" extends="java.lang.Object"
+    abstract="true"
+    static="false" final="false" visibility="public"
+    deprecated="not deprecated">
+    <constructor name="Event" 
type="org.apache.hadoop.hdfs.inotify.Event.EventType"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
+    <method name="getEventType" 
return="org.apache.hadoop.hdfs.inotify.Event.EventType"
+      abstract="false" native="false" synchronized="false"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </method>
+    <doc>
+    <![CDATA[Events sent by the inotify system. Note that no events are 
necessarily sent
+ when a file is opened for read (although a MetadataUpdateEvent will be sent
+ if the atime is updated).]]>
+    </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException 
-->
-  <doc>
-  <![CDATA[<p>A distributed implementation of {@link
-org.apache.hadoop.fs.FileSystem}.  This is loosely modelled after
-Google's <a href="http://research.google.com/archive/gfs.html";>GFS</a>.</p>
-
-<p>The most important difference is that unlike GFS, Hadoop DFS files
-have strictly one writer at any one time.  Bytes are always appended
-to the end of the writer's stream.  There is no notion of "record appends"
-or "mutations" that are then checked or reordered.  Writers simply emit
-a byte stream.  That byte stream is guaranteed to be stored in the
-order written.</p>]]>
-  </doc>
-</package>
-<package name="org.apache.hadoop.hdfs.client">
-  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions -->
-  <class name="BlockReportOptions" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.inotify.Event -->
+  <!-- start class org.apache.hadoop.hdfs.inotify.EventBatch -->
+  <class name="EventBatch" extends="java.lang.Object"
     abstract="false"
-    static="false" final="true" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <method name="isIncremental" return="boolean"
+    <constructor name="EventBatch" type="long, 
org.apache.hadoop.hdfs.inotify.Event[]"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <method name="getTxid" return="long"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
-    <method name="toString" return="java.lang.String"
+    <method name="getEvents" return="org.apache.hadoop.hdfs.inotify.Event[]"
       abstract="false" native="false" synchronized="false"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </method>
     <doc>
-    <![CDATA[Options that can be specified when manually triggering a block 
report.]]>
+    <![CDATA[A batch of events that all happened on the same transaction ID.]]>
     </doc>
   </class>
-  <!-- end class org.apache.hadoop.hdfs.client.BlockReportOptions -->
-  <!-- start class org.apache.hadoop.hdfs.client.BlockReportOptions.Factory -->
-  <class name="BlockReportOptions.Factory" extends="java.lang.Object"
+  <!-- end class org.apache.hadoop.hdfs.inotify.EventBatch -->
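The Event and EventBatch entries above pair with the HdfsAdmin
getInotifyEventStream methods recorded earlier in this hunk. A rough sketch of
a tailing client (not part of the generated XML; the cluster URI is
hypothetical):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;
    import org.apache.hadoop.hdfs.inotify.Event;
    import org.apache.hadoop.hdfs.inotify.EventBatch;
    import org.apache.hadoop.hdfs.inotify.MissingEventsException;

    public class InotifyTailSketch {
      public static void main(String[] args) throws Exception {
        HdfsAdmin admin =
            new HdfsAdmin(URI.create("hdfs://namenode:8020"), new Configuration());
        DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
        while (true) {
          try {
            // take() blocks until the next batch of events is available.
            EventBatch batch = stream.take();
            for (Event event : batch.getEvents()) {
              System.out.println(batch.getTxid() + " " + event.getEventType());
            }
          } catch (MissingEventsException e) {
            // The edits backing some pending events were purged before this
            // client caught up; a real client would re-scan the namespace here.
            break;
          }
        }
      }
    }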
+  <!-- start class org.apache.hadoop.hdfs.inotify.MissingEventsException -->
+  <class name="MissingEventsException" extends="java.lang.Exception"
     abstract="false"
-    static="true" final="false" visibility="public"
+    static="false" final="false" visibility="public"
     deprecated="not deprecated">
-    <constructor name="Factory"
+    <constructor name="MissingEventsException"
+      static="false" final="false" visibility="public"
+      deprecated="not deprecated">
+    </constructor>
+    <constructor name="MissingEventsException" type="long, long"
       static="false" final="false" visibility="public"
       deprecated="not deprecated">
     </constructor>
-    <method name="setIncremental" return="org.apache.hadoop.hdfs.client.BlockRe

<TRUNCATED>
