Repository: hadoop
Updated Branches:
  refs/heads/branch-2 04621537c -> bb1ed426d


HDFS-8356. Document missing properties in hdfs-default.xml. Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb1ed426
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb1ed426
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb1ed426

Branch: refs/heads/branch-2
Commit: bb1ed426d1b3ffddd23061e0a34b51da91094ccc
Parents: 0462153
Author: Akira Ajisaka <aajis...@apache.org>
Authored: Tue Apr 12 15:24:58 2016 +0900
Committer: Akira Ajisaka <aajis...@apache.org>
Committed: Tue Apr 12 15:28:05 2016 +0900

----------------------------------------------------------------------
 .../src/site/markdown/DeprecatedProperties.md   |    2 +
 .../src/main/resources/hdfs-default.xml         | 1012 +++++++++++++++++-
 .../hadoop/tools/TestHdfsConfigFields.java      |   98 +-
 3 files changed, 1095 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1ed426/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
index 815ae5d..4311548 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
@@ -33,6 +33,7 @@ The following table lists the configuration property names that are deprecated i
 | dfs.http.address | dfs.namenode.http-address |
 | dfs.https.address | dfs.namenode.https-address |
 | dfs.https.client.keystore.resource | dfs.client.https.keystore.resource |
+| dfs.https.enable | dfs.http.policy |
 | dfs.https.need.client.auth | dfs.client.https.need-auth |
 | dfs.max.objects | dfs.namenode.max.objects |
 | dfs.max-repl-streams | dfs.namenode.replication.max-streams |
@@ -51,6 +52,7 @@ The following table lists the configuration property names that are deprecated i
 | dfs.secondary.http.address | dfs.namenode.secondary.http-address |
 | dfs.socket.timeout | dfs.client.socket-timeout |
 | dfs.umaskmode | fs.permissions.umask-mode |
+| dfs.web.ugi | hadoop.http.staticuser.user |
 | dfs.write.packet.size | dfs.client-write-packet-size |
 | fs.checkpoint.dir | dfs.namenode.checkpoint.dir |
 | fs.checkpoint.edits.dir | dfs.namenode.checkpoint.edits.dir |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1ed426/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 397295c..d7417f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -264,7 +264,7 @@
      dfs.datanode.dns.interface.
    </description>
  </property>
- 
+
 <property>
   <name>dfs.datanode.dns.nameserver</name>
   <value>default</value>
@@ -276,7 +276,7 @@
     dfs.datanode.dns.nameserver.
   </description>
  </property>
- 
+
  <property>
   <name>dfs.namenode.backup.address</name>
   <value>0.0.0.0:50100</value>
@@ -285,7 +285,7 @@
     If the port is 0 then the server will start on a free port.
   </description>
 </property>
- 
+
  <property>
   <name>dfs.namenode.backup.http-address</name>
   <value>0.0.0.0:50105</value>
@@ -1429,6 +1429,13 @@
   <description>
     The prefix for a given nameservice, contains a comma-separated
     list of namenodes for a given nameservice (eg EXAMPLENAMESERVICE).
+
+    Unique identifiers for each NameNode in the nameservice, delimited by
+    commas. This will be used by DataNodes to determine all the NameNodes
+    in the cluster. For example, if you used "mycluster" as the nameservice
+    ID previously, and you wanted to use "nn1" and "nn2" as the individual
+    IDs of the NameNodes, you would configure a property
+    dfs.ha.namenodes.mycluster, and its value "nn1,nn2".
   </description>
 </property>
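
    As a concrete illustration of the description above, a minimal hdfs-site.xml
    sketch for the documented example (the nameservice name "mycluster" and the
    NameNode IDs "nn1"/"nn2" are illustrative values taken from the description,
    not defaults):

    <property>
      <name>dfs.ha.namenodes.mycluster</name>
      <value>nn1,nn2</value>
    </property>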
 
@@ -2976,4 +2983,1003 @@
     refreshes the configuration files used by the class.
   </description>
 </property>
+
+<property>
+  <name>datanode.https.port</name>
+  <value>50475</value>
+  <description>
+    HTTPS port for DataNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.dispatcherThreads</name>
+  <value>200</value>
+  <description>
+    Size of the thread pool for the HDFS balancer block mover.
+    dispatchExecutor
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.movedWinWidth</name>
+  <value>5400000</value>
+  <description>
+    Window of time in ms for the HDFS balancer tracking blocks and their
+    locations.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.moverThreads</name>
+  <value>1000</value>
+  <description>
+    Thread pool size for executing block moves.
+    moverThreadAllocator
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.max-size-to-move</name>
+  <value>10737418240</value>
+  <description>
+    Maximum number of bytes that can be moved by the balancer in a single
+    thread.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.getBlocks.min-block-size</name>
+  <value>10485760</value>
+  <description>
+    Minimum block threshold size in bytes to ignore when fetching a source's
+    block list.
+  </description>
+</property>
+
+<property>
+  <name>dfs.balancer.getBlocks.size</name>
+  <value>2147483648</value>
+  <description>
+    Total size in bytes of Datanode blocks to get when fetching a source's
+    block list.
+  </description>
+</property>
+
+<property>
+  <name>dfs.block.invalidate.limit</name>
+  <value>1000</value>
+  <description>
+    Limit on the size of the invalidated block list kept by the Namenode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.block.misreplication.processing.limit</name>
+  <value>10000</value>
+  <description>
+    Maximum number of blocks to process for initializing replication queues.
+  </description>
+</property>
+
+<property>
+  <name>dfs.block.replicator.classname</name>
+  <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault</value>
+  <description>
+    Class representing block placement policy for non-striped files.
+  </description>
+</property>
+
+<property>
+  <name>dfs.blockreport.incremental.intervalMsec</name>
+  <value>0</value>
+  <description>
+    If set to a positive integer, the value in ms to wait between sending
+    incremental block reports from the Datanode to the Namenode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.checksum.type</name>
+  <value>CRC32C</value>
+  <description>
+    Checksum type
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.block.write.locateFollowingBlock.retries</name>
+  <value>5</value>
+  <description>
+    Number of retries to use when finding the next block during HDFS writes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.failover.proxy.provider</name>
+  <value></value>
+  <description>
+    The prefix (plus a required nameservice ID) for the class name of the
+    configured Failover proxy provider for the host.  For more detailed
+    information, please consult the "Configuration Details" section of
+    the HDFS High Availability documentation.
+  </description>
+</property>
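
    For example, a hedged sketch of how this prefix is combined with a
    nameservice ID ("mycluster" is illustrative; ConfiguredFailoverProxyProvider
    is the provider commonly shown in the HDFS High Availability documentation):

    <property>
      <name>dfs.client.failover.proxy.provider.mycluster</name>
      <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
    </property>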
+
+<property>
+  <name>dfs.client.key.provider.cache.expiry</name>
+  <value>864000000</value>
+  <description>
+    DFS client security key cache expiration in milliseconds.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.max.block.acquire.failures</name>
+  <value>3</value>
+  <description>
+    Maximum failures allowed when trying to get block information from a specific datanode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.read.prefetch.size</name>
+  <value></value>
+  <description>
+    The number of bytes the DFSClient will fetch from the Namenode
+    during a read operation.  Defaults to 10 * ${dfs.blocksize}.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.read.short.circuit.replica.stale.threshold.ms</name>
+  <value>1800000</value>
+  <description>
+    Threshold in milliseconds for read entries during short-circuit local reads.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.read.shortcircuit.buffer.size</name>
+  <value>1048576</value>
+  <description>
+    Buffer size in bytes for short-circuit local reads.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.replica.accessor.builder.classes</name>
+  <value></value>
+  <description>
+    Comma-separated classes for building ReplicaAccessor.  If the classes
+    are specified, client will use external BlockReader that uses the
+    ReplicaAccessor built by the builder.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.interval-ms.get-last-block-length</name>
+  <value>4000</value>
+  <description>
+    Retry interval in milliseconds to wait between retries in getting
+    block lengths from the datanodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.max.attempts</name>
+  <value>10</value>
+  <description>
+    Max retry attempts for DFSClient talking to namenodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.policy.enabled</name>
+  <value>false</value>
+  <description>
+    If true, turns on DFSClient retry policy.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.policy.spec</name>
+  <value>10000,6,60000,10</value>
+  <description>
+    Set to pairs of timeouts and retries for DFSClient.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.times.get-last-block-length</name>
+  <value>3</value>
+  <description>
+    Number of retries for calls to fetchLocatedBlocksAndGetLastBlockLength().
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.retry.window.base</name>
+  <value>3000</value>
+  <description>
+    Base time window in ms for DFSClient retries.  For each retry attempt,
+    this value is extended linearly (e.g. 3000 ms for first attempt and
+    first retry, 6000 ms for second retry, 9000 ms for third retry, etc.).
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.socket-timeout</name>
+  <value>60000</value>
+  <description>
+    Default timeout value in milliseconds for all sockets.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.socketcache.capacity</name>
+  <value>16</value>
+  <description>
+    Socket cache capacity (in entries) for short-circuit reads.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.socketcache.expiryMsec</name>
+  <value>3000</value>
+  <description>
+    Socket cache expiration for short-circuit reads in msec.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.test.drop.namenode.response.number</name>
+  <value>0</value>
+  <description>
+    The number of Namenode responses dropped by DFSClient for each RPC call.  Used
+    for testing the NN retry cache.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.hedged.read.threadpool.size</name>
+  <value>0</value>
+  <description>
+    Support 'hedged' reads in DFSClient. To enable this feature, set the 
parameter
+    to a positive number. The threadpool size is how many threads to dedicate
+    to the running of these 'hedged', concurrent reads in your client.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.hedged.read.threshold.millis</name>
+  <value>500</value>
+  <description>
+    Configure 'hedged' reads in DFSClient. This is the number of milliseconds
+    to wait before starting up a 'hedged' read.
+  </description>
+</property>
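
    Taken together, the two hedged-read properties above might be enabled in
    hdfs-site.xml as follows (a sketch; the pool size of 20 is an arbitrary
    illustrative value, not a recommendation):

    <property>
      <name>dfs.client.hedged.read.threadpool.size</name>
      <value>20</value>
    </property>
    <property>
      <name>dfs.client.hedged.read.threshold.millis</name>
      <value>500</value>
    </property>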
+
+<property>
+  <name>dfs.client.use.legacy.blockreader</name>
+  <value>false</value>
+  <description>
+    If true, use the RemoteBlockReader class for local read short circuit.  If false, use
+    the newer RemoteBlockReader2 class.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.write.byte-array-manager.count-limit</name>
+  <value>2048</value>
+  <description>
+    The maximum number of arrays allowed for each array length.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.write.byte-array-manager.count-reset-time-period-ms</name>
+  <value>10000</value>
+  <description>
+    The time period in milliseconds that the allocation count for each array length is
+    reset to zero if there is no increment.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.write.byte-array-manager.count-threshold</name>
+  <value>128</value>
+  <description>
+    The count threshold for each array length so that a manager is created only after the
+    allocation count exceeds the threshold. In other words, the particular array length
+    is not managed until the allocation count exceeds the threshold.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.write.byte-array-manager.enabled</name>
+  <value>false</value>
+  <description>
+    If true, enables byte array manager used by DFSOutputStream.
+  </description>
+</property>
+
+<property>
+  <name>dfs.client.write.max-packets-in-flight</name>
+  <value>80</value>
+  <description>
+    The maximum number of DFSPackets allowed in flight.
+  </description>
+</property>
+
+<property>
+  <name>dfs.content-summary.limit</name>
+  <value>5000</value>
+  <description>
+    The maximum content summary counts allowed in one locking period. 0 or a negative number
+    means no limit (i.e. no yielding).
+  </description>
+</property>
+
+<property>
+  <name>dfs.content-summary.sleep-microsec</name>
+  <value>500</value>
+  <description>
+    The length of time in microseconds to put the thread to sleep, between reacquiring
+    the locks in content summary computation.
+  </description>
+</property>
+
+<property>
+  <name>dfs.data.transfer.client.tcpnodelay</name>
+  <value>true</value>
+  <description>
+    If true, set TCP_NODELAY to sockets for transferring data from DFS client.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.balance.max.concurrent.moves</name>
+  <value>5</value>
+  <description>
+    Maximum number of threads for Datanode balancer pending moves.  This
+    value is reconfigurable via the "dfsadmin -reconfig" command.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.fsdataset.factory</name>
+  <value></value>
+  <description>
+    The class name for the underlying storage that stores replicas for a
+    Datanode.  Defaults to
+    org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
+  <value></value>
+  <description>
+    The class name of the policy for choosing volumes in the list of
+    directories.  Defaults to
+    org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy.
+    If you would like to take into account available disk space, set the
+    value to
+    "org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy".
+  </description>
+</property>
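
    For instance, to switch to the space-aware policy named in the description
    above, an hdfs-site.xml override might look like this (a sketch using the
    class name quoted in the description):

    <property>
      <name>dfs.datanode.fsdataset.volume.choosing.policy</name>
      <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy</value>
    </property>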
+
+<property>
+  <name>dfs.datanode.hostname</name>
+  <value></value>
+  <description>
+    Optional.  The hostname for the Datanode containing this
+    configuration file.  Will be different for each machine.
+    Defaults to current hostname.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.lazywriter.interval.sec</name>
+  <value>60</value>
+  <description>
+    Interval in seconds for Datanodes for lazy persist writes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.network.counts.cache.max.size</name>
+  <value>2147483647</value>
+  <description>
+    The maximum number of entries the datanode per-host network error
+    count cache may contain.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.oob.timeout-ms</name>
+  <value>1500,0,0,0</value>
+  <description>
+    Timeout value when sending OOB response for each OOB type, which are
+    OOB_RESTART, OOB_RESERVED1, OOB_RESERVED2, and OOB_RESERVED3,
+    respectively.  Currently, only OOB_RESTART is used.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.parallel.volumes.load.threads.num</name>
+  <value></value>
+  <description>
+    Maximum number of threads to use for upgrading data directories.
+    The default value is the number of storage directories in the
+    DataNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.ram.disk.replica.tracker</name>
+  <value></value>
+  <description>
+    Name of the class implementing the RamDiskReplicaTracker interface.
+    Defaults to
+    org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.restart.replica.expiration</name>
+  <value>50</value>
+  <description>
+    During shutdown for restart, the amount of time in seconds budgeted for
+    datanode restart.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.socket.reuse.keepalive</name>
+  <value>4000</value>
+  <description>
+    The window of time in ms before the DataXceiver closes a socket for a
+    single request.  If a second request occurs within that window, the
+    socket can be reused.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.socket.write.timeout</name>
+  <value>480000</value>
+  <description>
+    Timeout in ms for client socket writes to DataNodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.sync.behind.writes.in.background</name>
+  <value>false</value>
+  <description>
+    If set to true, then sync_file_range() system call will occur
+    asynchronously.  This property is only valid when the property
+    dfs.datanode.sync.behind.writes is true.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.transferTo.allowed</name>
+  <value>true</value>
+  <description>
+    If false, break block transfers on 32-bit machines greater than
+    or equal to 2GB into smaller chunks.
+  </description>
+</property>
+
+<property>
+  <name>dfs.ha.fencing.methods</name>
+  <value></value>
+  <description>
+    A list of scripts or Java classes which will be used to fence
+    the Active NameNode during a failover.  See the HDFS High
+    Availability documentation for details on automatic HA
+    configuration.
+  </description>
+</property>
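
    For example (a sketch; sshfence is one of the built-in fencing methods
    described in the HDFS High Availability documentation):

    <property>
      <name>dfs.ha.fencing.methods</name>
      <value>sshfence</value>
    </property>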
+
+<property>
+  <name>dfs.ha.standby.checkpoints</name>
+  <value>true</value>
+  <description>
+    If true, a NameNode in Standby state periodically takes a checkpoint
+    of the namespace, saves it to its local storage and then uploads it to
+    the remote NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.ha.zkfc.port</name>
+  <value>8019</value>
+  <description>
+    The port number that the zookeeper failover controller RPC
+    server binds to.
+  </description>
+</property>
+
+<property>
+  <name>dfs.http.port</name>
+  <value></value>
+  <description>
+    The http port used for Hftp, HttpFS, and WebHdfs file systems.
+  </description>
+</property>
+
+<property>
+  <name>dfs.https.port</name>
+  <value></value>
+  <description>
+    The https port used for Hsftp and SWebHdfs file systems.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.edits.dir</name>
+  <value>/tmp/hadoop/dfs/journalnode/</value>
+  <description>
+    The directory where the journal edit files are stored.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
+  <value></value>
+  <description>
+    Kerberos SPNEGO principal name used by the journal node.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.kerberos.principal</name>
+  <value></value>
+  <description>
+    Kerberos principal name for the journal node.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.keytab.file</name>
+  <value></value>
+  <description>
+    Kerberos keytab file for the journal node.
+  </description>
+</property>
+
+<property>
+  <name>dfs.ls.limit</name>
+  <value>1000</value>
+  <description>
+    Limit the number of files printed by ls. If less than or equal to
+    zero, at most DFS_LIST_LIMIT_DEFAULT (= 1000) will be printed.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.movedWinWidth</name>
+  <value>5400000</value>
+  <description>
+    The minimum time interval, in milliseconds, before a block can be
+    moved to another location again.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.moverThreads</name>
+  <value>1000</value>
+  <description>
+    Configure the balancer's mover thread pool size.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.retry.max.attempts</name>
+  <value>10</value>
+  <description>
+    The maximum number of retries before the mover considers the
+    move failed.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.audit.log.async</name>
+  <value>false</value>
+  <description>
+    If true, enables asynchronous audit log.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.audit.log.token.tracking.id</name>
+  <value>false</value>
+  <description>
+    If true, adds a tracking ID for all audit log events.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction</name>
+  <value>0.6</value>
+  <description>
+    Only used when the dfs.block.replicator.classname is set to
+    org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy.
+    Special value between 0 and 1, noninclusive.  Increases chance of
+    placing blocks on Datanodes with less disk space used.
+  </description>
+</property>
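
    Since this fraction only takes effect with the matching placement policy,
    a sketch pairing the two properties described above (0.8 is an arbitrary
    illustrative value):

    <property>
      <name>dfs.block.replicator.classname</name>
      <value>org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy</value>
    </property>
    <property>
      <name>dfs.namenode.available-space-block-placement-policy.balanced-space-preference-fraction</name>
      <value>0.8</value>
    </property>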
+
+<property>
+  <name>dfs.namenode.backup.dnrpc-address</name>
+  <value></value>
+  <description>
+    Service RPC address for the backup Namenode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.delegation.token.always-use</name>
+  <value>false</value>
+  <description>
+    For testing.  Setting to true always allows the DT secret manager
+    to be used, even if security is disabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edits.asynclogging</name>
+  <value>false</value>
+  <description>
+    If set to true, enables asynchronous edit logs in the Namenode.  If set
+    to false, the Namenode uses the traditional synchronous edit logs.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edits.dir.minimum</name>
+  <value>1</value>
+  <description>
+    dfs.namenode.edits.dir includes both required directories
+    (specified by dfs.namenode.edits.dir.required) and optional directories.
+
+    The number of usable optional directories must be greater than or equal
+    to this property.  If the number of usable optional directories falls
+    below dfs.namenode.edits.dir.minimum, HDFS will issue an error.
+
+    This property defaults to 1.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edits.journal-plugin</name>
+  <value></value>
+  <description>
+    When FSEditLog is creating JournalManagers from dfs.namenode.edits.dir,
+    and it encounters a URI with a schema different to "file" it loads the
+    name of the implementing class from
+    "dfs.namenode.edits.journal-plugin.[schema]". This class must implement
+    JournalManager and have a constructor which takes (Configuration, URI).
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.file.close.num-committed-allowed</name>
+  <value>0</value>
+  <description>
+    Normally a file can only be closed when all its blocks are committed.
+    When this value is set to a positive integer N, a file can be closed
+    when N blocks are committed and the rest complete.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.inode.attributes.provider.class</name>
+  <value></value>
+  <description>
+    Name of class to use for delegating HDFS authorization.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.max-num-blocks-to-log</name>
+  <value>1000</value>
+  <description>
+    Puts a limit on the number of blocks printed to the log by the Namenode
+    after a block report.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.max.op.size</name>
+  <value>52428800</value>
+  <description>
+    Maximum opcode size in bytes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.name.cache.threshold</name>
+  <value>10</value>
+  <description>
+    Frequently accessed files that are accessed more times than this
+    threshold are cached in the FSDirectory nameCache.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.replication.max-streams</name>
+  <value>2</value>
+  <description>
+    Hard limit for the number of highest-priority replication streams.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.replication.max-streams-hard-limit</name>
+  <value>4</value>
+  <description>
+    Hard limit for all replication streams.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.replication.pending.timeout-sec</name>
+  <value>-1</value>
+  <description>
+    Timeout in seconds for block replication.  If this value is 0 or less,
+    then it will default to 5 minutes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.stale.datanode.minimum.interval</name>
+  <value>3</value>
+  <description>
+    Minimum number of missed heartbeat intervals for a datanode to
+    be marked stale by the Namenode.  The actual interval is calculated as
+    (dfs.namenode.stale.datanode.minimum.interval * dfs.heartbeat.interval)
+    in seconds.  If this value is greater than the property
+    dfs.namenode.stale.datanode.interval, then the calculated value above
+    is used.
+  </description>
+</property>
+
+<property>
+  <name>dfs.pipeline.ecn</name>
+  <value>false</value>
+  <description>
+    If true, allows ECN (explicit congestion notification) from the
+    Datanode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.accept-recovery.timeout.ms</name>
+  <value>120000</value>
+  <description>
+    Quorum timeout in milliseconds during accept phase of
+    recovery/synchronization for a specific segment.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.finalize-segment.timeout.ms</name>
+  <value>120000</value>
+  <description>
+    Quorum timeout in milliseconds during finalizing for a specific
+    segment.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.get-journal-state.timeout.ms</name>
+  <value>120000</value>
+  <description>
+    Timeout in milliseconds when calling getJournalState() on the
+    JournalNodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.new-epoch.timeout.ms</name>
+  <value>120000</value>
+  <description>
+    Timeout in milliseconds when getting an epoch number for write
+    access to JournalNodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.prepare-recovery.timeout.ms</name>
+  <value>120000</value>
+  <description>
+    Quorum timeout in milliseconds during preparation phase of
+    recovery/synchronization for a specific segment.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.queued-edits.limit.mb</name>
+  <value>10</value>
+  <description>
+    Queue size in MB for quorum journal edits.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.select-input-streams.timeout.ms</name>
+  <value>20000</value>
+  <description>
+    Timeout in milliseconds for accepting streams from JournalManagers.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.start-segment.timeout.ms</name>
+  <value>20000</value>
+  <description>
+    Quorum timeout in milliseconds for starting a log segment.
+  </description>
+</property>
+
+<property>
+  <name>dfs.qjournal.write-txns.timeout.ms</name>
+  <value>20000</value>
+  <description>
+    Write timeout in milliseconds when writing to a quorum of remote
+    journals.
+  </description>
+</property>
+
+<property>
+  <name>dfs.quota.by.storage.type.enabled</name>
+  <value>true</value>
+  <description>
+    If true, enables quotas based on storage type.
+  </description>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.kerberos.principal</name>
+  <value></value>
+  <description>
+    Kerberos principal name for the Secondary NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.keytab.file</name>
+  <value></value>
+  <description>
+    Kerberos keytab file for the Secondary NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.support.append</name>
+  <value>true</value>
+  <description>
+    Enables append support on the NameNode.
+  </description>
+</property>
+
+<property>
+  <name>dfs.web.authentication.filter</name>
+  <value>org.apache.hadoop.hdfs.web.AuthFilter</value>
+  <description>
+    Authentication filter class used for WebHDFS.
+  </description>
+</property>
+
+<property>
+  <name>dfs.web.authentication.simple.anonymous.allowed</name>
+  <value></value>
+  <description>
+    If true, allows anonymous users to access WebHDFS. Set to
+    false to disable anonymous authentication.
+  </description>
+</property>
+
+<property>
+  <name>dfs.web.ugi</name>
+  <value></value>
+  <description>
+    dfs.web.ugi is deprecated. Use hadoop.http.staticuser.user instead.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.netty.high.watermark</name>
+  <value>65535</value>
+  <description>
+    High watermark configuration to Netty for Datanode WebHdfs.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.netty.low.watermark</name>
+  <value>32768</value>
+  <description>
+    Low watermark configuration to Netty for Datanode WebHdfs.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.oauth2.access.token.provider</name>
+  <value></value>
+  <description>
+    Access token provider class for WebHDFS using OAuth2.
+    Defaults to org.apache.hadoop.hdfs.web.oauth2.ConfCredentialBasedAccessTokenProvider.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.oauth2.client.id</name>
+  <value></value>
+  <description>
+    Client id used to obtain access token with either credential or
+    refresh token.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.oauth2.enabled</name>
+  <value>false</value>
+  <description>
+    If true, enables OAuth2 in WebHDFS.
+  </description>
+</property>
+
+<property>
+  <name>dfs.webhdfs.oauth2.refresh.url</name>
+  <value></value>
+  <description>
+    URL against which to post for obtaining bearer token with
+    either credential or refresh token.
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.keypassword</name>
+  <value></value>
+  <description>
+    Keystore key password for HTTPS SSL configuration
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.location</name>
+  <value></value>
+  <description>
+    Keystore location for HTTPS SSL configuration
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.keystore.password</name>
+  <value></value>
+  <description>
+    Keystore password for HTTPS SSL configuration
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.location</name>
+  <value></value>
+  <description>
+    Truststore location for HTTPS SSL configuration
+  </description>
+</property>
+
+<property>
+  <name>ssl.server.truststore.password</name>
+  <value></value>
+  <description>
+    Truststore password for HTTPS SSL configuration
+  </description>
+</property>
 </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb1ed426/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
index 9637f59..ae13f06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java
@@ -41,11 +41,74 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
   public void initializeMemberVariables() {
     xmlFilename = new String("hdfs-default.xml");
     configurationClasses = new Class[] { HdfsClientConfigKeys.class,
-        DFSConfigKeys.class};
+        DFSConfigKeys.class };
 
     // Set error modes
     errorIfMissingConfigProps = true;
-    errorIfMissingXmlProps = false;
+    errorIfMissingXmlProps = true;
+
+    // Initialize used variables
+    configurationPropsToSkipCompare = new HashSet<String>();
+
+    // Ignore testing based parameter
+    configurationPropsToSkipCompare.add("ignore.secure.ports.for.testing");
+
+    // Remove deprecated properties listed in Configuration#DeprecationDelta
+    configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
+
+    // Remove deprecated properties in DeprecatedProperties.md
+    configurationPropsToSkipCompare.add(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY);
+
+    // Remove default properties
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_DEFAULT);
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
+
+    // Remove support property
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY);
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY);
+
+    // Purposely hidden, based on comments in DFSConfigKeys
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY);
+
+    // Fully deprecated properties?
+    configurationPropsToSkipCompare
+        .add("dfs.corruptfilesreturned.max");
+    configurationPropsToSkipCompare
+        .add("dfs.datanode.hdfs-blocks-metadata.enabled");
+    configurationPropsToSkipCompare
+        .add("dfs.metrics.session-id");
+    configurationPropsToSkipCompare
+        .add("dfs.datanode.synconclose");
+    configurationPropsToSkipCompare
+        .add("dfs.datanode.non.local.lazy.persist");
+    configurationPropsToSkipCompare
+        .add("dfs.namenode.tolerate.heartbeat.multiplier");
+    configurationPropsToSkipCompare
+        .add("dfs.namenode.stripe.min");
+    configurationPropsToSkipCompare
+        .add("dfs.namenode.replqueue.threshold-pct");
+
+    // Removed by HDFS-6440
+    configurationPropsToSkipCompare
+        .add("dfs.ha.log-roll.rpc.timeout");
+
+    // Example (not real) property in hdfs-default.xml
+    configurationPropsToSkipCompare.add("dfs.ha.namenodes");
+
+    // Property used for internal testing only
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION);
+
+    // Property not intended for users
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_DATANODE_STARTUP_KEY);
+    configurationPropsToSkipCompare
+        .add(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY);
 
     // Allocate
     xmlPropsToSkipCompare = new HashSet<String>();
@@ -58,29 +121,36 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
     // Used dynamically as part of DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX
     xmlPropsToSkipCompare.add("dfs.namenode.edits.journal-plugin.qjournal");
 
-    // Example (not real) property in hdfs-default.xml
-    xmlPropsToSkipCompare.add("dfs.ha.namenodes.EXAMPLENAMESERVICE");
-
     // Defined in org.apache.hadoop.fs.CommonConfigurationKeys
     xmlPropsToSkipCompare.add("hadoop.user.group.metrics.percentiles.intervals");
 
     // Used oddly by DataNode to create new config String
     xmlPropsToSkipCompare.add("hadoop.hdfs.configuration.version");
 
-    // Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module
-    xmlPrefixToSkipCompare.add("nfs");
-
-    // Not a hardcoded property.  Used by SaslRpcClient
-    xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern");
-
-    // Skip comparing in branch-2.  Removed in trunk with HDFS-7985.
-    xmlPropsToSkipCompare.add("dfs.webhdfs.enabled");
-
     // Some properties have moved to HdfsClientConfigKeys
     xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
 
     // Ignore HTrace properties
     xmlPropsToSkipCompare.add("fs.client.htrace");
     xmlPropsToSkipCompare.add("hadoop.htrace");
+
+    // Ignore SpanReceiveHost properties
+    xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
+    xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
+
+    // Remove deprecated properties listed in Configuration#DeprecationDelta
+    xmlPropsToSkipCompare.add(DFSConfigKeys.DFS_DF_INTERVAL_KEY);
+
+    // Skip this until TestConfigurationFieldsBase can integrate DfsClientConf
+    xmlPropsToSkipCompare.add("dfs.datanode.hdfs-blocks-metadata.enabled");
+
+    // Kept in the NfsConfiguration class in the hadoop-hdfs-nfs module
+    xmlPrefixToSkipCompare.add("nfs");
+
+    // Not a hardcoded property.  Used by SaslRpcClient
+    xmlPrefixToSkipCompare.add("dfs.namenode.kerberos.principal.pattern");
+
+    // Skip over example property
+    xmlPrefixToSkipCompare.add("dfs.ha.namenodes");
   }
 }
