Repository: hadoop
Updated Branches:
  refs/heads/branch-2 701b96ca8 -> f52fcdc2e
  refs/heads/trunk 72389c78d -> 78a7e8d3a


HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f52fcdc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f52fcdc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f52fcdc2

Branch: refs/heads/branch-2
Commit: f52fcdc2e023a3e1537bbb4ab65a91f2a28ab972
Parents: 701b96c
Author: Steve Loughran <ste...@apache.org>
Authored: Tue Feb 17 18:14:31 2015 +0000
Committer: Steve Loughran <ste...@apache.org>
Committed: Tue Feb 17 18:14:31 2015 +0000

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 +
 .../src/main/resources/core-default.xml         | 71 +++++++++++++++++-
 .../src/site/markdown/tools/hadoop-aws/index.md | 77 +++++++++++++++++++-
 3 files changed, 146 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f52fcdc2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a8b38ed..5fce31f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -202,6 +202,8 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11600. Fix up source codes to be compiled with Guava 17.0. (ozawa)
 
+    HADOOP-11522. Update S3A Documentation. (Thomas Demoor via stevel)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f52fcdc2/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 000c764..46196ae 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -682,12 +682,12 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
-  <name>fs.s3a.access.key</name>
+  <name>fs.s3a.awsAccessKeyId</name>
   <description>AWS access key ID. Omit for Role-based authentication.</description>
 </property>
 
 <property>
-  <name>fs.s3a.secret.key</name>
+  <name>fs.s3a.awsSecretAccessKey</name>
   <description>AWS secret key. Omit for Role-based authentication.</description>
 </property>
 
@@ -704,6 +704,46 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.endpoint</name>
+  <description>AWS S3 endpoint to connect to. An up-to-date list is
+    provided in the AWS Documentation: regions and endpoints. Without this
+    property, the standard region (s3.amazonaws.com) is assumed.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.host</name>
+  <description>Hostname of the (optional) proxy server for S3 connections.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.port</name>
+  <description>Proxy server port. If this property is not set
+    but fs.s3a.proxy.host is, port 80 or 443 is assumed (consistent with
+    the value of fs.s3a.connection.ssl.enabled).</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.username</name>
+  <description>Username for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.password</name>
+  <description>Password for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.domain</name>
+  <description>Domain for authenticating with proxy server.</description>
+</property>
+
+<property>
+  <name>fs.s3a.proxy.workstation</name>
+  <description>Workstation for authenticating with proxy server.</description>
+</property>
+
+<property>
   <name>fs.s3a.attempts.maximum</name>
   <value>10</value>
   <description>How many times we should retry commands on transient errors.</description>
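
For example (the endpoint is the EU (Ireland) region endpoint from the AWS
"Regions and Endpoints" documentation; the proxy hostname and port are
illustrative placeholders, not part of this patch), a deployment behind an
HTTP proxy might set:

    <property>
      <name>fs.s3a.endpoint</name>
      <!-- region endpoint listed in the AWS "Regions and Endpoints" docs -->
      <value>s3-eu-west-1.amazonaws.com</value>
    </property>

    <property>
      <name>fs.s3a.proxy.host</name>
      <!-- placeholder hostname for illustration -->
      <value>proxy.example.com</value>
    </property>

    <property>
      <name>fs.s3a.proxy.port</name>
      <!-- placeholder port; without this, 80 or 443 is assumed -->
      <value>8080</value>
    </property>
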
@@ -723,6 +763,33 @@ for ldap providers in the same way as above does.
 </property>
 
 <property>
+  <name>fs.s3a.threads.max</name>
+  <value>256</value>
+  <description>Maximum number of concurrent active (part) uploads,
+    each of which uses a thread from the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.core</name>
+  <value>15</value>
+  <description>Number of core threads in the threadpool.</description>
+</property>
+
+<property>
+  <name>fs.s3a.threads.keepalivetime</name>
+  <value>60</value>
+  <description>Number of seconds a thread can be idle before being
+    terminated.</description>
+</property>
+
+<property>
+  <name>fs.s3a.max.total.tasks</name>
+  <value>1000</value>
+  <description>Number of (part) uploads allowed in the queue before
+    blocking additional uploads.</description>
+</property>
+
+<property>
   <name>fs.s3a.multipart.size</name>
   <value>104857600</value>
   <description>How big (in bytes) to split upload or copy operations up into.</description>
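
As an illustration of tuning these new pool settings (the values below are
arbitrary examples, not recommendations from this patch), a client with
limited memory or bandwidth could shrink the upload thread pool and queue
below the defaults above:

    <property>
      <name>fs.s3a.threads.max</name>
      <!-- example value; default is 256 -->
      <value>32</value>
    </property>

    <property>
      <name>fs.s3a.max.total.tasks</name>
      <!-- example value; default is 1000 -->
      <value>128</value>
    </property>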

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f52fcdc2/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
index 375f82c..8f511dd 100644
--- a/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
+++ b/hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md
@@ -32,7 +32,7 @@ The specifics of using these filesystems are documented below.
 
 ## Warning: Object Stores are not filesystems.
 
-Amazon S3 is an example of "an object store". In order to achieve scalalablity
+Amazon S3 is an example of "an object store". In order to achieve scalability
 and especially high availability, S3 has —as many other cloud object stores have
 done— relaxed some of the constraints which classic "POSIX" filesystems promise.
 
@@ -165,6 +165,46 @@ If you do any of these: change your credentials immediately!
     </property>
 
     <property>
+      <name>fs.s3a.endpoint</name>
+      <description>AWS S3 endpoint to connect to. An up-to-date list is
+        provided in the AWS Documentation: regions and endpoints. Without this
+        property, the standard region (s3.amazonaws.com) is assumed.
+      </description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.host</name>
+      <description>Hostname of the (optional) proxy server for S3 connections.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.port</name>
+      <description>Proxy server port. If this property is not set
+        but fs.s3a.proxy.host is, port 80 or 443 is assumed (consistent with
+        the value of fs.s3a.connection.ssl.enabled).</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.username</name>
+      <description>Username for authenticating with proxy server.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.password</name>
+      <description>Password for authenticating with proxy server.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.domain</name>
+      <description>Domain for authenticating with proxy server.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.proxy.workstation</name>
+      <description>Workstation for authenticating with proxy server.</description>
+    </property>
+
+    <property>
       <name>fs.s3a.attempts.maximum</name>
       <value>10</value>
       <description>How many times we should retry commands on transient errors.</description>
@@ -184,6 +224,33 @@ If you do any of these: change your credentials immediately!
     </property>
 
     <property>
+      <name>fs.s3a.threads.max</name>
+      <value>256</value>
+      <description>Maximum number of concurrent active (part) uploads,
+      each of which uses a thread from the threadpool.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.threads.core</name>
+      <value>15</value>
+      <description>Number of core threads in the threadpool.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.threads.keepalivetime</name>
+      <value>60</value>
+      <description>Number of seconds a thread can be idle before being
+        terminated.</description>
+    </property>
+
+    <property>
+      <name>fs.s3a.max.total.tasks</name>
+      <value>1000</value>
+      <description>Number of (part) uploads allowed in the queue before
+      blocking additional uploads.</description>
+    </property>
+
+    <property>
       <name>fs.s3a.multipart.size</name>
       <value>104857600</value>
       <description>How big (in bytes) to split upload or copy operations up into.</description>
@@ -231,6 +298,9 @@ If you do any of these: change your credentials immediately!
 
 ## Testing the S3 filesystem clients
 
+Due to eventual consistency, tests may fail without an obvious cause. Transient
+failures, which no longer occur upon rerunning the test, should thus be ignored.
+
 To test the S3* filesystem clients, you need to provide two files
 which pass in authentication details to the test runner
 
@@ -256,7 +326,10 @@ each filesystem for its testing.
 2. `test.fs.s3.name` : the URL of the bucket for "S3"  tests
 
 The contents of each bucket will be destroyed during the test process:
-do not use the bucket for any purpose other than testing.
+do not use the bucket for any purpose other than testing. Furthermore, for
+s3a, all in-progress multi-part uploads to the bucket will be aborted at the
+start of a test (by forcing fs.s3a.multipart.purge=true) to clean up the
+temporary state of previously failed tests.
 
 Example:
 

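As a sketch of the test setup described above (this is not part of the
patch: the test bucket property name is an assumption about the test
runner, and the credential values are placeholders), a test configuration
file might look like:

    <configuration>
      <property>
        <!-- property name assumed for illustration, by analogy with test.fs.s3.name -->
        <name>test.fs.s3a.name</name>
        <value>s3a://test-bucket/</value>
      </property>
      <property>
        <name>fs.s3a.awsAccessKeyId</name>
        <value>ACCESS-KEY-PLACEHOLDER</value>
      </property>
      <property>
        <name>fs.s3a.awsSecretAccessKey</name>
        <value>SECRET-KEY-PLACEHOLDER</value>
      </property>
    </configuration>

Note that, as stated above, the tests force fs.s3a.multipart.purge=true on
this bucket, so any in-progress multi-part uploads to it will be aborted
when a test starts.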