http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
----------------------------------------------------------------------
diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js 
b/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
new file mode 100644
index 0000000..3f96f00
--- /dev/null
+++ b/hadoop-hdds/docs/themes/ozonedoc/static/js/ozonedoc.js
@@ -0,0 +1,23 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$(
+  function(){
+    $("table").addClass("table table-condensed table-bordered table-striped");
+  }
+);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-hdds/docs/themes/ozonedoc/theme.toml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/docs/themes/ozonedoc/theme.toml 
b/hadoop-hdds/docs/themes/ozonedoc/theme.toml
new file mode 100644
index 0000000..9f427fe
--- /dev/null
+++ b/hadoop-hdds/docs/themes/ozonedoc/theme.toml
@@ -0,0 +1,2 @@
+
+name = "Ozonedoc"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-hdds/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index a6b0d84..d4fa64e 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -37,6 +37,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <module>container-service</module>
     <module>server-scm</module>
     <module>tools</module>
+    <module>docs</module>
 
   </modules>
 
@@ -108,6 +109,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdds-docs</artifactId>
+        <version>${hdds.version}</version>
+      </dependency>
+
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdds-container-service</artifactId>
         <version>${hdds.version}</version>
         <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-hdds/server-scm/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 5fcd2bb..7789c32 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -51,6 +51,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-docs</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-container-service</artifactId>
       <scope>test</scope>
       <type>test-jar</type>
@@ -118,6 +123,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                   </outputDirectory>
                   <includes>webapps/static/**/*.*</includes>
                 </artifactItem>
+                <artifactItem>
+                  <groupId>org.apache.hadoop</groupId>
+                  <artifactId>hadoop-hdds-docs</artifactId>
+                  <outputDirectory>${project.build.outputDirectory}/webapps/scm
+                  </outputDirectory>
+                  <includes>docs/**/*.*</includes>
+                </artifactItem>
               </artifactItems>
               <overWriteSnapshots>true</overWriteSnapshots>
             </configuration>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching 
b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
index 612ee54..109bed1 100755
--- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
+++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
@@ -107,8 +107,7 @@ run cp 
"${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore
 
 
 # Optional documentation, could be missing
-cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/ozone/webapps/ozoneManager/
-cp -r "${ROOT}/hadoop-ozone/docs/target/classes/webapps/docs" 
./share/hadoop/hdds/webapps/scm/
+cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./
 
 #Copy docker compose files
 run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/compose" .

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index b69b910..f625cb1 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -191,5 +191,9 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-datanode</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-docs</artifactId>
+    </dependency>
   </dependencies>
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/README.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/README.md b/hadoop-ozone/docs/README.md
deleted file mode 100644
index 85817a7..0000000
--- a/hadoop-ozone/docs/README.md
+++ /dev/null
@@ -1,55 +0,0 @@
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-# Hadoop Ozone/HDDS docs
-
-This subproject contains the inline documentation for Ozone/HDDS components.
-
-You can create a new page with:
-
-```
-hugo new content/title.md
-```
-
-You can check the rendering with:
-
-```
-hugo serve
-```
-
-This maven project will create the rendered HTML page during the build (ONLY 
if hugo is available). 
-And the dist project will include the documentation.
-
-You can adjust the menu hierarchy with adjusting the header of the markdown 
file:
-
-To show it in the main header add the menu entry:
-
-```
----
-menu: main
----
-```
-
-To show it as a subpage, you can set the parent. (The value could be the title 
of the parent page, 
-our you can defined an `id: ...` in the parent markdown and use that in the 
parent reference.
-
-```
----
-menu:
-   main:
-          parent: "Getting started"
----
-```

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/archetypes/default.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/archetypes/default.md 
b/hadoop-ozone/docs/archetypes/default.md
deleted file mode 100644
index f4cc999..0000000
--- a/hadoop-ozone/docs/archetypes/default.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: "{{ replace .Name "-" " " | title }}"
-menu: main
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/config.yaml
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/config.yaml b/hadoop-ozone/docs/config.yaml
deleted file mode 100644
index e86b599..0000000
--- a/hadoop-ozone/docs/config.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-languageCode: "en-us"
-DefaultContentLanguage: "en"
-title: "Ozone"
-theme: "ozonedoc"
-pygmentsCodeFences: true
-uglyurls: true
-relativeURLs: true
-
-menu:
-  main:
-  - identifier: Starting
-    name: "Getting Started"
-    title: "Getting Started"
-    url: runningviadocker.html
-    weight: 1
-  - identifier: Client
-    name: Client
-    title: Client
-    url: commandshell.html
-    weight: 2
-  - identifier: Tools
-    name: Tools
-    title: Tools
-    url: dozone.html
-    weight: 3

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/BucketCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/BucketCommands.md 
b/hadoop-ozone/docs/content/BucketCommands.md
deleted file mode 100644
index 3ab3505..0000000
--- a/hadoop-ozone/docs/content/BucketCommands.md
+++ /dev/null
@@ -1,122 +0,0 @@
----
-title: Bucket Commands
-menu:
-   main:
-      parent: Client
-      weight: 3
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone shell supports the following bucket commands.
-
-  * [create](#create)
-  * [delete](#delete)
-  * [info](#info)
-  * [list](#list)
-  * [update](#update)
-
-### Create
-
-The bucket create command allows a user to create a bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket in 
**/volume/bucket** format.
-
-{{< highlight bash >}}
-ozone sh bucket create /hive/jan
-{{< /highlight >}}
-
-The above command will create a bucket called _jan_ in the _hive_ volume.
-Since no scheme was specified this command defaults to O3 (RPC) protocol.
-
-### Delete
-
-The bucket delete commands allows an user to delete a volume. If the
-bucket is not empty then this command will fail.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket
-
-{{< highlight bash >}}
-ozone sh volume delete /hive/jan
-{{< /highlight >}}
-
-The above command will delete _jan_ bucket if it is empty.
-
-### Info
-
-The bucket info commands returns the information about the bucket.
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the bucket.
-
-{{< highlight bash >}}
-ozone sh bucket info /hive/jan
-{{< /highlight >}}
-
-The above command will print out the information about _jan_ bucket.
-
-### List
-
-The bucket list commands allows uset to list the buckets in a volume.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -l, --length                   | Maximum number of results to return. 
Default: 100
-| -p, --prefix                   | Optional, Only buckets that match this 
prefix will be returned.
-| -s, --start                    | The listing will start from key after the 
start key.
-|  Uri                           | The name of the _volume_.
-
-{{< highlight bash >}}
-ozone sh bucket list /hive
-{{< /highlight >}}
-
-This command will  list all buckets on the volume _hive_.
-
-
-
-### Update
-
-The bucket update command allows changing access permissions on bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| --addAcl                       | Optional, Comma separated ACLs that will 
added to bucket.
-|  --removeAcl                   | Optional, Comma separated list of acl to 
remove.
-|  Uri                           | The name of the bucket.
-
-{{< highlight bash >}}
-ozone sh bucket update --addAcl=user:bilbo:rw /hive/jan
-{{< /highlight >}}
-
-The above command gives user bilbo read/write permission to the bucket.
-
-You can try out these commands from the docker instance of the [Alpha
-Cluster](runningviadocker.html).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/BuildingSources.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/BuildingSources.md 
b/hadoop-ozone/docs/content/BuildingSources.md
deleted file mode 100644
index 1953f47..0000000
--- a/hadoop-ozone/docs/content/BuildingSources.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: Building from Sources
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 5
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-***This is a guide on how to build the ozone sources.  If you are <font
-color="red">not</font>
-planning to build sources yourself, you can safely skip this page.***
-
-If you are a Hadoop ninja, and wise in the ways of Apache,  you already know
-that a real Apache release is a source release.
-
-If you want to build from sources, Please untar the source tarball and run
-the ozone build command. This instruction assumes that you have all the
-dependencies to build Hadoop on your build machine. If you need instructions
-on how to build Hadoop, please look at the Apache Hadoop Website.
-
-{{< highlight bash >}}
-mvn clean package -DskipTests=true -Dmaven.javadoc.skip=true -Phdds -Pdist 
-Dtar -DskipShade
-{{< /highlight >}}
-
-
-This will build an ozone-\<version\>.tar.gz in your target directory.
-
-You can copy this tarball and use this instead of binary artifacts that are
-provided along with the official release.
-
-## How to test the build
-You can run the acceptance tests in the hadoop-ozone directory to make sure
-that  your build is functional. To launch the acceptance tests, please follow
- the instructions in the **README.md** in the
- ```$hadoop_src/hadoop-ozone/acceptance-test``` directory. Acceptance tests
- will start a small ozone cluster and verify that ozone shell and ozone file
- system is fully functional.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/CommandShell.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/CommandShell.md 
b/hadoop-ozone/docs/content/CommandShell.md
deleted file mode 100644
index fdd6c01..0000000
--- a/hadoop-ozone/docs/content/CommandShell.md
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: Ozone CLI
-menu:
-   main:
-      parent: Client
-      weight: 1
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone has a set of command line tools that can be used to manage ozone.
-
-All these commands are invoked via the ```ozone``` script.
-
-The commands supported by ozone are:
-
-   * **classpath** - Prints the class path needed to get the hadoop jar and the
-    required libraries.
-   * **fs** - Runs a command on ozone file system.
-   * **datanode** - Via daemon command, the HDDS data nodes can be started or
-   stopped.
-   * **envvars** - Display computed Hadoop environment variables.
-   * **freon** -  Runs the ozone load generator.
-   * **genesis**  - Developer Only, Ozone micro-benchmark application.
-   * **getconf** -  Reads ozone config values from configuration.
-   * **jmxget**  - Get JMX exported values from NameNode or DataNode.
-   * **om** -   Ozone Manager, via daemon command can be started or stopped.
-   * **sh** -  Primary command line interface for ozone.
-   * **scm** -  Storage Container Manager service, via daemon can be
-   stated or stopped.
-   * **scmcli** -  Developer only, Command Line Interface for the Storage
-   Container Manager.
-   * **version** - Prints the version of Ozone and HDDS.
-   * **genconf** -  Generate minimally required ozone configs and output to
-   ozone-site.xml.
-
-## Understanding Ozone command shell
-The most used command when working with Ozone is the Ozone command shell.
-Ozone command shell gives a command shell interface to work against
-Ozone.
-
-The Ozone shell commands take the following format.
-
-> _ozone sh object action url_
-
-**ozone** script is used to invoke all Ozone sub-commands. The ozone shell is
-invoked via ```sh``` command.
-
-The object can be a volume, bucket or a key. The action is various verbs like
- create, list, delete etc.
-
-
-Ozone URL can point to a volume, bucket or keys in the following format:
-
-_\[scheme\]\[server:port\]/volume/bucket/key_
-
-
-Where,
-
-1. Scheme - Can be one of the following
-    * o3  - Ozone's native RPC protocol. If you specify this scheme, the
-    native RPC protocol is used while communicating with Ozone Manager and
-    data nodes.
-    * http/https - If an HTTP protocol is specified, then Ozone shell assumes
-     that you are interested in using the Ozone Rest protocol and falls back
-     to using the REST protocol instead of RPC.
- If no protocol is specified, the Ozone shell defaults to the native RPC
- protocol.
-
-2. Server:Port - This is the address of the Ozone Manager. This can be server
- only, in that case, the default port is used. If this value is omitted
-then the defaults specified in the ozone-site.xml will be used for Ozone
-Manager address.
-
-Depending on the call, the volume/bucket/key names will be part of the URL.
-Please see volume commands, bucket commands, and key commands section for more
-detail.
-
-## Invoking help
-
-Ozone shell help can be invoked at _object_ level or at _action_ level.
-For example:
-
-{{< highlight bash >}}
-ozone sh volume --help
-{{< /highlight >}}
-
-This will show all possible actions for volumes.
-
-or it can be invoked to explain a specific action like
-{{< highlight bash >}}
-ozone sh volume create --help
-{{< /highlight >}}
-This command will give you command line options of the create command.
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/Concepts.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Concepts.md 
b/hadoop-ozone/docs/content/Concepts.md
deleted file mode 100644
index 7f7dd3b..0000000
--- a/hadoop-ozone/docs/content/Concepts.md
+++ /dev/null
@@ -1,108 +0,0 @@
----
-title: Architecture
-date: "2017-10-10"
-menu: main
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone is a redundant, distributed object store build by
-leveraging primitives present in HDFS. The primary design point of ozone is 
scalability, and it aims to scale to billions of objects.
-
-Ozone consists of volumes, buckets, and keys. A volume is similar to a home 
directory in the ozone world. Only an administrator can create it. Volumes are 
used to store buckets. Once a volume is created users can create as many 
buckets as needed. Ozone stores data as keys which live inside these buckets.
-
-Ozone namespace is composed of many storage volumes. Storage volumes are also 
used as the basis for storage accounting.
-
-To access a key, an Ozone URL has the following format:
-
-```
-http://servername:port/volume/bucket/key
-```
-
-Where the server name is the name of a data node, the port is the data node 
HTTP port. The volume represents the name of the ozone volume; bucket is an 
ozone bucket created by the user and key represents the file.
-
-Please look at the [command line interface]({{< ref "CommandShell.md#shell" 
>}})  for more info.
-
-Ozone supports both REST and RPC protocols. Clients can choose either of these 
protocols to communicate with Ozone. Please see the [client documentation]({{< 
ref "JavaApi.md" >}}) for more details.
-
-Ozone separates namespace management and block space management; this helps
-ozone to scale much better. The namespace is managed by a daemon called
-[Ozone Manager ]({{< ref "OzoneManager.md" >}}) (OM),  and block space is
-managed by [Storage Container Manager] ({{< ref "Hdds.md" >}}) (SCM).
-
-The data nodes provide replication and ability to store blocks; these blocks 
are stored in groups to reduce the metadata pressure on SCM. This groups of 
blocks are called storage containers. Hence the block manager is called storage 
container
-manager.
-
-Ozone Overview
---------------
-
-
The following diagram is a high-level overview of the core components of 
Ozone.


-
-![Architecture diagram](../../OzoneOverview.svg)
-
-The main elements of Ozone are
:
-
-### Ozone Manager

-
-[Ozone Manager]({{< ref "OzoneManager.md" >}}) (OM) takes care of the Ozone's 
namespace.
-All ozone objects like volumes, buckets, and keys are managed by OM. In Short, 
OM is the metadata manager for Ozone.
-OM talks to blockManager(SCM) to get blocks and passes it on to the Ozone
-client.  Ozone client writes data to these blocks.
-OM will eventually be replicated via Apache Ratis for High Availability.

-
-### Storage Container Manager
-
-[Storage Container Manager]({{< ref "Hdds.md" >}}) (SCM) is the block and 
cluster manager for Ozone.
-SCM along with data nodes offer a service called 'storage containers'.
-A storage container is a group unrelated of blocks that are managed together 
as a single entity.
-
-SCM offers the following abstractions.


-
-![SCM Abstractions](../../SCMBlockDiagram.png)
-
-### Blocks
-Blocks are similar to blocks in HDFS. They are replicated store of data. 
Client writes data to blocks.
-
-### Containers
-A collection of blocks replicated and managed together.
-
-### Pipelines
-SCM allows each storage container to choose its method of replication.
-For example, a storage container might decide that it needs only one copy of a 
 block
-and might choose a stand-alone pipeline. Another storage container might want 
to have a very high level of reliability and pick a RATIS based pipeline. In 
other words, SCM allows different kinds of replication strategies to co-exist. 
The client while writing data, chooses a storage container with required 
properties.
-
-### Pools
-A group of data nodes is called a pool. For scaling purposes,
-we define a pool as a set of machines. This makes management of data nodes 
easier.
-
-### Nodes
-The data node where data is stored. SCM monitors these nodes via heartbeat.
-
-### Clients
-Ozone ships with a set of clients. Ozone [CLI]({{< ref "CommandShell.md#shell" 
>}}) is the command line interface like 'hdfs' command.
 [Freon] ({{< ref 
"Freon.md" >}}) is a  load generation tool for Ozone.

-
-### REST Handler
-Ozone provides an RPC (Remote Procedure Call) as well as a  REST 
(Representational State Transfer)  interface. This allows clients to be written 
in many languages quickly. Ozone strives to maintain an API compatibility 
between REST and RPC.
-For most purposes, a client can make one line change to switch from REST to 
RPC or vice versa.  

-
-### Ozone File System
-Ozone file system (TODO: Add documentation) is a Hadoop compatible file 
system. This allows Hadoop services and applications like Hive and Spark to run 
against
-Ozone without any change.
-
-### Ozone Client
-This is similar to DFSClient in HDFS. This is the standard client to talk to 
Ozone. All other components that we have discussed so far rely on Ozone client. 
Ozone client supports both RPC and REST protocols.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/Dozone.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Dozone.md 
b/hadoop-ozone/docs/content/Dozone.md
deleted file mode 100644
index f6efb0f..0000000
--- a/hadoop-ozone/docs/content/Dozone.md
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: "Dozone & Dev Tools"
-date: 2017-08-10
-menu:
-   main:
-      parent: Tools
----
-
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Dozone stands for docker for ozone. Ozone supports docker to make it easy to 
develop and test ozone.  Starting a docker based ozone container is simple.
-
-In the `compose/ozone` directory there are two files that define the docker 
and ozone settings.
-
-Developers can
-
-{{< highlight bash >}}
-cd compose/ozone
-{{< /highlight >}}
-
-and simply run
-
-{{< highlight bash >}}
-docker-compose up -d
-{{< /highlight >}}
-
-to run a ozone cluster on docker.
-
-This command will launch a Namenode, OM, SCM and a data node.
-
-To access the OM UI, one can run 'http://localhost:9874'.
-
-_Please note_: dozone does not map the data node ports to the 9864. Instead, 
it maps to the ephemeral port range. So many examples in the command shell will 
not work if you run those commands from the host machine. To find out where the 
data node port is listening, you can run the `docker ps` command or always ssh 
into a container before running ozone commands.
-
-To shutdown a running docker based ozone cluster, please run
-
-{{< highlight bash >}}
-docker-compose down
-{{< /highlight >}}
-
-
-Adding more config settings
----------------------------
-The file called `docker-config` contains all ozone specific config settings. 
This file is processed to create the ozone-site.xml.
-
-Useful Docker & Ozone Commands
-------------------------------
-
-If you make any modifications to ozone, the simplest way to test it is to run 
freon and unit tests.
-
-Here are the instructions to run freon in a docker based cluster.
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-{{< /highlight >}}
-
-This will open a bash shell on the data node container.
-Now we can execute freon for load generation.
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10
-{{< /highlight >}}
-
-Here is a set  helpful commands while working with docker for ozone.
-To check the status of the components:
-
-{{< highlight bash >}}
-docker-compose ps
-{{< /highlight >}}
-
-To get logs from a specific node/service:
-
-{{< highlight bash >}}
-docker-compose logs scm
-{{< /highlight >}}
-
-
-As the WebUI ports are forwarded to the external machine, you can check the 
web UI:
-
-* For the Storage Container Manager: http://localhost:9876
-* For the Ozone Managerr: http://localhost:9874
-* For the Datanode: check the port with docker ps (as there could be multiple 
data node ports are mapped to the ephemeral port range)
-* For the Namenode: http://localhost:9870
-
-You can start multiple data nodes with:
-
-{{< highlight bash >}}
-docker-compose scale datanode=3
-{{< /highlight >}}
-
-You can test the commands from the [Ozone CLI]({{< ref "CommandShell.md#shell" 
>}}) after opening a new bash shell in one of the containers:
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-{{< /highlight >}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/Freon.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Freon.md 
b/hadoop-ozone/docs/content/Freon.md
deleted file mode 100644
index 6ef0280..0000000
--- a/hadoop-ozone/docs/content/Freon.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: Freon
-date: "2017-09-02T23:58:17-07:00"
-menu:
-   main:
-      parent: Tools
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Overview
---------
-
-Freon is a load-generator for Ozone. This tool is used for testing the 
functionality of ozone.
-
-### Random keys
-
-In randomkeys mode, the data written into ozone cluster is randomly generated.
-Each key will be of size 10 KB.
-
-The number of volumes/buckets/keys can be configured. The replication type and
-factor (e.g. replicate with Ratis to 3 nodes) can also be configured.
-
-For more information use
-
-`bin/ozone freon --help`
-
-### Example
-
-{{< highlight bash >}}
-ozone freon randomkeys --numOfVolumes=10 --numOfBuckets 10 --numOfKeys 10  
--replicationType=RATIS --factor=THREE
-{{< /highlight >}}
-
-{{< highlight bash >}}
-***************************************************
-Status: Success
-Git Base Revision: 48aae081e5afacbb3240657556b26c29e61830c3
-Number of Volumes created: 10
-Number of Buckets created: 100
-Number of Keys added: 1000
-Ratis replication factor: THREE
-Ratis replication type: RATIS
-Average Time spent in volume creation: 00:00:00,035
-Average Time spent in bucket creation: 00:00:00,319
-Average Time spent in key creation: 00:00:03,659
-Average Time spent in key write: 00:00:10,894
-Total bytes written: 10240000
-Total Execution time: 00:00:16,898
-***********************
-{{< /highlight >}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/Hdds.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Hdds.md 
b/hadoop-ozone/docs/content/Hdds.md
deleted file mode 100644
index 35654cd..0000000
--- a/hadoop-ozone/docs/content/Hdds.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-title: "Hadoop Distributed Data Store"
-date: "2017-09-14"
-menu:
-   main:
-       parent: Architecture
-weight: 10
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-SCM Overview
-------------
-
-Storage Container Manager or SCM is a very important component of ozone. SCM
-offers block and container-based services to Ozone Manager.  A container is a
-collection of unrelated blocks under ozone. SCM and data nodes work together
-to maintain the replication levels needed by the cluster.
-
-It is easier to look at a putKey operation to understand the role that SCM 
plays.
-
-To put a key, a client makes a call to OM with the following arguments.
-
--- putKey(keyName, data, pipeline type, replication count)
-
-1. keyName - refers to the file name.
-2. data - The data that the client wants to write.
-3. pipeline type - Allows the client to select the pipeline type.  A pipeline
- refers to the replication strategy used for replicating a block.  Ozone
- currently supports Stand Alone and Ratis as two different pipeline types.
-4. replication count - This specifies how many copies of the block replica 
should be maintained.
-
-In most cases, the client does not specify the pipeline type and  replication
- count. The default pipeline type and replication count are used.
-
-
-Ozone Manager when it receives the putKey call, makes a call to SCM asking
-for a pipeline instance with the specified property. So if the client asked
-for RATIS replication strategy and a replication count of three, then OM
-requests SCM to return a set of data nodes that meet this capability.
-
-If SCM can find a pipeline (that is, a set of data nodes) that can meet
-the requirement from the client, then those nodes are returned to OM. OM will
-persist this info and return a tuple consisting of {BlockID, ContainerName, 
and Pipeline}.
-
-If SCM is not able to find a pipeline, then SCM creates a logical pipeline and 
then returns it.
-
-
-SCM manages blocks, containers, and pipelines.  To return healthy pipelines,
-SCM also needs to understand the node health. So SCM listens to heartbeats
-from data nodes and acts as the node manager too.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/JavaApi.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/JavaApi.md 
b/hadoop-ozone/docs/content/JavaApi.md
deleted file mode 100644
index e538f4b..0000000
--- a/hadoop-ozone/docs/content/JavaApi.md
+++ /dev/null
@@ -1,172 +0,0 @@
----
-title: "Java API"
-date: "2017-09-14"
-menu:
-   main:
-      parent: "Client"
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Introduction
--------------
-
-Ozone ships with its own client library, that supports both RPC(Remote
-Procedure call) and REST(Representational State Transfer). This library is
-the primary user interface to ozone.
-
-It is trivial to switch from RPC to REST or vice versa, by setting the
-property _ozone.client.protocol_ in the configuration or by calling the
-appropriate factory method.
-
-## Creating an Ozone client
-The Ozone client factory creates the ozone client. It allows the user to
-specify the protocol of communication. For example, to get an REST client, we
-can use
-
-{{< highlight java >}}
-OzoneClient ozClient = OzoneClientFactory.getRestClient();
-{{< /highlight >}}
-
-And to get a RPC client we can call
-
-{{< highlight java >}}
-OzoneClient ozClient = OzoneClientFactory.getRpcClient();
-{{< /highlight >}}
-
-If the user want to create a client based on the configuration, then they can
-call
-
-{{< highlight java >}}
-OzoneClient ozClient = OzoneClientFactory.getClient();
-{{< /highlight >}}
-
-and an appropriate client based on configuration will be returned.
-
-## Writing data using Ozone Client
-
-The hierarchy of data inside ozone is a volume, bucket and a key. A volume
-is a collection of buckets. A bucket is a collection of keys. To write data
-to the ozone, you need a volume, bucket and a key.
-
-### Creating a Volume
-
-Once we have a client, we need to get a reference to the ObjectStore.  This
-is done via
-
-{{< highlight java >}}
-ObjectStore objectStore = ozClient.getObjectStore();
-{{< /highlight >}}
-
-An object store represents an active cluster against which the client is 
working.
-
-{{< highlight java >}}
-// Let us create a volume to store our game assets.
-// This uses default arguments for creating that volume.
-objectStore.createVolume(“assets”);
-
-// Let us verify that the volume got created.
-OzoneVolume assets = objectStore.getVolume(“assets”);
-{{< /highlight >}}
-
-
-It is possible to pass an array of arguments to the createVolume by creating 
volume arguments.
-
-### Creating a Bucket
-
-Once you have a volume, you can create buckets inside the volume.
-
-{{< highlight bash >}}
-// Let us create a bucket called videos.
-assets.createBucket(“videos”);
-OzoneBucket video = assets.getBucket(“videos”);
-{{< /highlight >}}
-
-At this point we have a usable volume and a bucket. Our volume is called 
assets and bucket is called videos.
-
-Now we can create a Key.
-
-### Reading and Writing a Key
-
-With a bucket object the users can now read and write keys. The following code 
reads a video called intro.mp4 from the local disk and stores in the video 
bucket that we just created.
-
-{{< highlight bash >}}
-// read data from the file, this is a user provided function.
-byte [] videoData = readFile(“intro.mp4”);
-
-// Create an output stream and write data.
-OzoneOutputStream videoStream = video.createKey(“intro.mp4”, 1048576);
-videoStream.write(videoData);
-
-// Close the stream when it is done.
- videoStream.close();
-
-
-// We can use the same bucket to read the file that we just wrote, by creating 
an input Stream.
-// Let us allocate a byte array to hold the video first.
-byte[] data = new byte[(int)1048576];
-OzoneInputStream introStream = video.readKey(“intro.mp4”);
-// read intro.mp4 into the data buffer
-introStream.read(data);
-introStream.close();
-{{< /highlight >}}
-
-
-Here is a complete example of the code that we just wrote. Please note the 
close functions being called in this program.
-
-{{< highlight java >}}
-// Let us create a client
-OzoneClient ozClient = OzoneClientFactory.getClient();
-
-// Get a reference to the ObjectStore using the client
-ObjectStore objectStore = ozClient.getObjectStore();
-
-// Let us create a volume to store our game assets.
-// This default arguments for creating that volume.
-objectStore.createVolume(“assets”);
-
-// Let us verify that the volume got created.
-OzoneVolume assets = objectStore.getVolume(“assets”);
-
-// Let us create a bucket called videos.
-assets.createBucket(“videos”);
-OzoneBucket video = assets.getBucket(“videos”);
-
-// read data from the file, this is assumed to be a user provided function.
-byte [] videoData = readFile(“intro.mp4”);
-
-// Create an output stream and write data.
-OzoneOutputStream videoStream = video.createKey(“intro.mp4”, 1048576);
-videoStream.write(videoData);
-
-// Close the stream when it is done.
- videoStream.close();
-
-
-// We can use the same bucket to read the file that we just wrote, by creating 
an input Stream.
-// Let us allocate a byte array to hold the video first.
-
-byte[] data = new byte[(int)1048576];
-OzoneInputStream introStream = video.readKey(“intro.mp4”);
-introStream.read(data);
-
-// Close the stream when it is done.
-introStream.close();
-
-// Close the client.
-ozClient.close();
-{{< /highlight >}}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/KeyCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/KeyCommands.md 
b/hadoop-ozone/docs/content/KeyCommands.md
deleted file mode 100644
index 0139a28..0000000
--- a/hadoop-ozone/docs/content/KeyCommands.md
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: Key Commands
-menu:
-   main:
-      parent: Client
-      weight: 3
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone shell supports the following key commands.
-
-  * [get](#get)
-  * [put](#put)
-  * [delete](#delete)
-  * [info](#info)
-  * [list](#list)
-
-
-### Get
-
-The key get command downloads a key from Ozone cluster to local file system.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key in 
**/volume/bucket/key** format.
-|  FileName                      | Local file to download the key to.
-
-
-{{< highlight bash >}}
-ozone sh key get /hive/jan/sales.orc sales.orc
-{{< /highlight >}}
-Downloads the file sales.orc from the _/hive/jan_ bucket and writes to the
-local file sales.orc.
-
-### Put
-
-Uploads a file from the local file system to the specified bucket.
-
-***Params:***
-
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key in 
**/volume/bucket/key** format.
-|  FileName                      | Local file to upload.
-| -r, --replication              | Optional, Number of copies, ONE or THREE 
are the options. Picks up the default from cluster configuration.
-
-{{< highlight bash >}}
-ozone sh key put /hive/jan/corrected-sales.orc sales.orc
-{{< /highlight >}}
-The above command will put the sales.orc as a new key into 
_/hive/jan/corrected-sales.orc_.
-
-### Delete
-
-The delete key command removes the key from the bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key.
-
-{{< highlight bash >}}
-ozone sh key delete /hive/jan/corrected-sales.orc
-{{< /highlight >}}
-
-The above command deletes the key _/hive/jan/corrected-sales.orc_.
-
-
-### Info
-
-The key info commands returns the information about the key.
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-|  Uri                           | The name of the key.
-
-{{< highlight bash >}}
-ozone sh key info /hive/jan/sales.orc
-{{< /highlight >}}
-
-The above command will print out the information about _/hive/jan/sales.orc_
-key.
-
-### List
-
-The key list commands allows user to list all keys in a bucket.
-
-***Params:***
-
-| Arguments                      |  Comment                                |
-|--------------------------------|-----------------------------------------|
-| -l, --length                   | Maximum number of results to return. 
Default: 1000
-| -p, --prefix                   | Optional, Only buckets that match this 
prefix will be returned.
-| -s, --start                    | The listing will start from key after the 
start key.
-|  Uri                           | The name of the _volume_.
-
-{{< highlight bash >}}
-ozone sh key list /hive/jan
-{{< /highlight >}}
-
-This command will list all keys in the bucket _/hive/jan_.
-
-
-
-
-
-You can try out these commands from the docker instance of the [Alpha
-Cluster](runningviadocker.html).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/OzoneFS.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/OzoneFS.md 
b/hadoop-ozone/docs/content/OzoneFS.md
deleted file mode 100644
index 06102a2..0000000
--- a/hadoop-ozone/docs/content/OzoneFS.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: Ozone File System
-date: 2017-09-14
-menu: main
-menu:
-   main:
-      parent: Client
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-There are many Hadoop compatible file systems under Hadoop. Hadoop compatible 
file systems ensure that storage backends like Ozone can easily be integrated 
into the Hadoop eco-system.
-
-## Setting up the Ozone file system
-
-To create an ozone file system, we have to choose a bucket where the file 
system would live. This bucket will be used as the backend store for 
OzoneFileSystem. All the files and directories will be stored as keys in this 
bucket.
-
-Please run the following commands to create a volume and bucket, if you don't 
have them already.
-
-{{< highlight bash >}}
-ozone sh volume create /volume
-ozone sh bucket create /volume/bucket
-{{< /highlight >}}
-
-Once this is created, please make sure that bucket exists via the listVolume 
or listBucket commands.
-
-Please add the following entry to the core-site.xml.
-
-{{< highlight xml >}}
-<property>
-  <name>fs.o3fs.impl</name>
-  <value>org.apache.hadoop.fs.ozone.OzoneFileSystem</value>
-</property>
-<property>
-  <name>fs.defaultFS</name>
-  <value>o3fs://localhost:9864/volume/bucket</value>
-</property>
-{{< /highlight >}}
-
-This will make this bucket the default file system for HDFS dfs commands 
and register the o3fs file system type.
-
-You also need to add the ozone-filesystem.jar file to the classpath:
-
-{{< highlight bash >}}
-export 
HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozonefs/hadoop-ozone-filesystem.jar:$HADOOP_CLASSPATH
-{{< /highlight >}}
-
-
-
-
-Once the default Filesystem has been setup, users can run commands like ls, 
put, mkdir, etc.
-For example,
-
-{{< highlight bash >}}
-hdfs dfs -ls /
-{{< /highlight >}}
-
-or
-
-{{< highlight bash >}}
-hdfs dfs -mkdir /users
-{{< /highlight >}}
-
-
-Or put command etc. In other words, all programs like Hive, Spark, and Distcp 
will work against this file system.
-Please note that any keys created/deleted in the bucket using methods apart 
from OzoneFileSystem will show up as directories and files in the Ozone File 
System.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/OzoneManager.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/OzoneManager.md 
b/hadoop-ozone/docs/content/OzoneManager.md
deleted file mode 100644
index 5eb8663..0000000
--- a/hadoop-ozone/docs/content/OzoneManager.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-title: "Ozone Manager"
-date: "2017-09-14"
-menu:
-   main:
-       parent: Architecture
-weight: 11
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-OM Overview
--------------
-
-Ozone Manager or OM is the namespace manager for Ozone. The clients (RPC 
clients, Rest proxy, Ozone file system, etc.) communicate with OM to create and 
delete various ozone objects.
-
-Each ozone volume is the root of a namespace under OM. This is very different 
from HDFS which provides a single rooted file system.
-
-Ozone's namespace is a collection of volumes or is a forest instead of a
-single rooted tree as in HDFS. This property makes it easy to deploy multiple
- OMs for scaling, this feature is under development.
-
-OM Metadata
------------------
-
-Conceptually, OM maintains a list of volumes, buckets, and keys. For each 
user, it maintains a list of volumes. For each volume, the list of buckets and 
for each bucket the list of keys.
-
-Right now, OM is a single instance service. Ozone already relies on Apache 
Ratis (A Replicated State Machine based on Raft protocol). OM will be extended 
to replicate all its metadata via Ratis. With that, OM will be highly available.
-
-OM UI
-------------
-
-OM supports a simple UI for the time being. The default port of OM is 9874. To 
access the OM UI, the user can connect to http://OM:port or for a concrete 
example,
-```
-http://omserver:9874/
-```
-OM UI primarily tries to measure load and latency of OM. The first section of 
OM UI relates to the number of operations seen by the cluster broken down by 
the object, operation and whether the operation was successful.
-
-The latter part of the UI is focused on latency and number of operations that 
OM is performing.
-
-One of the hardest problems in HDFS world is discovering the numerous settings 
offered to tune HDFS. Ozone solves that problem by tagging the configs. To 
discover settings, click on "Common Tools"->Config.  This will take you to the 
ozone config UI.
-
-Config UI
-------------
-
-The ozone config UI is a matrix with row representing the tags, and columns 
representing All, OM and SCM.
-
-Suppose a user wanted to discover the required settings for ozone. Then the 
user can tick the checkbox that says "Required."
-This will filter out all "Required" settings along with the description of 
what each setting does.
-
-The user can combine different checkboxes and UI will combine the results. 
That is, If you have more than one row selected, then all keys for those chosen 
tags are displayed together.
-
-We are hopeful that this leads to a more straightforward way of discovering 
settings that manage ozone.
-
-
-OM and SCM
--------------------
-[Storage container manager]({{< ref "Hdds.md" >}}) or (SCM) is the block 
manager
- for ozone. When a client requests OM for a set of data nodes to write data, 
OM talks to SCM and gets a block.
-
-A block returned by SCM contains a pipeline, which is a set of nodes that we 
participate in that block replication.
-
-So OM is dependent on SCM for reading and writing of Keys. However, OM is 
independent of SCM while doing metadata operations like ozone volume or bucket 
operations.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/RealCluster.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/RealCluster.md 
b/hadoop-ozone/docs/content/RealCluster.md
deleted file mode 100644
index 78dd46e..0000000
--- a/hadoop-ozone/docs/content/RealCluster.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-title: Starting an Ozone Cluster
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 3
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone 
Manager.
-
-{{< highlight bash >}}
-ozone scm --init
-{{< /highlight >}}
-This allows SCM to create the cluster Identity and initialize its state.
-The ```init``` command is similar to Namenode format. Init command is executed 
only once, that allows SCM to create all the required on-disk structures to 
work correctly.
-{{< highlight bash >}}
-ozone --daemon start scm
-{{< /highlight >}}
-
-Once we know SCM is up and running, we can create an Object Store for our use. 
This is done by running the following command.
-
-{{< highlight bash >}}
-ozone om --init
-{{< /highlight >}}
-
-
-Once Ozone manager has created the Object Store, we are ready to run the name
-services.
-
-{{< highlight bash >}}
-ozone --daemon start om
-{{< /highlight >}}
-
-At this point Ozone's name services, the Ozone manager, and the block service 
SCM are all running.
-**Please note**: If SCM is not running
-```om --init``` command will fail. SCM start will fail if on-disk data 
structures are missing. So please make sure you have done both ```scm --init``` 
and ```om --init``` commands.
-
-Now we need to start the data nodes. Please run the following command on each 
datanode.
-{{< highlight bash >}}
-ozone --daemon start datanode
-{{< /highlight >}}
-
-At this point SCM, Ozone Manager and data nodes are up and running.
-
-***Congratulations!, You have set up a functional ozone cluster.***
-
--------
-If you want to make your life simpler, you can just run
-{{< highlight bash >}}
-ozone scm --init
-ozone om --init
-start-ozone.sh
-{{< /highlight >}}
-This assumes that you have set up the slaves file correctly and ssh
-configuration that allows ssh-ing to all data nodes. This is the same as the
-HDFS configuration, so please refer to HDFS documentation on how to set this
-up.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/Rest.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/Rest.md 
b/hadoop-ozone/docs/content/Rest.md
deleted file mode 100644
index a25d3ab..0000000
--- a/hadoop-ozone/docs/content/Rest.md
+++ /dev/null
@@ -1,544 +0,0 @@
----
-title: REST API
-menu:
-   main:
-      parent: Client
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-The Ozone REST APIs allow users to access ozone via the REST protocol.
-
-## Authentication and Authorization
-
-For the time being, the default authentication mode of the REST API is insecure
-access mode, which is *Simple* mode. Under this mode, the ozone server trusts
-the user name specified by the client and does not perform any authentication.
-
-User name can be specified in HTTP header by
-
-* `x-ozone-user: {USER_NAME}`
-
-for example, if you add the following header *x-ozone-user: bilbo* in the HTTP
-request, then the operation will be executed as the *bilbo* user.
-In *Simple* mode, there is no real authorization either. Client can be
-authorized to obtain administrator privilege by using HTTP header
-
-* `Authorization: {AUTH_METHOD} {SIGNATURE}`
-
-for example, if you set the following header *Authorization: OZONE root* in the
HTTP request,
-then ozone will authorize the client with administrator privilege.
-
-## Common REST Headers
-
-The following HTTP headers must be set for each REST call.
-
-| Property | Description |
-|:---- |:----
-| Authorization | The authorization field determines which authentication 
method is used by ozone. Currently only *simple* mode is supported, the 
corresponding value is *OZONE*. Optionally an user name can be set as *OZONE 
{USER_NAME}* to authorize as a particular user. |
-| Date | Standard HTTP header that represents dates. The format is - day of 
the week, month, day, year and time (military time format) in GMT. Any other 
time zone will be rejected by ozone server. Eg. *Date : Mon, Apr 4, 2016 
06:22:00 GMT*. This field is required. |
-| x-ozone-version | A required HTTP header to indicate which version of API 
this call will be communicating to. E.g *x-ozone-version: v1*. Currently ozone 
only publishes v1 version API. |
-
-## Common Reply Headers
-
-The common reply headers are part of all Ozone server replies.
-
-| Property | Description |
-|:---- |:----
-| Date | This is the HTTP date header and it is set to server’s local time 
expressed in GMT. |
-| x-ozone-request-id | This is a UUID string that represents an unique request 
ID. This ID is used to track the request through the ozone system and is useful 
for debugging purposes. |
-| x-ozone-server-name | Fully qualified domain name of the sever which handled 
the request. |
-
-## Volume APIs
-
-### Create a Volume
-
-This API allows admins to create a new storage volume.
-
-Schema:
-
-- `POST /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB> | Optional. Quota size in BYTEs, MBs, 
GBs or TBs |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H 
"Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE root" 
"http://localhost:9880/volume-to-create";
-
-this request creates a volume as user *bilbo*, the authorization field is set 
to *OZONE root* because this call requires administration privilege. The client 
receives a response with zero content length.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 2173deb5-bbb7-4f0a-8236-f354784e3bae
-    Date: Tue, 27 Jun 2017 07:42:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Update Volume
-
-This API allows administrators to update volume info such as ownership and 
quota. This API requires administration privilege.
-
-Schema:
-
-- `PUT /{volume}?quota=<VOLUME_QUOTA>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| quota | long<BYTES \| MB \| GB \| TB>  \| remove | Optional. Quota size in 
BYTEs, MBs, GBs or TBs. Or use string value *remove* to remove an existing 
quota for a volume. |
-
-Sample HTTP PUT request:
-
-    curl -X PUT -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: john"  
http://localhost:9880/volume-to-update
-
-this request modifies the owner of */volume-to-update* to *john*.
-
-### Delete Volume
-
-This API allows user to delete a volume owned by themselves if the volume is 
not empty. Administrators can delete volumes owned by any user.
-
-Schema:
-
-- `DELETE /{volume}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user: bilbo"  
http://localhost:9880/volume-to-delete
-
-this request deletes an empty volume */volume-to-delete*. The client receives 
a zero length content.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 6af14c64-e3a9-40fe-9634-df60b7cbbc6a
-    Date: Tue, 27 Jun 2017 08:49:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Volume
-
-This API allows user to read the info of a volume owned by themselves. 
Administrators can read volume info owned by any user.
-
-Schema:
-
-- `GET /{volume}?info=volume`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "volume" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo?info=volume";
-
-this request gets the info of volume */volume-of-bilbo*, the client receives a 
response with a JSON object of volume info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: a2224806-beaf-42dd-a68e-533cd7508f74
-    Date: Tue, 27 Jun 2017 07:55:35 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 171
-    Connection: keep-alive
-
-    {
-      "owner" : { "name" : "bilbo" },
-      "quota" : { "unit" : "TB", "size" : 1048576 },
-      "volumeName" : "volume-of-bilbo",
-      "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-      "createdBy" : "root"
-    }
-
-### List Volumes
-
-This API allows user to list all volumes owned by themselves. Administrators 
can list all volumes owned by any user.
-
-Schema:
-
-- `GET 
/?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_VOLUME_KEY>`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only volumes with this prefix are included in 
the result. |
-| max-keys | int | Optional. Maximum number of volumes included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Volume name from where listing should start, 
this key is excluded in the result. It must be a valid volume name. |
-| root-scan | bool | Optional. List all volumes in the cluster if this is set 
to true. Default false. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/?max-keys=100&prefix=Jan";
-
-this request gets all volumes owned by *bilbo* and each volume's name contains
prefix *Jan*; the result contains at most *100* entries. The client receives a
list of JSON objects, each of which describes the info of a volume.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7fa0dce1-a8bd-4387-bc3c-1dac4b710bb1
-    Date: Tue, 27 Jun 2017 08:07:04 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 602
-    Connection: keep-alive
-
-    {
-      "volumes" : [
-        {
-          "owner" : { "name" : "bilbo"},
-          "quota" : { "unit" : "TB", "size" : 2 },
-          "volumeName" : "Jan-vol1",
-          "createdOn" : "Tue, 27 Jun 2017 07:42:04 GMT",
-          "createdBy" : "root"
-      },
-      ...
-      ]
-    }
-
-## Bucket APIs
-
-### Create Bucket
-
-This API allows a user to create a bucket in a volume.
-
-Schema:
-
-- `POST /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
-| x-ozone-storage-class | <DEFAULT \| ARCHIVE \| DISK \| RAM_DISK \| SSD > | 
Optional. Storage type for a volume. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket 
versioning or not. |
-
-Sample HTTP POST request:
-
-    curl -i -X POST -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H 
"Date: Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
http://localhost:9880/volume-of-bilbo/bucket-0
-
-this request creates a bucket *bucket-0* under volume *volume-of-bilbo*.
-
-    HTTP/1.1 201 Created
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 49acfeec-4c85-470a-872b-2eaebd8d751e
-    Date: Tue, 27 Jun 2017 08:55:25 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Update Bucket
-
-Updates bucket meta-data, like ACLs.
-
-Schema:
-
-- `PUT /{volume}/{bucket}`
-
-Additional HTTP Headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| x-ozone-acl | ozone ACLs | Optional. Ozone acls. |
-| x-ozone-bucket-versioning | enabled/disabled | Optional. Do enable bucket 
versioning or not. |
-
-Sample HTTP PUT request:
-
-    curl -i -X PUT -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: 
Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" -H "x-ozone-acl: ADD 
user:peregrin:rw" http://localhost:9880/volume-of-bilbo/bucket-to-update
-
-this request adds the ACL policy specified by HTTP header *x-ozone-acl* to
bucket */volume-of-bilbo/bucket-to-update*; the ACL field *ADD
user:peregrin:rw* grants additional read/write permission to user *peregrin*
on this bucket.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: b061a295-5faf-4b98-94b9-8b3e87c8eb5e
-    Date: Tue, 27 Jun 2017 09:02:37 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Delete Bucket
-
-Deletes a bucket if it is empty. A user can only delete buckets owned by
themselves, and administrators can delete buckets owned by any user, as long as
they are empty.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9880/volume-of-bilbo/bucket-0";
-
-this request deletes bucket */volume-of-bilbo/bucket-0*. The client receives a 
zero length content response.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f57acd7a-2116-4c2f-aa2f-5a483db81c9c
-    Date: Tue, 27 Jun 2017 09:16:52 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-
-### Info Bucket
-
-This API returns information about a given bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}?info=bucket`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | "bucket" | Required and enforced with this value. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo/bucket-0?info=bucket";
-
-this request gets the info of bucket */volume-of-bilbo/bucket-0*. The client
receives a response with a JSON object that contains the bucket info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f125485b-8cae-4c7f-a2d6-5b1fefd6f193
-    Date: Tue, 27 Jun 2017 09:08:31 GMT
-    Content-Type: application/json
-    Content-Length: 138
-    Connection: keep-alive
-
-    {
-      "volumeName" : "volume-of-bilbo",
-      "bucketName" : "bucket-0",
-      "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-      "acls" : [ ],
-      "versioning" : "DISABLED",
-      "storageType" : "DISK"
-    }
-
-### List Buckets
-
-List buckets in a given volume.
-
-Schema:
-
-- `GET 
/{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_BUCKET_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only buckets with this prefix are included in 
the result. |
-| max-keys | int | Optional. Maximum number of buckets included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Bucket name from where listing should start, 
this key is excluded in the result. It must be a valid bucket name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo?max-keys=10";
-
-this request lists all the buckets under volume *volume-of-bilbo*, and the
result contains at most 10 entries. The client receives a response with an
array of JSON objects, each of which represents a bucket's info.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: e048c3d5-169c-470f-9903-632d9f9e32d5
-    Date: Tue, 27 Jun 2017 09:12:18 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 207
-    Connection: keep-alive
-
-    {
-      "buckets" : [ {
-        "volumeName" : "volume-of-bilbo",
-        "bucketName" : "bucket-0",
-        "createdOn" : "Tue, 27 Jun 2017 08:55:25 GMT",
-        "acls" : [ ],
-        "versioning" : null,
-        "storageType" : "DISK",
-        "bytesUsed" : 0,
-        "keyCount" : 0
-        },
-        ...
-      ]
-    }
-
-## Key APIs
-
-### Put Key
-
-This API allows user to create or overwrite keys inside of a bucket.
-
-Schema:
-
-- `PUT /{volume}/{bucket}/{key}`
-
-Additional HTTP headers:
-
-| HTTP Header | Value | Description |
-|:---- |:---- |:----
-| Content-MD5 | MD5 digest | Standard HTTP header, file hash. |
-
-Sample PUT HTTP request:
-
-    curl -X PUT -T /path/to/localfile -H "Authorization:OZONE" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9880/volume-of-bilbo/bucket-0/file-0";
-
-this request uploads a local file */path/to/localfile* specified by option 
*-T* to ozone as user *bilbo*, mapped to ozone key 
*/volume-of-bilbo/bucket-0/file-0*. The client receives a zero length content 
response.
-
-### Get Key
-
-This API allows user to get or download a key from an ozone bucket.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}`
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo/bucket-0/file-0";
-
-this request reads the content of key */volume-of-bilbo/bucket-0/file-0*. If 
the content of the file is plain text, it can be directly dumped onto stdout.
-
-    HTTP/1.1 200 OK
-    Content-Type: application/octet-stream
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 1bcd7de7-d8e3-46bb-afee-bdc933d383b8
-    Date: Tue, 27 Jun 2017 09:35:29 GMT
-    Content-Length: 6
-    Connection: keep-alive
-
-    Hello Ozone!
-
-if the file is not plain text, specify *-O* option in curl command and the 
file *file-0* will be downloaded into current working directory, file name will 
be same as the key. A sample request like following:
-
-    curl -O -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: 
Mon, 26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo/bucket-0/file-1";
-
-response looks like following:
-
-    % Total    % Received % Xferd  Average Speed   Time    Time     Time  
Current
-                                 Dload  Upload   Total   Spent    Left  Speed
-    100 6148k  100 6148k    0     0  24.0M      0 --:--:-- --:--:-- --:--:-- 
24.1M
-
-### Delete Key
-
-This API allows user to delete a key from a bucket.
-
-Schema:
-
-- `DELETE /{volume}/{bucket}/{key}`
-
-Sample HTTP DELETE request:
-
-    curl -i -X DELETE -H "Authorization:OZONE root" -H "Date: Mon, 26 Jun 2017 
04:23:30 GMT" -H "x-ozone-version: v1" -H "x-ozone-user:bilbo" 
"http://localhost:9880/volume-of-bilbo/bucket-0/file-0";
-
-this request deletes key */volume-of-bilbo/bucket-0/file-0*. The client 
receives a zero length content result:
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: f8c4a373-dd5f-4e3a-b6c4-ddf7e191fe91
-    Date: Tue, 27 Jun 2017 14:19:48 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 0
-    Connection: keep-alive
-
-### Info Key
-
-This API returns information about a given key.
-
-Schema:
-
-- `GET /{volume}/{bucket}/{key}?info=key`
-
-Query Parameter:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| info | String, "key" | Required and enforced with this value. |
-
-Sample HTTP DELETE request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo/bucket-0/file-0?info=key";
-
-this request returns information about the key
*/volume-of-bilbo/bucket-0/file-0*. The client receives a JSON object listing
the attributes of the key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: c674343c-a0f2-49e4-bbd6-daa73e7dc131
-    Date: Mon, 03 Jul 2017 14:28:45 GMT
-    Content-Type: application/octet-stream
-    Content-Length: 73
-    Connection: keep-alive
-
-    {
-      "version" : 0,
-      "md5hash" : null,
-      "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-      "size" : 0,
-      "keyName" : "file-0"
-    }
-
-### List Keys
-
-This API allows user to list keys in a bucket.
-
-Schema:
-
-- `GET 
/{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_KEY>`
-
-Query Parameters:
-
-| Query Parameter | Value | Description |
-|:---- |:---- |:----
-| prefix | string | Optional. Only keys with this prefix are included in the 
result. |
-| max-keys | int | Optional. Maximum number of keys included in the result. 
Default is 1024 if not specified. |
-| prev-key | string | Optional. Key name from where listing should start, this 
key is excluded in the result. It must be a valid key name. |
-
-Sample HTTP GET request:
-
-    curl -i -H "x-ozone-user: bilbo" -H "x-ozone-version: v1" -H "Date: Mon, 
26 Jun 2017 04:23:30 GMT" -H "Authorization:OZONE" 
"http://localhost:9880/volume-of-bilbo/bucket-0/?max-keys=100&prefix=file"
-
-this request lists keys under bucket */volume-of-bilbo/bucket-0*; the listing
result is filtered by prefix *file*. The client receives an array of JSON
objects, each of which represents the info of a matched key.
-
-    HTTP/1.1 200 OK
-    x-ozone-server-name: localhost
-    x-ozone-request-id: 7f9fc970-9904-4c56-b671-83a086c6f555
-    Date: Tue, 27 Jun 2017 09:48:59 GMT
-    Content-Type: application/json
-    Content-Length: 209
-    Connection: keep-alive
-
-    {
-      "name" : null,
-      "prefix" : "file",
-      "maxKeys" : 0,
-      "truncated" : false,
-      "keyList" : [ {
-          "version" : 0,
-          "md5hash" : null,
-          "createdOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "modifiedOn" : "Mon, 26 Jun 2017 04:23:30 GMT",
-          "size" : 0,
-          "keyName" : "file-0"
-          },
-          ...
-       ]
-    }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/RunningViaDocker.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/RunningViaDocker.md 
b/hadoop-ozone/docs/content/RunningViaDocker.md
deleted file mode 100644
index 0b8fece..0000000
--- a/hadoop-ozone/docs/content/RunningViaDocker.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: Alpha Cluster
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 1
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-
-***This is an alpha release of Ozone. Please don't use this release in
-production.*** Please check the road map page for features under
-development.
-
-The easiest way to run ozone is to download the release tarball and launch
-ozone via Docker. Docker will create a small ozone cluster on your machine,
-including the data nodes and ozone services.
-
-## Running Ozone via Docker
-
-
-**This assumes that you have Docker installed on the machine.**
-
-* Download the Ozone tarball and untar it.
-
-* Go to the directory where the docker compose files exist and tell
-`docker-compose` to start Ozone in the background. This will start a small
-ozone instance on your machine.
-
-{{< highlight bash >}}
-cd ozone-0.2.1-SNAPSHOT/compose/ozone/
-
-docker-compose up -d
-{{< /highlight >}}
-
-
-To verify that ozone is working as expected, let us log into a data node and
-run _freon_, the load generator for Ozone. The ```exec datanode bash``` command
-will open a bash shell on the datanode. The ozone freon command is executed
-within the datanode container. You can quit freon via CTRL-C any time. The
-```rk``` profile instructs freon to generate random keys.
-
-{{< highlight bash >}}
-docker-compose exec datanode bash
-ozone freon rk
-{{< /highlight >}}
-
-You can check out the **OzoneManager UI** at http://localhost:9874/ to see the
-activity generated by freon.
-While you are there, please don't forget to check out the ozone configuration 
explorer.
-
-***Congratulations! You have just run your first ozone cluster.***
-
-To shutdown the cluster, please run
-{{< highlight bash >}}
-docker-compose down
-{{< /highlight >}}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b01f039/hadoop-ozone/docs/content/RunningWithHDFS.md
----------------------------------------------------------------------
diff --git a/hadoop-ozone/docs/content/RunningWithHDFS.md 
b/hadoop-ozone/docs/content/RunningWithHDFS.md
deleted file mode 100644
index 2fd2bd6..0000000
--- a/hadoop-ozone/docs/content/RunningWithHDFS.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-title: Running concurrently with HDFS
-weight: 1
-menu:
-   main:
-      parent: Starting
-      weight: 4
----
-<!---
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-Ozone is designed to work with HDFS. So it is easy to deploy ozone in an
-existing HDFS cluster.
-
-Ozone does *not* support security today. It is a work in progress and tracked
- in
-[HDDS-4](https://issues.apache.org/jira/browse/HDDS-4). If you enable ozone
-in a secure HDFS cluster, for your own protection Ozone will refuse to work.
-
-In other words, until the Ozone security work is done, Ozone will not work in
-any secure clusters.
-
-The container manager part of Ozone runs inside DataNodes as a pluggable 
module.
-To activate ozone you should define the service plugin implementation class.
-
-<div class="alert alert-warning" role="alert">
-<b>Important</b>: It should be added to the <b>hdfs-site.xml</b> as the plugin 
should
-be activated as part of the normal HDFS Datanode bootstrap.
-</div>
-
-{{< highlight xml >}}
-<property>
-   <name>dfs.datanode.plugins</name>
-   <value>org.apache.hadoop.ozone.HddsDatanodeService</value>
-</property>
-{{< /highlight >}}
-
-You also need to add the ozone-datanode-plugin jar file to the classpath:
-
-{{< highlight bash >}}
-export 
HADOOP_CLASSPATH=/opt/ozone/share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin.jar
-{{< /highlight >}}
-
-
-
-To start ozone with HDFS you should start the following components:
-
- 1. HDFS Namenode (from Hadoop distribution)
- 2. HDFS Datanode (from the Hadoop distribution with the plugin on the
- classpath from the Ozone distribution)
- 3. Ozone Manager (from the Ozone distribution)
- 4. Storage Container manager (from the Ozone distribution)
-
-Please check the log of the datanode whether the HDDS/Ozone plugin is started 
or
-not. Log of datanode should contain something like this:
-
-```
-2018-09-17 16:19:24 INFO  HddsDatanodeService:158 - Started plug-in 
org.apache.hadoop.ozone.web.OzoneHddsDatanodeService@6f94fb9d
-```
-
-<div class="alert alert-warning" role="alert">
-<b>Note:</b> The current version of Ozone is tested with Hadoop 3.1.
-</div>
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to