This is an automated email from the ASF dual-hosted git repository.

sijie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/pulsar.git


The following commit(s) were added to refs/heads/master by this push:
     new 43dd7a7  Update Helm Chart Documentation (#6725)
43dd7a7 is described below

commit 43dd7a7c17ae72bf139a66830133138fccbcfcb6
Author: Sijie Guo <[email protected]>
AuthorDate: Mon Apr 13 10:17:41 2020 -0700

    Update Helm Chart Documentation (#6725)
    
    *Motivation*
    
    The current Helm chart lacks documentation. This pull request aims to add it.
    
    *Changes*
    
    - Update Helm chart documentation
    - Add a get-started section with Helm chart
    - Remove the documentation for deploying with raw YAML files.
---
 .ci/chart_test.sh                                  |   3 +
 .ci/helm.sh                                        |  10 +-
 .github/workflows/pulsar.yml                       |   9 +-
 .github/workflows/pulsar_bk_tls.yml                |   9 +-
 .github/workflows/pulsar_broker_tls.yml            |   9 +-
 .github/workflows/pulsar_function.yml              |   9 +-
 .github/workflows/pulsar_image.yml                 |   9 +-
 .github/workflows/pulsar_jwt_asymmetric.yml        |   9 +-
 .github/workflows/pulsar_jwt_symmetric.yml         |   9 +-
 .github/workflows/pulsar_tls.yml                   |   9 +-
 .github/workflows/pulsar_zk_tls.yml                |   9 +-
 .github/workflows/pulsar_zkbk_tls.yml              |   9 +-
 deployment/kubernetes/helm/README.md               |  56 +--
 site2/docs/deploy-kubernetes.md                    | 390 +--------------------
 site2/docs/getting-started-helm.md                 | 333 ++++++++++++++++++
 site2/docs/helm-deploy.md                          | 384 ++++++++++++++++++++
 site2/docs/helm-install.md                         |  39 +++
 site2/docs/helm-overview.md                        | 110 ++++++
 site2/docs/helm-prepare.md                         |  77 ++++
 site2/docs/helm-tools.md                           |  42 +++
 site2/docs/helm-upgrade.md                         |  34 ++
 site2/website/sidebars.json                        |   9 +
 .../version-2.5.0/deploy-kubernetes.md             | 390 +--------------------
 .../version-2.5.0/getting-started-helm.md          | 334 ++++++++++++++++++
 .../versioned_docs/version-2.5.0/helm-deploy.md    | 385 ++++++++++++++++++++
 .../versioned_docs/version-2.5.0/helm-install.md   |  40 +++
 .../versioned_docs/version-2.5.0/helm-overview.md  | 111 ++++++
 .../versioned_docs/version-2.5.0/helm-prepare.md   |  78 +++++
 .../versioned_docs/version-2.5.0/helm-tools.md     |  43 +++
 .../versioned_docs/version-2.5.0/helm-upgrade.md   |  35 ++
 .../versioned_sidebars/version-2.5.0-sidebars.json |   9 +
 31 files changed, 2094 insertions(+), 908 deletions(-)

diff --git a/.ci/chart_test.sh b/.ci/chart_test.sh
index ca202f1..a34bc14 100755
--- a/.ci/chart_test.sh
+++ b/.ci/chart_test.sh
@@ -52,3 +52,6 @@ if [[ "x${FUNCTION}" == "xtrue" ]]; then
     # install cert manager
     ci::test_pulsar_function
 fi
+
+# delete the cluster
+ci::delete_cluster
diff --git a/.ci/helm.sh b/.ci/helm.sh
index 175e674..5ef6696 100644
--- a/.ci/helm.sh
+++ b/.ci/helm.sh
@@ -23,15 +23,23 @@ CHARTS_HOME=`cd 
${BINDIR}/../deployment/kubernetes/helm/;pwd`
 OUTPUT_BIN=${CHARTS_HOME}/output/bin
 HELM=${OUTPUT_BIN}/helm
 KUBECTL=${OUTPUT_BIN}/kubectl
+KIND_BIN=$OUTPUT_BIN/kind
 NAMESPACE=pulsar
 CLUSTER=pulsar-ci
+CLUSTER_ID=$(uuidgen)
 
 function ci::create_cluster() {
     echo "Creating a kind cluster ..."
-    ${CHARTS_HOME}/hack/kind-cluster-build.sh --name pulsar-ci -c 1 -v 10
+    ${CHARTS_HOME}/hack/kind-cluster-build.sh --name pulsar-ci-${CLUSTER_ID} -c 1 -v 10
     echo "Successfully created a kind cluster."
 }
 
+function ci::delete_cluster() {
+    echo "Deleting the kind cluster ..."
+    # use the kind binary downloaded into OUTPUT_BIN
+    ${KIND_BIN} delete cluster --name=pulsar-ci-${CLUSTER_ID}
+    echo "Successfully deleted the kind cluster."
+}
+
 function ci::install_storage_provisioner() {
     echo "Installing the local storage provisioner ..."
     ${HELM} repo add streamnative https://charts.streamnative.io
diff --git a/.github/workflows/pulsar.yml b/.github/workflows/pulsar.yml
index f37b624..9c91812 100644
--- a/.github/workflows/pulsar.yml
+++ b/.github/workflows/pulsar.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-local-pv.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_bk_tls.yml 
b/.github/workflows/pulsar_bk_tls.yml
index 1415003..5d98b96 100644
--- a/.github/workflows/pulsar_bk_tls.yml
+++ b/.github/workflows/pulsar_bk_tls.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Run chart-testing (lint)
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Run chart-testing (install)
         run: |
           .ci/chart_test.sh .ci/clusters/values-bk-tls.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_broker_tls.yml 
b/.github/workflows/pulsar_broker_tls.yml
index cc55778..a62dc3d 100644
--- a/.github/workflows/pulsar_broker_tls.yml
+++ b/.github/workflows/pulsar_broker_tls.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Run chart-testing (lint)
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Run chart-testing (install)
         run: |
           .ci/chart_test.sh .ci/clusters/values-broker-tls.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_function.yml 
b/.github/workflows/pulsar_function.yml
index 814e7c9..5ffca89 100644
--- a/.github/workflows/pulsar_function.yml
+++ b/.github/workflows/pulsar_function.yml
@@ -40,17 +40,10 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-function.yaml
         env:
           FUNCTION: "true"
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_image.yml 
b/.github/workflows/pulsar_image.yml
index 6c324ae..f2c8cbb 100644
--- a/.github/workflows/pulsar_image.yml
+++ b/.github/workflows/pulsar_image.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-pulsar-image.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_jwt_asymmetric.yml 
b/.github/workflows/pulsar_jwt_asymmetric.yml
index e444a32..a1bc338 100644
--- a/.github/workflows/pulsar_jwt_asymmetric.yml
+++ b/.github/workflows/pulsar_jwt_asymmetric.yml
@@ -40,17 +40,10 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Run chart-testing (lint)
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Run chart-testing (install)
         run: |
           .ci/chart_test.sh .ci/clusters/values-jwt-asymmetric.yaml
         env:
           SYMMETRIC: "false"
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_jwt_symmetric.yml 
b/.github/workflows/pulsar_jwt_symmetric.yml
index 711248d..a2d789f 100644
--- a/.github/workflows/pulsar_jwt_symmetric.yml
+++ b/.github/workflows/pulsar_jwt_symmetric.yml
@@ -40,17 +40,10 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Run chart-testing (install)
         run: |
           .ci/chart_test.sh .ci/clusters/values-jwt-symmetric.yaml
         env:
           SYMMETRIC: "true"
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_tls.yml b/.github/workflows/pulsar_tls.yml
index 6ac9399..283812f 100644
--- a/.github/workflows/pulsar_tls.yml
+++ b/.github/workflows/pulsar_tls.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-tls.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_zk_tls.yml 
b/.github/workflows/pulsar_zk_tls.yml
index cf8a34a..ba77037 100644
--- a/.github/workflows/pulsar_zk_tls.yml
+++ b/.github/workflows/pulsar_zk_tls.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-zk-tls.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/.github/workflows/pulsar_zkbk_tls.yml 
b/.github/workflows/pulsar_zkbk_tls.yml
index 045b0b4..d0ebadd 100644
--- a/.github/workflows/pulsar_zkbk_tls.yml
+++ b/.github/workflows/pulsar_zkbk_tls.yml
@@ -40,15 +40,8 @@ jobs:
         with:
           args: site2 .asf.yaml ct.yaml
 
-      - name: Lint chart
-        id: lint
-        uses: helm/chart-testing-action@master
-        if: steps.docs.outputs.changed_only == 'no'
-        with:
-          command: lint
-
       - name: Install chart
         run: |
           .ci/chart_test.sh .ci/clusters/values-zkbk-tls.yaml
         # Only build a kind cluster if there are chart changes to test.
-        if: steps.lint.outputs.changed == 'true' && 
steps.docs.outputs.changed_only == 'no'
+        if: steps.docs.outputs.changed_only == 'no'
diff --git a/deployment/kubernetes/helm/README.md 
b/deployment/kubernetes/helm/README.md
index 7fed190..b88f477 100644
--- a/deployment/kubernetes/helm/README.md
+++ b/deployment/kubernetes/helm/README.md
@@ -19,58 +19,4 @@
 
 -->
 
-This directory contains the Helm Chart required
-to do a complete Pulsar deployment on Kubernetes.
-
-## Install Helm
-
-Before you start, you need to install helm.
-Following [helm 
documentation](https://docs.helm.sh/using_helm/#installing-helm) to install it.
-
-## Deploy Pulsar
-
-### Minikube
-
-#### Install Minikube
-
-[Install and configure 
minikube](https://github.com/kubernetes/minikube#installation) with
-a [VM driver](https://github.com/kubernetes/minikube#requirements), e.g. 
`kvm2` on Linux
-or `hyperkit` or `VirtualBox` on macOS.
-
-#### Create a K8S cluster on Minikube
-
-```
-minikube start --memory=8192 --cpus=4
-```
-
-#### Set kubectl to use Minikube.
-
-```
-kubectl config use-context minikube
-```
-
-After you created a K8S cluster on Minikube, you can access its dashboard via 
following command:
-
-```
-minikube dashboard
-```
-
-The command will automatically trigger open a webpage in your browser.
-
-#### Install Pulsar Chart
-
-Assume you already cloned pulsar repo in `PULSAR_HOME` directory.
-
-1. Go to Pulsar helm chart directory
-    ```shell
-    cd ${PULSAR_HOME}/deployment/kubernetes/helm
-    ```
-1. Install helm chart.
-    ```shell
-    helm install --values pulsar/values-mini.yaml ./pulsar
-    ```
-
-Once the helm chart is completed on installation, you can access the cluster 
via:
-
-- Web service url: `http://$(minikube ip):30001/`
-- Pulsar service url: `pulsar://$(minikube ip):30002/`
+Read [Deploying Pulsar on 
Kubernetes](http://pulsar.apache.org/docs/en/deploy-kubernetes/) for more 
details.
diff --git a/site2/docs/deploy-kubernetes.md b/site2/docs/deploy-kubernetes.md
index ad59212..c16df09 100644
--- a/site2/docs/deploy-kubernetes.md
+++ b/site2/docs/deploy-kubernetes.md
@@ -4,390 +4,8 @@ title: Deploying Pulsar on Kubernetes
 sidebar_label: Kubernetes
 ---
 
-> ### Tips
->
-> If you want to enable all builtin [Pulsar IO](io-overview.md) connectors in 
your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image 
instead of
-> `apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already 
bundled [all builtin connectors](io-overview.md#working-with-connectors).
+For those looking to get up and running with these charts as fast
+as possible, in a **non-production** use case, we provide
+a [quick start guide](getting-started-helm.md) for Proof of Concept (PoC) 
deployments.
 
-You can easily deploy Pulsar in [Kubernetes](https://kubernetes.io/) clusters, 
either in managed clusters on [Google Kubernetes 
Engine](#pulsar-on-google-kubernetes-engine) or [Amazon Web 
Services](https://aws.amazon.com/) or in [custom 
clusters](#pulsar-on-a-custom-kubernetes-cluster).
-
-The deployment method shown in this guide relies on [YAML](http://yaml.org/) 
definitions for Kubernetes [resources](https://kubernetes.io/docs/reference/). 
The {@inject: github:`deployment/kubernetes`:/deployment/kubernetes} 
subdirectory of the [Pulsar package](pulsar:download_page_url) holds resource 
definitions for:
-
-* A two-bookie BookKeeper cluster
-* A three-node ZooKeeper cluster
-* A three-broker Pulsar cluster
-* A [monitoring stack]() consisting of [Prometheus](https://prometheus.io/), 
[Grafana](https://grafana.com), and the [Pulsar 
dashboard](administration-dashboard.md)
-* A [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) from which 
you can run administrative commands using the 
[`pulsar-admin`](reference-pulsar-admin.md) CLI tool
-
-## Setup
-
-To get started, install a source package from the [downloads 
page](pulsar:download_page_url).
-
-> Note that the Pulsar binary package does *not* contain the necessary YAML 
resources to deploy Pulsar on Kubernetes.
-
-If you want to change the number of bookies, brokers, or ZooKeeper nodes in 
your Pulsar cluster, modify the `replicas` parameter in the `spec` section of 
the appropriate 
[`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
 or 
[`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
 resource.
-
-## Pulsar on Google Kubernetes Engine
-
-[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine) (GKE) 
automates the creation and management of Kubernetes clusters in [Google Compute 
Engine](https://cloud.google.com/compute/) (GCE).
-
-### Prerequisites
-
-To get started, you need:
-
-* A Google Cloud Platform account, which you can sign up for at 
[cloud.google.com](https://cloud.google.com)
-* An existing Cloud Platform project
-* The [Google Cloud SDK](https://cloud.google.com/sdk/downloads) (in 
particular the [`gcloud`](https://cloud.google.com/sdk/gcloud/) and 
[`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#download-as-part-of-the-google-cloud-sdk)
 tools).
-
-### Create a new Kubernetes cluster
-
-You can create a new GKE cluster entering the [`container clusters 
create`](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create)
 command for `gcloud`. This command enables you to specify the number of nodes 
in the cluster, the machine types of those nodes, and so on.
-
-The following example creates a new GKE cluster for Kubernetes version 
[1.6.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v164) 
in the 
[us-central1-a](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
 zone. The cluster is named `pulsar-gke-cluster` and consists of three VMs, 
each using two locally attached SSDs and running on 
[n1-standard-8](https://cloud.google.com/compute/docs/machine-types) machines. 
[Bookie](reference-terminology.md#b [...]
-
-```bash
-$ gcloud container clusters create pulsar-gke-cluster \
-  --zone=us-central1-a \
-  --machine-type=n1-standard-8 \
-  --num-nodes=3 \
-  --local-ssd-count=2 \
-```
-
-By default, bookies run on all the machines that have locally attached SSD 
disks. In this example, all of those machines have two SSDs, but you can add 
different types of machines to the cluster later. You can control which 
machines host bookie servers using 
[labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels).
-
-### Dashboard
-
-You can observe your cluster in the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
 by downloading the credentials for your Kubernetes cluster and opening up a 
proxy to the cluster:
-
-```bash
-$ gcloud container clusters get-credentials pulsar-gke-cluster \
-  --zone=us-central1-a \
-  --project=your-project-name
-$ kubectl proxy
-```
-
-By default, the proxy is opened on port 8001. Now you can navigate to 
[localhost:8001/ui](http://localhost:8001/ui) in your browser to access the 
dashboard. At first your GKE cluster is empty, but that changes as you begin 
deploying Pulsar components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-## Pulsar on Amazon Web Services
-
-You can run Kubernetes on [Amazon Web Services](https://aws.amazon.com/) (AWS) 
in a variety of ways. A very simple way that is [recently 
introduced](https://aws.amazon.com/blogs/compute/kubernetes-clusters-aws-kops/) 
involves using the [Kubernetes Operations](https://github.com/kubernetes/kops) 
(kops) tool.
-
-You can find detailed instructions for setting up a Kubernetes cluster on AWS 
from [here](https://github.com/kubernetes/kops/blob/master/docs/aws.md).
-
-When you create a cluster using those instructions, your `kubectl` config in 
`~/.kube/config` (on MacOS and Linux) is updated for you, so you probably do 
not need to change your configuration. Nonetheless, you can ensure that 
`kubectl` can interact with your cluster by listing the nodes in the cluster:
-
-```bash
-$ kubectl get nodes
-```
-
-If `kubectl` works with your cluster, you can proceed to deploy Pulsar 
components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-## Pulsar on a custom Kubernetes cluster
-
-You can deploy Pulsar on a custom, non-GKE Kubernetes cluster as well. You can 
find detailed documentation on how to choose a Kubernetes installation method 
that suits your needs in the [Picking the Right 
Solution](https://kubernetes.io/docs/setup/pick-right-solution) guide in the 
Kubernetes docs.
-
-The easiest way to run a Kubernetes cluster is to do so locally. To install a 
mini local cluster for testing purposes and running in local VMs, you can 
either:
-
-1. Use [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) 
to run a single-node Kubernetes cluster.
-1. Create a local cluster running on multiple VMs on the same machine.
-
-### Minikube
-
-1. [Install and configure 
minikube](https://github.com/kubernetes/minikube#installation) with
-   a [VM driver](https://github.com/kubernetes/minikube#requirements), for 
example, `kvm2` on Linux or `hyperkit` or `VirtualBox` on macOS.
-1. Create a kubernetes cluster on Minikube.
-    ```shell
-    minikube start --memory=8192 --cpus=4 \
-        --kubernetes-version=<version>
-    ```
-    `<version>` can be any [Kubernetes version supported by your minikube 
installation](https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/).
 Example: `v1.16.1`
-1. Set `kubectl` to use Minikube.
-    ```shell
-    kubectl config use-context minikube
-    ```
-
-In order to use the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
-with local Kubernetes cluster on Minikube, enter the command below:
-
-```bash
-$ minikube dashboard
-```
-
-The command automatically triggers opening a webpage in your browser. At first 
your local cluster is empty, but that changes as you begin deploying Pulsar 
components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-### Multiple VMs
-
-For the second option, follow the 
[instructions](https://github.com/pires/kubernetes-vagrant-coreos-cluster) for 
running Kubernetes using [CoreOS](https://coreos.com/) on 
[Vagrant](https://www.vagrantup.com/). You can follow an abridged version of 
those instructions from here.
-
-
-First, make sure you have [Vagrant](https://www.vagrantup.com/downloads.html) 
and [VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Then 
clone the repo and start up the cluster:
-
-```bash
-$ git clone https://github.com/pires/kubernetes-vagrant-coreos-cluster
-$ cd kubernetes-vagrant-coreos-cluster
-
-# Start a three-VM cluster
-$ NODES=3 USE_KUBE_UI=true vagrant up
-```
-
-Create SSD disk mount points on the VMs using this script:
-
-```bash
-$ for vm in node-01 node-02 node-03; do
-    NODES=3 vagrant ssh $vm -c "sudo mkdir -p /mnt/disks/ssd0"
-    NODES=3 vagrant ssh $vm -c "sudo mkdir -p /mnt/disks/ssd1"
-  done
-```
-
-Bookies expect two logical devices to mount for 
[journal](concepts-architecture-overview.md#journal-storage) and persistent 
message storage to be available. In this VM exercise, you can create two 
directories on each VM.
-
-Once the cluster is up, you can verify that `kubectl` can access it:
-
-```bash
-$ kubectl get nodes
-NAME           STATUS                     AGE       VERSION
-172.17.8.101   Ready,SchedulingDisabled   10m       v1.6.4
-172.17.8.102   Ready                      8m        v1.6.4
-172.17.8.103   Ready                      6m        v1.6.4
-172.17.8.104   Ready                      4m        v1.6.4
-```
-
-In order to use the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
 with your local Kubernetes cluster, first, you need to use `kubectl` to create 
a proxy to the cluster:
-
-```bash
-$ kubectl proxy
-```
-
-Now you can access the web interface at 
[localhost:8001/ui](http://localhost:8001/ui). At first your local cluster is 
empty, but that changes as you begin deploying Pulsar components using 
`kubectl` [component by component](#deploying-pulsar-components), or using 
[`helm`](#deploying-pulsar-components-helm).
-
-## Deploy Pulsar components
-
-Now that you have set up a Kubernetes cluster, either on [Google Kubernetes 
Engine](#pulsar-on-google-kubernetes-engine) or on a [custom 
cluster](#pulsar-on-a-custom-kubernetes-cluster), you can begin deploying the 
components that make up Pulsar. You can find the YAML resource definitions for 
Pulsar components in the `kubernetes` folder of the [Pulsar source 
package](pulsar:download_page_url).
-
-In that package, you can find different sets of resource definitions for 
different environments.
-
-- `deployment/kubernetes/google-kubernetes-engine`: for Google Kubernetes 
Engine (GKE)
-- `deployment/kubernetes/aws`: for AWS
-- `deployment/kubernetes/generic`: for a custom Kubernetes cluster
-
-To begin, `cd` into the appropriate folder.
-
-### Deploy ZooKeeper
-
-You *must* deploy ZooKeeper as the first Pulsar component, as ZooKeeper is a 
dependency for the others.
-
-```bash
-$ kubectl apply -f zookeeper.yaml
-```
-
-Wait until all three ZooKeeper server pods are up and have the status 
`Running`. You can check on the status of the ZooKeeper pods at any time:
-
-```bash
-$ kubectl get pods -l component=zookeeper
-NAME      READY     STATUS             RESTARTS   AGE
-zk-0      1/1       Running            0          18m
-zk-1      1/1       Running            0          17m
-zk-2      0/1       Running            6          15m
-```
-
-This step may take several minutes, as Kubernetes needs to download the Docker 
image on the VMs.
-
-### Initialize cluster metadata
-
-Once ZooKeeper runs, you need to [initialize the 
metadata](#cluster-metadata-initialization) for the Pulsar cluster in 
ZooKeeper. This includes system metadata for 
[BookKeeper](reference-terminology.md#bookkeeper) and Pulsar more broadly. You 
only need to run the Kubernetes 
[job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/)
 in the `cluster-metadata.yaml` file once:
-
-```bash
-$ kubectl apply -f cluster-metadata.yaml
-```
-
-For the sake of reference, that job runs the following command on an ephemeral 
pod:
-
-```bash
-$ bin/pulsar initialize-cluster-metadata \
-  --cluster local \
-  --zookeeper zookeeper \
-  --configuration-store zookeeper \
-  --web-service-url http://broker.default.svc.cluster.local:8080/ \
-  --broker-service-url pulsar://broker.default.svc.cluster.local:6650/
-```
-
-### Deploy the rest of the components
-
-Once you have successfully initialized cluster metadata, you can then deploy 
the bookies, brokers, monitoring stack ([Prometheus](https://prometheus.io), 
[Grafana](https://grafana.com), and the [Pulsar 
dashboard](administration-dashboard.md)), and Pulsar cluster proxy:
-
-```bash
-$ kubectl apply -f bookie.yaml
-$ kubectl apply -f broker.yaml
-$ kubectl apply -f proxy.yaml
-$ kubectl apply -f monitoring.yaml
-$ kubectl apply -f admin.yaml
-```
-
-You can check on the status of the pods for these components either in the 
Kubernetes Dashboard or using `kubectl`:
-
-```bash
-$ kubectl get pods -w -l app=pulsar
-```
-
-### Set up properties and namespaces
-
-Once all of the components are up and running, you need to create at least one 
Pulsar tenant and at least one namespace.
-
->If Pulsar [authentication and authorization](security-overview.md) is turned 
on,you do not have to strictly perform this step though you are allowed to 
change [policies](admin-api-namespaces.md) for each of the namespaces later.
-
-You can create properties and namespaces (and perform any other administrative 
tasks) using the `pulsar-admin` pod that is already configured to act as an 
admin client for your newly created Pulsar cluster. One easy way to perform 
administrative tasks is to create an alias for the 
[`pulsar-admin`](reference-pulsar-admin.md) tool installed on the admin pod.
-
-```bash
-$ alias pulsar-admin='kubectl exec pulsar-admin -it -- bin/pulsar-admin'
-```
-
-Now, any time you run `pulsar-admin`, you can run commands from that pod. This 
command creates a tenant called `ten`:
-
-```bash
-$ pulsar-admin tenants create ten \
-  --admin-roles admin \
-  --allowed-clusters local
-```
-
-This command creates a `ns` namespace under the `ten` tenant:
-
-```bash
-$ pulsar-admin namespaces create ten/ns
-```
-
-To verify that everything has gone as planned:
-
-```bash
-$ pulsar-admin tenants list
-public
-ten
-
-$ pulsar-admin namespaces list ten
-ten/ns
-```
-
-Now that you have a namespace and tenant set up, you can move on to 
[experimenting with your Pulsar cluster](#experimenting-with-your-cluster) from 
within the cluster or [connecting to the cluster](#client-connections) using a 
Pulsar client.
-
-### Experiment with your cluster
-
-Now that you have successfully created a tenant and namespace, you can begin 
experimenting with your running Pulsar cluster. Using the same `pulsar-admin` 
pod via an alias, as in the section above, you can use 
[`pulsar-perf`](reference-cli-tools.md#pulsar-perf) to create a test 
[producer](reference-terminology.md#producer) to publish 10,000 messages a 
second on a topic in the [tenant](reference-terminology.md#tenant) and 
[namespace](reference-terminology.md#namespace) you have created.
-
-First, create an alias to use the `pulsar-perf` tool via the admin pod:
-
-```bash
-$ alias pulsar-perf='kubectl exec pulsar-admin -it -- bin/pulsar-perf'
-```
-
-Now, produce messages:
-
-```bash
-$ pulsar-perf produce persistent://public/default/my-topic \
-  --rate 10000
-```
-
-Similarly, you can start a [consumer](reference-terminology.md#consumer) to 
subscribe to and receive all the messages on that topic:
-
-```bash
-$ pulsar-perf consume persistent://public/default/my-topic \
-  --subscriber-name my-subscription-name
-```
-
-You can also view [stats](administration-stats.md) for the topic using the 
[`pulsar-admin`](reference-pulsar-admin.md#persistent-stats) tool:
-
-```bash
-$ pulsar-admin persistent stats persistent://public/default/my-topic
-```
-
-### Monitor
-
-The default monitoring stack for Pulsar on Kubernetes consists of 
[Prometheus](#prometheus), [Grafana](#grafana), and the [Pulsar 
dashbaord](administration-dashboard.md).
-
-> If you deploy the cluster to Minikube, the following monitoring ports are 
mapped at the minikube VM:
->
-> - Prometheus port: 30003
-> - Grafana port: 30004
-> - Dashboard port: 30005
->
-> You can use `minikube ip` to find the IP address of the minikube VM, and 
then use their mapped ports
-> to access corresponding services. For example, you can access Pulsar 
dashboard at `http://$(minikube ip):30005`.
-
-#### Prometheus
-
-A [Prometheus](https://prometheus.io) instance running inside the cluster can 
collect all Pulsar metrics in Kubernetes. Typically, you do not have to access 
Prometheus directly. Instead, you can use the [Grafana interface](#grafana) 
that displays the data stored in Prometheus.
-
-#### Grafana
-
-In your Kubernetes cluster, you can use [Grafana](https://grafana.com) to view 
dashbaords for Pulsar [namespaces](reference-terminology.md#namespace) (message 
rates, latency, and storage), JVM stats, 
[ZooKeeper](https://zookeeper.apache.org), and 
[BookKeeper](reference-terminology.md#bookkeeper). You can get access to the 
pod serving Grafana using the 
[`port-forward`](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster)
 command of `kubectl`:
-
-```bash
-$ kubectl port-forward \
-  $(kubectl get pods -l component=grafana -o 
jsonpath='{.items[*].metadata.name}') 3000
-```
-
-You can then access the dashboard in your web browser at 
[localhost:3000](http://localhost:3000).
-
-#### Pulsar dashboard
-
-While Grafana and Prometheus are used to provide graphs with historical data, 
[Pulsar dashboard](administration-dashboard.md) reports more detailed current 
data for individual [topics](reference-terminology.md#topic).
-
-For example, you can have sortable tables showing all namespaces, topics, and 
broker stats, with details on the IP address for consumers, how long they have 
been connected, and much more.
-
-You can access to the pod serving the Pulsar dashboard using the 
[`port-forward`](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster)
 command of `kubectl`:
-
-```bash
-$ kubectl port-forward \
-  $(kubectl get pods -l component=dashboard -o 
jsonpath='{.items[*].metadata.name}') 8080:80
-```
-
-You can then access the dashboard in your web browser at 
[localhost:8080](http://localhost:8080).
-
-### Client connections
-
-> If you deploy the cluster to Minikube, the proxy ports are mapped at the 
minikube VM:
->
-> - Http port: 30001
-> - Pulsar binary protocol port: 30002
->
-> You can use `minikube ip` to find the IP address of the minikube VM, and 
then use their mapped ports
-> to access corresponding services. For example, pulsar webservice url is at 
`http://$(minikube ip):30001`.
-
-Once your Pulsar cluster is running on Kubernetes, you can connect to it using 
a Pulsar client. You can fetch the IP address for the Pulsar proxy running in 
your Kubernetes cluster using `kubectl`:
-
-```bash
-$ kubectl get service broker-proxy \
-  --output=jsonpath='{.status.loadBalancer.ingress[*].ip}'
-```
-
-If the IP address for the proxy is, for example, 35.12.13.198, you can connect 
to Pulsar using `pulsar://35.12.13.198:6650`.
-
-You can find client documentation for:
-
-* [Java](client-libraries-java.md)
-* [Python](client-libraries-python.md)
-* [C++](client-libraries-cpp.md)
-
-
-## Deploy Pulsar components (helm)
-
-Pulsar also provides a [Helm](https://docs.helm.sh/) chart for deploying a 
Pulsar cluster to Kubernetes. Before you start, make sure you follow [Helm 
documentation](https://docs.helm.sh/using_helm) to install helm.
-
-> Assume you clone a pulsar repo under a `PULSAR_HOME` directory.
-
-### Minikube
-
-1. Go to Pulsar helm chart directory
-    ```shell
-    cd ${PULSAR_HOME}/deployment/kubernetes/helm
-    ```
-1. Install helm chart to a K8S cluster on Minikube.
-    ```shell
-    helm install --values pulsar/values-mini.yaml ./pulsar
-    ```
-
-Once the helm chart is completed on installation, you can access the cluster 
via:
-
-- Web service url: `http://$(minikube ip):30001/`
-- Pulsar service url: `pulsar://$(minikube ip):30002/`
+To configure and install a Pulsar cluster on Kubernetes for production usage, follow the complete [Installation Guide](helm-install.md).
\ No newline at end of file
diff --git a/site2/docs/getting-started-helm.md 
b/site2/docs/getting-started-helm.md
new file mode 100644
index 0000000..7276a12
--- /dev/null
+++ b/site2/docs/getting-started-helm.md
@@ -0,0 +1,333 @@
+---
+id: kubernetes-helm
+title: Get started in Kubernetes
+sidebar_label: Run Pulsar in Kubernetes
+---
+
+This section guides you through every step of quickly installing and running
+Apache Pulsar with Helm on Kubernetes, including:
+
+- Install Apache Pulsar on Kubernetes using Helm
+- Start and stop Apache Pulsar
+- Create topics using `pulsar-admin`
+- Produce and consume messages using Pulsar clients
+- Monitor Apache Pulsar status with Prometheus and Grafana
+
+For deploying a Pulsar cluster for production usage, please read the 
documentation on [how to configure and install a Pulsar Helm 
chart](helm-deploy.md).
+
+## Prerequisites
+
+- Kubernetes server 1.14.0+
+- kubectl 1.14.0+
+- Helm 3.0+
+
+> Tip
+> For the following steps, steps 2 and 3 are for developers, and steps 4 and 5 are for administrators.
+
+## Step 0: Prepare a Kubernetes cluster
+
+Before installing a Pulsar Helm chart, you have to create a Kubernetes 
cluster. You can follow [the instructions](helm-prepare.md) to prepare a 
Kubernetes cluster.
+
+We use [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) 
in this quick start guide.
+
+1. Create a Kubernetes cluster on Minikube.
+
+    ```bash
+    minikube start --memory=8192 --cpus=4 --kubernetes-version=<k8s-version>
+    ```
+
+    The `<k8s-version>` can be any [Kubernetes version supported by your minikube installation](https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/), for example, `v1.16.1`.
+
+2. Set `kubectl` to use Minikube.
+
+    ```bash
+    kubectl config use-context minikube
+    ```
+
+3. To use the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) with the local Kubernetes cluster on Minikube, enter the command below:
+
+    ```bash
+    minikube dashboard
+    ```
+    The command automatically opens a web page in your browser.
+
+## Step 1: Install Pulsar Helm Chart
+
+1. Clone the Pulsar Helm chart repository.
+
+    ```bash
+    git clone https://github.com/apache/pulsar
+    cd pulsar/deployment/kubernetes/helm/
+    ```
+
+2. Run `prepare_helm_release.sh` to create the secrets required for installing the Apache Pulsar Helm chart. The username `pulsar` and password `pulsar` are used for logging into the Grafana dashboard and Pulsar Manager.
+
+    ```bash
+    ./scripts/pulsar/prepare_helm_release.sh \
+        -n pulsar \
+        -k pulsar-mini \
+        --control-center-admin pulsar \
+        --control-center-password pulsar \
+        -c
+    ```
+
+3. Use the Pulsar Helm chart to install a Pulsar cluster to Kubernetes.
+
+    ```bash
+    helm install \
+        --values examples/values-minikube.yaml \
+        pulsar-mini pulsar
+    ```
+
+4. Check the status of all pods.
+
+    ```bash
+    kubectl get pods -n pulsar
+    ```
+
+    If all pods start up successfully, you can see that `STATUS` changes to `Running` or `Completed`.
+
+    **Output**
+
+    ```bash
+    NAME                                         READY   STATUS      RESTARTS   AGE
+    pulsar-mini-bookie-0                         1/1     Running     0          9m27s
+    pulsar-mini-bookie-init-5gphs                0/1     Completed   0          9m27s
+    pulsar-mini-broker-0                         1/1     Running     0          9m27s
+    pulsar-mini-grafana-6b7bcc64c7-4tkxd         1/1     Running     0          9m27s
+    pulsar-mini-prometheus-5fcf5dd84c-w8mgz      1/1     Running     0          9m27s
+    pulsar-mini-proxy-0                          1/1     Running     0          9m27s
+    pulsar-mini-pulsar-init-t7cqt                0/1     Completed   0          9m27s
+    pulsar-mini-pulsar-manager-9bcbb4d9f-htpcs   1/1     Running     0          9m27s
+    pulsar-mini-toolset-0                        1/1     Running     0          9m27s
+    pulsar-mini-zookeeper-0                      1/1     Running     0          9m27s
+    ```
+
+5. Check the status of all services in the namespace `pulsar`.
+
+    ```bash
+    kubectl get services -n pulsar
+    ```
+
+    **Output**
+    
+    ```bash
+    NAME                         TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                       AGE
+    pulsar-mini-bookie           ClusterIP      None             <none>        3181/TCP,8000/TCP             11m
+    pulsar-mini-broker           ClusterIP      None             <none>        8080/TCP,6650/TCP             11m
+    pulsar-mini-grafana          LoadBalancer   10.106.141.246   <pending>     3000:31905/TCP                11m
+    pulsar-mini-prometheus       ClusterIP      None             <none>        9090/TCP                      11m
+    pulsar-mini-proxy            LoadBalancer   10.97.240.109    <pending>     80:32305/TCP,6650:31816/TCP   11m
+    pulsar-mini-pulsar-manager   LoadBalancer   10.103.192.175   <pending>     9527:30190/TCP                11m
+    pulsar-mini-toolset          ClusterIP      None             <none>        <none>                        11m
+    pulsar-mini-zookeeper        ClusterIP      None             <none>        2888/TCP,3888/TCP,2181/TCP    11m
+    ```
+
+## Step 2: Use pulsar-admin to create Pulsar tenants/namespaces/topics
+
+`pulsar-admin` is the CLI tool for Pulsar. In this step, you can use 
`pulsar-admin` to create resources including tenants, namespaces, and topics.
+
+1. Enter the `toolset` container.
+
+    ```bash
+    kubectl exec -it -n pulsar pulsar-mini-toolset-0 -- /bin/bash
+    ```
+
+2. In the `toolset` container, create a tenant named `apache`.
+
+    ```bash
+    bin/pulsar-admin tenants create apache
+    ```
+
+    Then you can list the tenants to check whether the tenant was created successfully.
+
+    ```bash
+    bin/pulsar-admin tenants list
+    ```
+
+    You should see output similar to the following, which shows that the tenant `apache` has been created successfully.
+
+    ```bash
+    "apache"
+    "public"
+    "pulsar"
+    ```
+
+3. In the `toolset` container, create a namespace named `pulsar` in the tenant 
`apache`.
+
+    ```bash
+    bin/pulsar-admin namespaces create apache/pulsar
+    ```
+
+    Then you can list the namespaces of the tenant `apache` to check whether the namespace was created successfully.
+
+    ```bash
+    bin/pulsar-admin namespaces list apache
+    ```
+
+    You should see output similar to the following, which shows that the namespace `apache/pulsar` has been created successfully.
+
+    ```bash
+    "apache/pulsar"
+    ```
+
+4. In the `toolset` container, create a topic `test-topic` with `4` partitions 
in the namespace `apache/pulsar`.
+
+    ```bash
+    bin/pulsar-admin topics create-partitioned-topic apache/pulsar/test-topic 
-p 4
+    ```
+
+5. In the `toolset` container, list all the partitioned topics in the 
namespace `apache/pulsar`.
+
+    ```bash
+    bin/pulsar-admin topics list-partitioned-topics apache/pulsar
+    ```
+
+    Then you can see all the partitioned topics in the namespace 
`apache/pulsar`.
+
+    ```bash
+    "persistent://apache/pulsar/test-topic"
+    ```
+
+## Step 3: Use Pulsar client to produce and consume messages
+
+You can use the Pulsar client to create producers and consumers to produce and 
consume messages.
+
+By default, the Helm chart exposes the Pulsar cluster through a Kubernetes `LoadBalancer`. In Minikube, you can use the following command to get the IP address of the proxy service.
+
+```bash
+kubectl get services -n pulsar | grep pulsar-mini-proxy
+```
+
+You will see output similar to the following.
+
+```bash
+pulsar-mini-proxy            LoadBalancer   10.97.240.109    <pending>     80:32305/TCP,6650:31816/TCP   28m
+```
+
+This output shows the node ports to which the Pulsar cluster's binary port and HTTP port are mapped. The port after `80:` is the HTTP port, while the port after `6650:` is the binary port.
+
+Then you can find the IP address of your Minikube server by running the following command.
+
+```bash
+minikube ip
+```
+
+At this point, you can construct the service URLs used to connect your Pulsar client.
+
+```
+webServiceUrl=http://$(minikube ip):<exposed-http-port>/
+brokerServiceUrl=pulsar://$(minikube ip):<exposed-binary-port>/
+```
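+
+For example, a minimal sketch using the sample output above (node ports `32305` and `31816` come from that example; substitute the ports from your own cluster):
+
+```bash
+export WEB_SERVICE_URL=http://$(minikube ip):32305/
+export BROKER_SERVICE_URL=pulsar://$(minikube ip):31816/
+```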
+
+Then proceed with the following steps:
+
+1. Download the Apache Pulsar tarball from [downloads 
page](https://pulsar.apache.org/en/download/).
+
+2. Decompress the downloaded tarball.
+
+    ```bash
+    tar -xf <file-name>.tar.gz
+    ```
+
+3. Expose `PULSAR_HOME`.
+
+    (1) Enter the directory of the decompressed download file.
+
+    (2) Expose `PULSAR_HOME` as an environment variable.
+
+    ```bash
+    export PULSAR_HOME=$(pwd)
+    ```
+
+4. Configure the Pulsar client.
+
+    In the `${PULSAR_HOME}/conf/client.conf` file, replace `webServiceUrl` and `brokerServiceUrl` with the service URLs you got from the steps above.
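+
+    One way to make the replacement in place is with `sed`; a sketch, assuming the sample node ports above (adjust them to your own cluster):
+
+    ```bash
+    # rewrite the two service URL settings in client.conf (a .bak backup is kept)
+    sed -i.bak \
+        -e "s|^webServiceUrl=.*|webServiceUrl=http://$(minikube ip):32305/|" \
+        -e "s|^brokerServiceUrl=.*|brokerServiceUrl=pulsar://$(minikube ip):31816/|" \
+        ${PULSAR_HOME}/conf/client.conf
+    ```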
+
+5. Create a subscription to consume messages from `apache/pulsar/test-topic`.
+
+    ```bash
+    bin/pulsar-client consume -s sub apache/pulsar/test-topic  -n 0
+    ```
+
+6. Open a new terminal. In the new terminal, create a producer and send 10 
messages to the `test-topic` topic.
+
+    ```bash
+    bin/pulsar-client produce apache/pulsar/test-topic  -m "---------hello 
apache pulsar-------" -n 10
+    ```
+
+7. Verify the results.
+
+    - From producer side
+
+        **Output**
+        
+        The messages have been produced successfully.
+
+        ```bash
+        18:15:15.489 [main] INFO  
org.apache.pulsar.client.cli.PulsarClientTool - 10 messages successfully 
produced
+        ```
+
+    - From consumer side
+
+        **Output**
+
+        At the same time, the consumer receives the messages, as shown below.
+
+        ```bash
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ```
+
+## Step 4: Use Pulsar Manager to manage the cluster
+
+[Pulsar Manager](administration-pulsar-manager.md) is a web-based GUI 
management tool for managing and monitoring Pulsar.
+
+1. By default, Pulsar Manager is exposed as a separate `LoadBalancer` service. You can open the Pulsar Manager UI using the following command:
+
+    ```bash
+    minikube service pulsar-mini-pulsar-manager -n pulsar
+    ```
+
+2. The Pulsar Manager UI opens in your browser. You can use the username `pulsar` and password `pulsar` to log into Pulsar Manager.
+
+3. In Pulsar Manager UI, you can create an environment. 
+
+    - Click `New Environment` button in the top-left corner.
+    - Type `pulsar-mini` for the field `Environment Name` in the popup window.
+    - Type `http://pulsar-mini-broker:8080` for the field `Service URL` in the 
popup window.
+    - Click `Confirm` button in the popup window.
+
+4. After the environment is created successfully, you are redirected to the `tenants` page of that environment. Then you can create `tenants`, `namespaces`, and `topics` using Pulsar Manager.
+
+## Step 5: Use Prometheus and Grafana to monitor the cluster
+
+Grafana is an open-source visualization tool that can be used to visualize time series data in dashboards.
+
+1. By default, Grafana is exposed as a separate `LoadBalancer` service. You can open the Grafana UI using the following command:
+
+    ```bash
+    minikube service pulsar-mini-grafana -n pulsar
+    ```
+
+2. The Grafana UI opens in your browser. You can use the username `pulsar` and password `pulsar` to log into the Grafana dashboard.
+
+3. You will be able to view dashboards for different components of a Pulsar 
cluster.
\ No newline at end of file
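+
+If the `LoadBalancer` external IP stays `<pending>` (as in the service listing above), port-forwarding is an alternative way to reach Grafana; a sketch using the service name and port from that listing:
+
+```bash
+kubectl port-forward -n pulsar svc/pulsar-mini-grafana 3000:3000
+```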
diff --git a/site2/docs/helm-deploy.md b/site2/docs/helm-deploy.md
new file mode 100644
index 0000000..85de9e7
--- /dev/null
+++ b/site2/docs/helm-deploy.md
@@ -0,0 +1,384 @@
+---
+id: helm-deploy
+title: Deploying a Pulsar cluster using Helm
+sidebar_label: Deployment
+---
+
+Before running `helm install`, you need to make some decisions about how you 
will run Pulsar.
+Options can be specified using Helm's `--set option.name=value` command line 
option.
+
+## Selecting configuration options
+
+In each section, collect the options that you will combine into your `helm install` command.
+
+### Kubernetes Namespace
+
+By default, the chart is installed to a namespace called `pulsar`.
+
+```yaml
+namespace: pulsar
+```
+
+If you decide to install the chart into a different k8s namespace, you can 
include this option in your Helm install command:
+
+```bash
+--set namespace=<different-k8s-namespace>
+```
+
+By default, the chart doesn't create the namespace.
+
+```yaml
+namespaceCreate: false
+```
+
+If you want the chart to create the k8s namespace automatically, you can 
include this option in your Helm install command.
+
+```bash
+--set namespaceCreate=true
+```
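+
+For example, a minimal sketch combining the two namespace options above (the namespace name is illustrative):
+
+```bash
+helm install pulsar pulsar \
+    --set namespace=pulsar-cluster \
+    --set namespaceCreate=true
+```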
+
+### Persistence
+
+By default, the chart creates PersistentVolumeClaims with the expectation that a dynamic provisioner will create the underlying PersistentVolumes.
+
+```yaml
+volumes:
+  persistence: true
+  # configure the components to use local persistent volume
+  # the local provisioner should be installed prior to enable local persistent 
volume
+  local_storage: false
+```
+
+If you would like to use local persistent volumes as the persistent storage 
for your Helm release, you can install 
[local-storage-provisioner](#install-local-storage-provisioner) and include the 
following option in your Helm install command. 
+
+```bash
+--set volumes.local_storage=true
+```
+
+> **Important**: After initial installation, making changes to your storage 
settings requires manually editing Kubernetes objects,
+> so it's best to plan ahead before installing your production instance of 
Pulsar to avoid extra storage migration work.
+
+This chart is designed for production use. To use this chart in a development environment (e.g. minikube), you can disable persistence by including this option in your Helm install command.
+
+```bash
+--set volumes.persistence=false
+```
+
+### Affinity 
+
+By default, `anti-affinity` is turned on to ensure that pods of the same component run on different nodes.
+
+```yaml
+affinity:
+  anti_affinity: true
+```
+
+If you are planning to use this chart in a development environment (e.g. minikube), you can disable `anti-affinity` by including this option in your Helm install command.
+
+```bash
+--set affinity.anti_affinity=false
+```
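+
+For a development environment, this option is often combined with the persistence option above; a sketch:
+
+```bash
+helm install pulsar pulsar \
+    --set volumes.persistence=false \
+    --set affinity.anti_affinity=false
+```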
+
+### Components
+
+This chart is designed for production usage. It deploys a production-ready 
Pulsar cluster including Pulsar core components and monitoring components.
+
+You can customize the components to deploy by turning on/off individual 
components.
+
+```yaml
+## Components
+##
+## Control what components of Apache Pulsar to deploy for the cluster
+components:
+  # zookeeper
+  zookeeper: true
+  # bookkeeper
+  bookkeeper: true
+  # bookkeeper - autorecovery
+  autorecovery: true
+  # broker
+  broker: true
+  # functions
+  functions: true
+  # proxy
+  proxy: true
+  # toolset
+  toolset: true
+  # pulsar manager
+  pulsar_manager: true
+
+## Monitoring Components
+##
+## Control what components of the monitoring stack to deploy for the cluster
+monitoring:
+  # monitoring - prometheus
+  prometheus: true
+  # monitoring - grafana
+  grafana: true
+```
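+
+Each of these toggles is an ordinary chart value, so you can switch a component off on the command line; a sketch disabling Pulsar Manager and Grafana:
+
+```bash
+--set components.pulsar_manager=false \
+--set monitoring.grafana=false
+```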
+
+### Docker Images
+
+This chart is designed to enable controlled upgrades, so it provides the capability to configure independent image versions for components. You can customize the image for each component individually.
+
+```yaml
+## Images
+##
+## Control what images to use for each component
+images:
+  zookeeper:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  bookie:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  autorecovery:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  broker:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  proxy:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  functions:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+  prometheus:
+    repository: prom/prometheus
+    tag: v1.6.3
+    pullPolicy: IfNotPresent
+  grafana:
+    repository: streamnative/apache-pulsar-grafana-dashboard-k8s
+    tag: 0.0.4
+    pullPolicy: IfNotPresent
+  pulsar_manager:
+    repository: apachepulsar/pulsar-manager
+    tag: v0.1.0
+    pullPolicy: IfNotPresent
+    hasCommand: false
+```
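+
+Because these are ordinary chart values, you can also override an individual image on the command line; a sketch (substitute the tag you want; `2.5.0` is the default shown above):
+
+```bash
+--set images.broker.repository=apachepulsar/pulsar-all \
+--set images.broker.tag=2.5.0
+```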
+
+### TLS
+
+This Pulsar chart can be configured to enable TLS to protect all the traffic between components. Before you enable TLS, you have to provision TLS certificates
+for the components that will use it.
+
+- [Provision TLS certs using 
`cert-manager`](#provision-tls-certs-using-cert-manager)
+
+#### Provision TLS certs using cert-manager
+
+In order to use `cert-manager` to provision the TLS certificates, you have to install
+[cert-manager](#install-cert-manager) before installing the Pulsar chart. After
+successfully installing cert-manager, you can set `certs.internal_issuer.enabled`
+to `true`, so that the Pulsar chart uses `cert-manager` to generate `selfsigning` TLS
+certs for the configured components.
+
+```yaml
+certs:
+  internal_issuer:
+    enabled: false
+    component: internal-cert-issuer
+    type: selfsigning
+```
+You can also customize the generated TLS certificates by configuring the following fields.
+
+```yaml
+tls:
+  # common settings for generating certs
+  common:
+    # 90d
+    duration: 2160h
+    # 15d
+    renewBefore: 360h
+    organization:
+      - pulsar
+    keySize: 4096
+    keyAlgorithm: rsa
+    keyEncoding: pkcs8
+```
+
+#### Enable TLS
+
+After installing `cert-manager`, you can then set `tls.enabled` to `true` to 
enable TLS encryption for the entire cluster.
+
+```yaml
+tls:
+  enabled: false
+```
+
+You can also control whether to enable TLS encryption for individual components.
+
+```yaml
+tls:
+  # settings for generating certs for proxy
+  proxy:
+    enabled: false
+    cert_name: tls-proxy
+  # settings for generating certs for broker
+  broker:
+    enabled: false
+    cert_name: tls-broker
+  # settings for generating certs for bookies
+  bookie:
+    enabled: false
+    cert_name: tls-bookie
+  # settings for generating certs for zookeeper
+  zookeeper:
+    enabled: false
+    cert_name: tls-zookeeper
+  # settings for generating certs for recovery
+  autorecovery:
+    cert_name: tls-recovery
+  # settings for generating certs for toolset
+  toolset:
+    cert_name: tls-toolset
+```
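+
+For example, a sketch of enabling TLS for only the proxy and broker, with certificates issued by cert-manager (all keys as shown above):
+
+```bash
+--set tls.enabled=true \
+--set tls.proxy.enabled=true \
+--set tls.broker.enabled=true \
+--set certs.internal_issuer.enabled=true
+```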
+
+### Authentication
+
+Authentication is disabled by default. You can set `auth.authentication.enabled` to `true` to turn on authentication.
+Currently, this chart only supports the JWT authentication provider. You can set `auth.authentication.provider` to `jwt` to use it.
+
+```yaml
+# Enable or disable broker authentication and authorization.
+auth:
+  authentication:
+    enabled: false
+    provider: "jwt"
+    jwt:
+      # Enable JWT authentication
+      # If the token is generated by a secret key, set the usingSecretKey as 
true.
+      # If the token is generated by a private key, set the usingSecretKey as 
false.
+      usingSecretKey: false
+  superUsers:
+    # broker to broker communication
+    broker: "broker-admin"
+    # proxy to broker communication
+    proxy: "proxy-admin"
+    # pulsar-admin client to broker/proxy communication
+    client: "admin"
+```
+
+If you decide to enable authentication, you can run [prepare helm release](#prepare-the-helm-release) to generate token secret keys and tokens for the three superusers specified in the `auth.superUsers` field. The generated token keys and superuser tokens are uploaded and stored as Kubernetes secrets prefixed with `<pulsar-release-name>-token-`. You can use the following command to find those secrets.
+
+```bash
+kubectl get secrets -n <k8s-namespace>
+```
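+
+For example, for a release named `pulsar` installed into the `pulsar` namespace, a sketch that lists only the generated token secrets:
+
+```bash
+kubectl get secrets -n pulsar | grep 'pulsar-token-'
+```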
+
+### Authorization
+
+Authorization is disabled by default. Authorization can be enabled
+only if authentication is enabled.
+
+```yaml
+auth:
+  authorization:
+    enabled: false
+```
+
+You can include this option to turn on authorization.
+
+```bash
+--set auth.authorization.enabled=true
+```
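+
+Putting authentication and authorization together, a sketch of the relevant install options (all keys as shown in the sections above):
+
+```bash
+--set auth.authentication.enabled=true \
+--set auth.authentication.provider=jwt \
+--set auth.authorization.enabled=true
+```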
+
+### CPU and RAM resource requirements
+
+The resource requests and number of replicas for the Pulsar components in this chart are set by default to be adequate for a small production deployment. If you are trying to deploy a non-production instance, you can reduce the defaults in order to fit into a smaller cluster.
+
+Once you have collected all of your configuration options, install the dependent charts before proceeding to install the Pulsar chart.
+
+## Install Dependent Charts
+
+### Install Local Storage Provisioner
+
+If you decide to use local persistent volumes as the persistent storage, you 
need to [install a storage provisioner for local persistent 
volumes](https://kubernetes.io/blog/2019/04/04/kubernetes-1.14-local-persistent-volumes-ga/).
+
+One of the easiest ways to get started is to use the local storage provisioner provided along with the Pulsar Helm chart.
+
+```bash
+helm repo add streamnative https://charts.streamnative.io
+helm repo update
+helm install pulsar-storage-provisioner streamnative/local-storage-provisioner
+```
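+
+After the provisioner is installed, remember to point the chart at local persistent volumes when installing, using the option from the Persistence section above:
+
+```bash
+--set volumes.local_storage=true
+```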
+
+### Install Cert Manager
+
+The Pulsar Chart uses [cert-manager](https://github.com/jetstack/cert-manager) 
to automate provisioning and managing TLS certificates. If you decide to enable 
TLS encryption for brokers or proxies, you need to install cert-manager first.
+
+You can follow the [official 
instructions](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm)
 to install cert-manager.
+
+Alternatively, we provide a bash script [install-cert-manager.sh](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/cert-manager/install-cert-manager.sh) to install a cert-manager release into the `cert-manager` namespace.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+./scripts/cert-manager/install-cert-manager.sh
+```
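+
+Before proceeding, you can verify that the cert-manager pods are up, for example:
+
+```bash
+kubectl get pods -n cert-manager
+```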
+
+## Prepare the Helm Release
+
+Once you have installed all the dependent charts and collected all of your configuration options, you can run [prepare_helm_release.sh](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/prepare_helm_release.sh) to prepare the helm release.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+./scripts/pulsar/prepare_helm_release.sh -n <k8s-namespace> -k 
<helm-release-name>
+```
+
+The `prepare_helm_release.sh` script creates the following resources:
+
+- A k8s namespace for installing the Pulsar release
+- A secret for storing the username and password of the control center administrator. The username and password can be passed to `prepare_helm_release.sh` through the `--control-center-admin` and `--control-center-password` flags. The username and password are used for logging into the Grafana dashboard and Pulsar Manager.
+- The JWT secret keys and tokens for three superusers: `broker-admin`, `proxy-admin`, and `admin`. By default, the script generates an asymmetric public/private key pair. You can choose to generate a symmetric secret key by specifying `--symmetric`, as shown in the example after this list.
+    - `proxy-admin` role is used for proxies to communicate to brokers.
+    - `broker-admin` role is used for inter-broker communications.
+    - `admin` role is used by the admin tools.
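+
+For example, a sketch of preparing a release with a symmetric secret key instead of the default asymmetric key pair (the namespace and release name are placeholders):
+
+```bash
+./scripts/pulsar/prepare_helm_release.sh -n <k8s-namespace> -k <helm-release-name> --symmetric
+```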
+
+## Deploy using Helm
+
+Once you have done the following three things, you can proceed to install a 
Helm release.
+
+- Collect all of your configuration options
+- Install dependent charts
+- Prepare the Helm release
+
+In this example, we've named our Helm release `pulsar`.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+helm upgrade --install pulsar pulsar \
+    --timeout 600s \
+    --set [your configuration options]
+```
+
+You can also use the `--version <installation version>` option if you would like to install a specific version of the Pulsar Helm chart.
+
+## Monitoring the Deployment
+
+Once the deployment finishes, which may take 5-10 minutes, Helm outputs the list of installed resources.
+
+You can check the status of the deployment by running `helm status pulsar`. You can also run this command in another terminal while the deployment is taking place.
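+
+While the deployment is in progress, you can also watch the pods come up:
+
+```bash
+kubectl get pods -n <k8s-namespace> -w
+```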
+
+## Accessing the Pulsar Cluster
+
+The default values create a `ClusterIP` for the following resources, which you can use to interact with the cluster.
+
+- Proxy: You can use the IP address to produce and consume messages to the 
installed Pulsar cluster.
+- Pulsar Manager: You can access the Pulsar Manager UI at `http://<pulsar-manager-ip>:9527`.
+- Grafana Dashboard: You can access the Grafana dashboard at 
`http://<grafana-dashboard-ip>:3000`.
+
+To find the IP addresses of those components, use:
+
+```bash
+kubectl get service -n <k8s-namespace>
+```
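+
+As a sketch, assuming your release is named `pulsar` (so the proxy service is named `pulsar-proxy`), you can extract a single service IP with a `jsonpath` query:
+
+```bash
+kubectl get service pulsar-proxy -n <k8s-namespace> \
+  --output=jsonpath='{.spec.clusterIP}'
+```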
\ No newline at end of file
diff --git a/site2/docs/helm-install.md b/site2/docs/helm-install.md
new file mode 100644
index 0000000..cce9b8f
--- /dev/null
+++ b/site2/docs/helm-install.md
@@ -0,0 +1,39 @@
+---
+id: helm-install
+title: Install Apache Pulsar using Helm
+sidebar_label: Install 
+---
+
+Install Apache Pulsar on Kubernetes with the official Pulsar Helm chart.
+
+## Requirements
+
+In order to deploy Apache Pulsar on Kubernetes, the following are required.
+
+1. kubectl 1.14 or higher, compatible with your cluster ([+/- 1 minor release 
from your 
cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
+2. Helm v3 (3.0.2 or higher)
+3. A Kubernetes cluster, version 1.14 or higher.
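+
+As a quick sanity check of the locally installed tools (connecting to the cluster comes later), you can run:
+
+```bash
+kubectl version --client
+helm version
+```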
+
+## Environment setup
+
+Before proceeding to deploy Pulsar, you need to prepare your environment.
+
+### Tools
+
+`helm` and `kubectl` need to be [installed on your computer](helm-tools.md).
+
+## Cloud cluster preparation
+
+> NOTE: Kubernetes 1.14 or higher is required, due to the usage of certain 
Kubernetes features.
+
+Follow the instructions to create and connect to the Kubernetes cluster of 
your choice:
+
+- [Google Kubernetes Engine](helm-prepare.md#google-kubernetes-engine)
+
+## Deploy Pulsar
+
+With the environment set up and configuration generated, you can now proceed 
to the [deployment of Pulsar](helm-deploy.md).
+
+## Upgrade Pulsar
+
+If you are upgrading an existing Kubernetes installation, follow the [upgrade 
documentation](helm-upgrade.md) instead.
diff --git a/site2/docs/helm-overview.md b/site2/docs/helm-overview.md
new file mode 100644
index 0000000..efd6fa5
--- /dev/null
+++ b/site2/docs/helm-overview.md
@@ -0,0 +1,110 @@
+---
+id: helm-overview
+title: Apache Pulsar Helm Chart
+sidebar_label: Overview
+---
+
+This is the officially supported Helm chart for installing Apache Pulsar in a cloud-native environment. It is based on and enhanced from StreamNative's [Helm Chart](https://github.com/streamnative/charts).
+
+## Introduction
+
+The Apache Pulsar Helm chart is one of the most convenient ways 
+to operate Pulsar on Kubernetes. This chart contains all the required 
components to get started and can scale to large deployments.
+
+This chart includes all the components for a complete experience, but each part can be configured to be installed separately.
+
+- Pulsar core components:
+    - ZooKeeper
+    - Bookies
+    - Brokers
+    - Function workers
+    - Proxies
+- Control Center:
+    - Pulsar Manager
+    - Prometheus
+    - Grafana
+    - Alert Manager
+
+It includes support for:
+
+- Security
+    - Automatically provisioned TLS certs, using 
[Jetstack](https://www.jetstack.io/)'s 
[cert-manager](https://cert-manager.io/docs/)
+        - self-signed
+        - [Let's Encrypt](https://letsencrypt.org/)
+    - TLS Encryption
+        - Proxy
+        - Broker
+        - Toolset
+        - Bookie
+        - ZooKeeper
+    - Authentication
+        - JWT
+    - Authorization
+- Storage
+    - Non-persistent storage
+    - Persistent Volumes
+    - Local Persistent Volumes
+- Functions
+    - Kubernetes Runtime
+    - Process Runtime
+    - Thread Runtime
+- Operations
+    - Independent Image Versions for all components, enabling controlled 
upgrades
+
+## Pulsar Helm chart quick start
+
+For those looking to get up and running with these charts as fast
+as possible, in a **non-production** use case, we provide
+a [quick start guide](getting-started-helm.md) for Proof of Concept (PoC) 
deployments.
+
+This guide walks the user through deploying these charts with default
+values & features, but *does not* meet production-ready requirements.
+If you wish to deploy these charts into production under sustained load,
+you should follow the complete [Installation Guide](helm-install.md).
+
+## Troubleshooting
+
+We've done our best to make these charts as seamless as possible, but occasionally troubles do surface outside of our control. We've collected tips and tricks for troubleshooting common issues. Please examine these first before raising an [issue](https://github.com/apache/pulsar/issues/new/choose), and feel free to add to them by raising a [Pull Request](https://github.com/apache/pulsar/compare)!
+
+## Installation
+
+The Apache Pulsar Helm chart contains all required dependencies.
+
+If you are just looking to deploy a Proof of Concept for testing,
+we strongly suggest you follow our [Quick Start 
Guide](getting-started-helm.md) for your first iteration.
+
+1. [Preparation](helm-prepare.md)
+2. [Deployment](helm-deploy.md)
+
+## Upgrading
+
+Once your Pulsar Chart is installed, configuration changes and chart
+updates should be done using `helm upgrade`.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+helm get values <pulsar-release-name> > pulsar.yaml
+helm upgrade <pulsar-release-name> pulsar -f pulsar.yaml
+```
+
+For more detailed information, see [Upgrading](helm-upgrade.md).
+
+## Uninstall
+
+To uninstall the Pulsar Chart, run the following command:
+
+```bash
+helm delete <pulsar-release-name>
+```
+
+For the purposes of continuity, these charts have some Kubernetes objects that are not removed when performing `helm delete`.
+These items should be removed *consciously*, as they affect re-deployment should you choose to redeploy.
+
+* PVCs for stateful data, which you must *consciously* remove
+    - ZooKeeper: This is your metadata.
+    - BookKeeper: This is your data.
+    - Prometheus: This is your metrics data, which can be safely removed.
+* Secrets, if generated by our [prepare release script](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/prepare_helm_release.sh). They contain secret keys, tokens, etc. You can use the [cleanup release script](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/cleanup_helm_release.sh) to remove these secrets and tokens as needed.
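+
+For example, a minimal sketch of reviewing and then consciously removing leftover PVCs (the namespace and claim name are placeholders):
+
+```bash
+kubectl get pvc -n <k8s-namespace>
+kubectl delete pvc <pvc-name> -n <k8s-namespace>
+```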
\ No newline at end of file
diff --git a/site2/docs/helm-prepare.md b/site2/docs/helm-prepare.md
new file mode 100644
index 0000000..426c830
--- /dev/null
+++ b/site2/docs/helm-prepare.md
@@ -0,0 +1,77 @@
+---
+id: helm-prepare
+title: Preparing Kubernetes resources
+sidebar_label: Prepare
+---
+
+For a fully functional Pulsar cluster, you will need a few resources before deploying the Apache Pulsar Helm chart. The following sections provide instructions for preparing the Kubernetes cluster before deploying the Pulsar Helm chart.
+
+- [Google Kubernetes Engine](#google-kubernetes-engine)
+
+## Google Kubernetes Engine
+
+To make it easier to get started, a script is provided to automate cluster creation. Alternatively, you can create a cluster manually.
+
+- [Manual cluster creation](#manual-cluster-creation)
+- [Scripted cluster creation](#scripted-cluster-creation)
+
+### Manual cluster creation
+
+To provision a Kubernetes cluster manually, follow the [GKE 
instructions](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster).
+
+Alternatively, you can use the [instructions](#scripted-cluster-creation) below to provision a GKE cluster as needed.
+
+### Scripted cluster creation
+
+A [bootstrap 
script](https://github.com/streamnative/charts/tree/master/scripts/pulsar/gke_bootstrap_script.sh)
 has been created to automate much of the setup process for users on GCP/GKE.
+
+The script will:
+
+1. Create a new GKE cluster.
+2. Allow the cluster to modify DNS records.
+3. Set up `kubectl`, and connect it to the cluster.
+
+Google Cloud SDK is a dependency of this script, so make sure it's [set up 
correctly](helm-tools.md#connect-to-a-gke-cluster) in order for the script to 
work.
+
+The script reads various parameters from environment variables and an argument 
`up` or `down` for bootstrap and clean-up respectively.
+
+The table below describes all variables.
+
+| **Variable** | **Description** | **Default value** |
+| ------------ | --------------- | ----------------- |
+| PROJECT      | The ID of your GCP project | No default; must be set |
+| CLUSTER_NAME | Name of the GKE cluster | `pulsar-dev` |
+| CONFDIR | Configuration directory to store the Kubernetes config | `${HOME}/.config/streamnative` |
+| INT_NETWORK | The IP space to use within this cluster | `default` |
+| LOCAL_SSD_COUNT | The number of local SSDs | `4` |
+| MACHINE_TYPE | The type of machine to use for nodes | `n1-standard-4` |
+| NUM_NODES | The number of nodes to be created in each of the cluster's zones | `4` |
+| PREEMPTIBLE | Create nodes using preemptible VM instances in the new cluster | `false` |
+| REGION | Compute region for the cluster | `us-east1` |
+| USE_LOCAL_SSD | Flag to create a cluster with local SSDs | `false` |
+| ZONE | Compute zone for the cluster | `us-east1-b` |
+| ZONE_EXTENSION | The extension (`a`, `b`, `c`) of the zone name of the cluster | `b` |
+| EXTRA_CREATE_ARGS | Extra arguments passed to the create command | |
+
+Run the script by passing in your desired parameters. It works with the default parameters except for `PROJECT`, which is required:
+
+```bash
+PROJECT=<gcloud project id> scripts/pulsar/gke_bootstrap_script.sh up
+```
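+
+You can also override any of the other variables from the table above on the same command line, for example:
+
+```bash
+PROJECT=<gcloud project id> CLUSTER_NAME=pulsar-dev REGION=us-east1 \
+    NUM_NODES=4 MACHINE_TYPE=n1-standard-4 scripts/pulsar/gke_bootstrap_script.sh up
+```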
+
+The script can also be used to clean up the created GKE resources:
+
+```bash
+PROJECT=<gcloud project id> scripts/pulsar/gke_bootstrap_script.sh down
+```
+
+#### Create a cluster with local SSDs
+
+If you are planning to install a Pulsar Helm chart using local persistent 
volumes, you need to create a GKE cluster with local SSDs. You can do so using 
the provided script by specifying `USE_LOCAL_SSD` to be `true`. A sample 
command is listed as below:
+
+```bash
+PROJECT=<gcloud project id> USE_LOCAL_SSD=true LOCAL_SSD_COUNT=<local-ssd-count> scripts/pulsar/gke_bootstrap_script.sh up
+```
+
+## Next Steps
+
+Continue with the [installation of the chart](helm-deploy.md) once you have 
the cluster up and running.
diff --git a/site2/docs/helm-tools.md b/site2/docs/helm-tools.md
new file mode 100644
index 0000000..33a1870
--- /dev/null
+++ b/site2/docs/helm-tools.md
@@ -0,0 +1,42 @@
+---
+id: helm-tools
+title: Required tools for deploying Pulsar Helm Chart
+sidebar_label: Required Tools
+---
+
+Before deploying Pulsar to your Kubernetes cluster, there are some tools you 
must have installed locally.
+
+## kubectl
+
+kubectl is the tool that talks to the Kubernetes API. kubectl 1.14 or higher 
is required and it needs to be compatible with your cluster ([+/- 1 minor 
release from your 
cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin)).
+
+[Install kubectl locally by following the Kubernetes 
documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl).
+
+The server version of kubectl cannot be obtained until we connect to a 
cluster. Proceed with setting up Helm.
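+
+For example, checking just the client version works before any cluster connection exists:
+
+```bash
+kubectl version --client
+```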
+
+## Helm
+
+Helm is the package manager for Kubernetes. The Apache Pulsar Helm Chart is 
tested and supported with Helm v3.
+
+### Get Helm
+
+You can get Helm from the project's [releases 
page](https://github.com/helm/helm/releases), or follow other options under the 
official documentation of [installing 
Helm](https://helm.sh/docs/intro/install/).
+
+### Next steps
+
+Once kubectl and Helm are configured, you can continue to configure your [Kubernetes cluster](helm-prepare.md).
+
+## Additional information
+
+### Templates
+
+Templating in Helm is done via golang's 
[text/template](https://golang.org/pkg/text/template/) and 
[sprig](https://godoc.org/github.com/Masterminds/sprig).
+
+Some information on how all the inner workings behave:
+
+- [Functions and 
Pipelines](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/)
+- [Subcharts and 
Globals](https://helm.sh/docs/chart_template_guide/subcharts_and_globals/)
+
+### Tips and tricks
+
+The Helm documentation has some additional information on developing with Helm in its [tips and tricks section](https://helm.sh/docs/howto/charts_tips_and_tricks/).
\ No newline at end of file
diff --git a/site2/docs/helm-upgrade.md b/site2/docs/helm-upgrade.md
new file mode 100644
index 0000000..ec7ff3c
--- /dev/null
+++ b/site2/docs/helm-upgrade.md
@@ -0,0 +1,34 @@
+---
+id: helm-upgrade
+title: Upgrade a Pulsar Helm release
+sidebar_label: Upgrade
+---
+
+Before upgrading your Pulsar installation, you need to check the changelog 
corresponding to the specific release you want to upgrade
+to and look for any release notes that might pertain to the new Pulsar chart 
version.
+
+We also recommend that you provide all values using the `helm upgrade --set key=value` syntax or `-f values.yml` instead of using `--reuse-values`, because some of the current values might be deprecated.
+
+> **NOTE**:
+>
+> You can retrieve your previous `--set` arguments cleanly, with `helm get values <release-name>`. If you direct this into a file (`helm get values <release-name> > pulsar.yaml`), you can safely pass this file via `-f`. Thus `helm upgrade <release-name> pulsar -f pulsar.yaml` safely replaces the behavior of `--reuse-values`.
+
+## Steps
+
+The following are the steps to upgrade Apache Pulsar to a newer version:
+
+1. Check the change log for the specific version you would like to upgrade to
+2. Go through [deployment documentation](helm-deploy.md) step by step
+3. Extract your previous `--set` arguments with
+    ```bash
+    helm get values <release-name> > pulsar.yaml
+    ```
+4. Decide on all the values you need to set
+5. Perform the upgrade, with all `--set` arguments extracted in step 3
+    ```bash
+    helm upgrade <release-name> pulsar \
+        --version <new version> \
+        -f pulsar.yaml \
+        --set ...
+    ```
\ No newline at end of file
diff --git a/site2/website/sidebars.json b/site2/website/sidebars.json
index bee59cc..ba336a4 100644
--- a/site2/website/sidebars.json
+++ b/site2/website/sidebars.json
@@ -4,6 +4,7 @@
       "pulsar-2.0",
       "standalone",
       "standalone-docker",
+      "kubernetes-helm",
       "client-libraries"
     ],
     "Concepts and Architecture": [
@@ -48,6 +49,14 @@
       "sql-deployment-configurations",
       "sql-rest-api"
     ],
+    "Kubernetes (Helm)": [
+      "helm-overview",
+      "helm-prepare",
+      "helm-install",
+      "helm-deploy",
+      "helm-upgrade",
+      "helm-tools"
+    ],
     "Deployment": [
       "deploy-aws",
       "deploy-kubernetes",
diff --git a/site2/website/versioned_docs/version-2.5.0/deploy-kubernetes.md 
b/site2/website/versioned_docs/version-2.5.0/deploy-kubernetes.md
index f9c0ed5..f3c4f51 100644
--- a/site2/website/versioned_docs/version-2.5.0/deploy-kubernetes.md
+++ b/site2/website/versioned_docs/version-2.5.0/deploy-kubernetes.md
@@ -5,390 +5,8 @@ sidebar_label: Kubernetes
 original_id: deploy-kubernetes
 ---
 
-> ### Tips
->
-> If you want to enable all builtin [Pulsar IO](io-overview.md) connectors in 
your Pulsar deployment, you can choose to use `apachepulsar/pulsar-all` image 
instead of
-> `apachepulsar/pulsar` image. `apachepulsar/pulsar-all` image has already 
bundled [all builtin connectors](io-overview.md#working-with-connectors).
+For those looking to get up and running with these charts as fast
+as possible, in a **non-production** use case, we provide
+a [quick start guide](getting-started-helm.md) for Proof of Concept (PoC) 
deployments.
 
-You can easily deploy Pulsar in [Kubernetes](https://kubernetes.io/) clusters, 
either in managed clusters on [Google Kubernetes 
Engine](#pulsar-on-google-kubernetes-engine) or [Amazon Web 
Services](https://aws.amazon.com/) or in [custom 
clusters](#pulsar-on-a-custom-kubernetes-cluster).
-
-The deployment method shown in this guide relies on [YAML](http://yaml.org/) 
definitions for Kubernetes [resources](https://kubernetes.io/docs/reference/). 
The {@inject: github:`deployment/kubernetes`:/deployment/kubernetes} 
subdirectory of the [Pulsar package](pulsar:download_page_url) holds resource 
definitions for:
-
-* A two-bookie BookKeeper cluster
-* A three-node ZooKeeper cluster
-* A three-broker Pulsar cluster
-* A [monitoring stack]() consisting of [Prometheus](https://prometheus.io/), 
[Grafana](https://grafana.com), and the [Pulsar 
dashboard](administration-dashboard.md)
-* A [pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/) from which 
you can run administrative commands using the 
[`pulsar-admin`](reference-pulsar-admin.md) CLI tool
-
-## Setup
-
-To get started, install a source package from the [downloads 
page](pulsar:download_page_url).
-
-> Note that the Pulsar binary package does *not* contain the necessary YAML 
resources to deploy Pulsar on Kubernetes.
-
-If you want to change the number of bookies, brokers, or ZooKeeper nodes in 
your Pulsar cluster, modify the `replicas` parameter in the `spec` section of 
the appropriate 
[`Deployment`](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
 or 
[`StatefulSet`](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/)
 resource.
-
-## Pulsar on Google Kubernetes Engine
-
-[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine) (GKE) 
automates the creation and management of Kubernetes clusters in [Google Compute 
Engine](https://cloud.google.com/compute/) (GCE).
-
-### Prerequisites
-
-To get started, you need:
-
-* A Google Cloud Platform account, which you can sign up for at 
[cloud.google.com](https://cloud.google.com)
-* An existing Cloud Platform project
-* The [Google Cloud SDK](https://cloud.google.com/sdk/downloads) (in 
particular the [`gcloud`](https://cloud.google.com/sdk/gcloud/) and 
[`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#download-as-part-of-the-google-cloud-sdk)
 tools).
-
-### Create a new Kubernetes cluster
-
-You can create a new GKE cluster entering the [`container clusters 
create`](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create)
 command for `gcloud`. This command enables you to specify the number of nodes 
in the cluster, the machine types of those nodes, and so on.
-
-The following example creates a new GKE cluster for Kubernetes version 
[1.6.4](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#v164) 
in the 
[us-central1-a](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
 zone. The cluster is named `pulsar-gke-cluster` and consists of three VMs, 
each using two locally attached SSDs and running on 
[n1-standard-8](https://cloud.google.com/compute/docs/machine-types) machines. 
[Bookie](reference-terminology.md#b [...]
-
-```bash
-$ gcloud container clusters create pulsar-gke-cluster \
-  --zone=us-central1-a \
-  --machine-type=n1-standard-8 \
-  --num-nodes=3 \
-  --local-ssd-count=2 \
-```
-
-By default, bookies run on all the machines that have locally attached SSD 
disks. In this example, all of those machines have two SSDs, but you can add 
different types of machines to the cluster later. You can control which 
machines host bookie servers using 
[labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels).
-
-### Dashboard
-
-You can observe your cluster in the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
 by downloading the credentials for your Kubernetes cluster and opening up a 
proxy to the cluster:
-
-```bash
-$ gcloud container clusters get-credentials pulsar-gke-cluster \
-  --zone=us-central1-a \
-  --project=your-project-name
-$ kubectl proxy
-```
-
-By default, the proxy is opened on port 8001. Now you can navigate to 
[localhost:8001/ui](http://localhost:8001/ui) in your browser to access the 
dashboard. At first your GKE cluster is empty, but that changes as you begin 
deploying Pulsar components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-## Pulsar on Amazon Web Services
-
-You can run Kubernetes on [Amazon Web Services](https://aws.amazon.com/) (AWS) 
in a variety of ways. A very simple way that is [recently 
introduced](https://aws.amazon.com/blogs/compute/kubernetes-clusters-aws-kops/) 
involves using the [Kubernetes Operations](https://github.com/kubernetes/kops) 
(kops) tool.
-
-You can find detailed instructions for setting up a Kubernetes cluster on AWS 
from [here](https://github.com/kubernetes/kops/blob/master/docs/aws.md).
-
-When you create a cluster using those instructions, your `kubectl` config in 
`~/.kube/config` (on MacOS and Linux) is updated for you, so you probably do 
not need to change your configuration. Nonetheless, you can ensure that 
`kubectl` can interact with your cluster by listing the nodes in the cluster:
-
-```bash
-$ kubectl get nodes
-```
-
-If `kubectl` works with your cluster, you can proceed to deploy Pulsar 
components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-## Pulsar on a custom Kubernetes cluster
-
-You can deploy Pulsar on a custom, non-GKE Kubernetes cluster as well. You can 
find detailed documentation on how to choose a Kubernetes installation method 
that suits your needs in the [Picking the Right 
Solution](https://kubernetes.io/docs/setup/pick-right-solution) guide in the 
Kubernetes docs.
-
-The easiest way to run a Kubernetes cluster is to do so locally. To install a 
mini local cluster for testing purposes and running in local VMs, you can 
either:
-
-1. Use [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) 
to run a single-node Kubernetes cluster.
-1. Create a local cluster running on multiple VMs on the same machine.
-
-### Minikube
-
-1. [Install and configure 
minikube](https://github.com/kubernetes/minikube#installation) with
-   a [VM driver](https://github.com/kubernetes/minikube#requirements), for 
example, `kvm2` on Linux or `hyperkit` or `VirtualBox` on macOS.
-1. Create a kubernetes cluster on Minikube.
-    ```shell
-    minikube start --memory=8192 --cpus=4 \
-        --kubernetes-version=<version>
-    ```
-    `<version>` can be any [Kubernetes version supported by your minikube 
installation](https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/).
 Example: `v1.16.1`
-1. Set `kubectl` to use Minikube.
-    ```shell
-    kubectl config use-context minikube
-    ```
-
-In order to use the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
-with local Kubernetes cluster on Minikube, enter the command below:
-
-```bash
-$ minikube dashboard
-```
-
-The command automatically triggers opening a webpage in your browser. At first 
your local cluster is empty, but that changes as you begin deploying Pulsar 
components using `kubectl` [component by 
component](#deploying-pulsar-components),
-or using [`helm`](#deploying-pulsar-components-helm).
-
-### Multiple VMs
-
-For the second option, follow the 
[instructions](https://github.com/pires/kubernetes-vagrant-coreos-cluster) for 
running Kubernetes using [CoreOS](https://coreos.com/) on 
[Vagrant](https://www.vagrantup.com/). You can follow an abridged version of 
those instructions from here.
-
-
-First, make sure you have [Vagrant](https://www.vagrantup.com/downloads.html) 
and [VirtualBox](https://www.virtualbox.org/wiki/Downloads) installed. Then 
clone the repo and start up the cluster:
-
-```bash
-$ git clone https://github.com/pires/kubernetes-vagrant-coreos-cluster
-$ cd kubernetes-vagrant-coreos-cluster
-
-# Start a three-VM cluster
-$ NODES=3 USE_KUBE_UI=true vagrant up
-```
-
-Create SSD disk mount points on the VMs using this script:
-
-```bash
-$ for vm in node-01 node-02 node-03; do
-    NODES=3 vagrant ssh $vm -c "sudo mkdir -p /mnt/disks/ssd0"
-    NODES=3 vagrant ssh $vm -c "sudo mkdir -p /mnt/disks/ssd1"
-  done
-```
-
-Bookies expect two logical devices to mount for 
[journal](concepts-architecture-overview.md#journal-storage) and persistent 
message storage to be available. In this VM exercise, you can create two 
directories on each VM.
-
-Once the cluster is up, you can verify that `kubectl` can access it:
-
-```bash
-$ kubectl get nodes
-NAME           STATUS                     AGE       VERSION
-172.17.8.101   Ready,SchedulingDisabled   10m       v1.6.4
-172.17.8.102   Ready                      8m        v1.6.4
-172.17.8.103   Ready                      6m        v1.6.4
-172.17.8.104   Ready                      4m        v1.6.4
-```
-
-In order to use the [Kubernetes 
Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/)
 with your local Kubernetes cluster, first, you need to use `kubectl` to create 
a proxy to the cluster:
-
-```bash
-$ kubectl proxy
-```
-
-Now you can access the web interface at 
[localhost:8001/ui](http://localhost:8001/ui). At first your local cluster is 
empty, but that changes as you begin deploying Pulsar components using 
`kubectl` [component by component](#deploying-pulsar-components), or using 
[`helm`](#deploying-pulsar-components-helm).
-
-## Deploy Pulsar components
-
-Now that you have set up a Kubernetes cluster, either on [Google Kubernetes 
Engine](#pulsar-on-google-kubernetes-engine) or on a [custom 
cluster](#pulsar-on-a-custom-kubernetes-cluster), you can begin deploying the 
components that make up Pulsar. You can find the YAML resource definitions for 
Pulsar components in the `kubernetes` folder of the [Pulsar source 
package](pulsar:download_page_url).
-
-In that package, you can find different sets of resource definitions for 
different environments.
-
-- `deployment/kubernetes/google-kubernetes-engine`: for Google Kubernetes 
Engine (GKE)
-- `deployment/kubernetes/aws`: for AWS
-- `deployment/kubernetes/generic`: for a custom Kubernetes cluster
-
-To begin, `cd` into the appropriate folder.
-
-### Deploy ZooKeeper
-
-You *must* deploy ZooKeeper as the first Pulsar component, as ZooKeeper is a 
dependency for the others.
-
-```bash
-$ kubectl apply -f zookeeper.yaml
-```
-
-Wait until all three ZooKeeper server pods are up and have the status 
`Running`. You can check on the status of the ZooKeeper pods at any time:
-
-```bash
-$ kubectl get pods -l component=zookeeper
-NAME      READY     STATUS             RESTARTS   AGE
-zk-0      1/1       Running            0          18m
-zk-1      1/1       Running            0          17m
-zk-2      0/1       Running            6          15m
-```
-
-This step may take several minutes, as Kubernetes needs to download the Docker 
image on the VMs.
-
-### Initialize cluster metadata
-
-Once ZooKeeper runs, you need to [initialize the 
metadata](#cluster-metadata-initialization) for the Pulsar cluster in 
ZooKeeper. This includes system metadata for 
[BookKeeper](reference-terminology.md#bookkeeper) and Pulsar more broadly. You 
only need to run the Kubernetes 
[job](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/)
 in the `cluster-metadata.yaml` file once:
-
-```bash
-$ kubectl apply -f cluster-metadata.yaml
-```
-
-For the sake of reference, that job runs the following command on an ephemeral 
pod:
-
-```bash
-$ bin/pulsar initialize-cluster-metadata \
-  --cluster local \
-  --zookeeper zookeeper \
-  --configuration-store zookeeper \
-  --web-service-url http://broker.default.svc.cluster.local:8080/ \
-  --broker-service-url pulsar://broker.default.svc.cluster.local:6650/
-```
-
-### Deploy the rest of the components
-
-Once you have successfully initialized cluster metadata, you can then deploy 
the bookies, brokers, monitoring stack ([Prometheus](https://prometheus.io), 
[Grafana](https://grafana.com), and the [Pulsar 
dashboard](administration-dashboard.md)), and Pulsar cluster proxy:
-
-```bash
-$ kubectl apply -f bookie.yaml
-$ kubectl apply -f broker.yaml
-$ kubectl apply -f proxy.yaml
-$ kubectl apply -f monitoring.yaml
-$ kubectl apply -f admin.yaml
-```
-
-You can check on the status of the pods for these components either in the 
Kubernetes Dashboard or using `kubectl`:
-
-```bash
-$ kubectl get pods -w -l app=pulsar
-```
-
-### Set up properties and namespaces
-
-Once all of the components are up and running, you need to create at least one 
Pulsar tenant and at least one namespace.
-
->If Pulsar [authentication and authorization](security-overview.md) is turned 
on,you do not have to strictly perform this step though you are allowed to 
change [policies](admin-api-namespaces.md) for each of the namespaces later.
-
-You can create properties and namespaces (and perform any other administrative 
tasks) using the `pulsar-admin` pod that is already configured to act as an 
admin client for your newly created Pulsar cluster. One easy way to perform 
administrative tasks is to create an alias for the 
[`pulsar-admin`](reference-pulsar-admin.md) tool installed on the admin pod.
-
-```bash
-$ alias pulsar-admin='kubectl exec pulsar-admin -it -- bin/pulsar-admin'
-```
-
-Now, any time you run `pulsar-admin`, you can run commands from that pod. This 
command creates a tenant called `ten`:
-
-```bash
-$ pulsar-admin tenants create ten \
-  --admin-roles admin \
-  --allowed-clusters local
-```
-
-This command creates a `ns` namespace under the `ten` tenant:
-
-```bash
-$ pulsar-admin namespaces create ten/ns
-```
-
-To verify that everything has gone as planned:
-
-```bash
-$ pulsar-admin tenants list
-public
-ten
-
-$ pulsar-admin namespaces list ten
-ten/ns
-```
-
-Now that you have a namespace and tenant set up, you can move on to 
[experimenting with your Pulsar cluster](#experimenting-with-your-cluster) from 
within the cluster or [connecting to the cluster](#client-connections) using a 
Pulsar client.
-
-### Experiment with your cluster
-
-Now that you have successfully created a tenant and namespace, you can begin 
experimenting with your running Pulsar cluster. Using the same `pulsar-admin` 
pod via an alias, as in the section above, you can use 
[`pulsar-perf`](reference-cli-tools.md#pulsar-perf) to create a test 
[producer](reference-terminology.md#producer) to publish 10,000 messages a 
second on a topic in the [tenant](reference-terminology.md#tenant) and 
[namespace](reference-terminology.md#namespace) you have created.
-
-First, create an alias to use the `pulsar-perf` tool via the admin pod:
-
-```bash
-$ alias pulsar-perf='kubectl exec pulsar-admin -it -- bin/pulsar-perf'
-```
-
-Now, produce messages:
-
-```bash
-$ pulsar-perf produce persistent://public/default/my-topic \
-  --rate 10000
-```
-
-Similarly, you can start a [consumer](reference-terminology.md#consumer) to 
subscribe to and receive all the messages on that topic:
-
-```bash
-$ pulsar-perf consume persistent://public/default/my-topic \
-  --subscriber-name my-subscription-name
-```
-
-You can also view [stats](administration-stats.md) for the topic using the 
[`pulsar-admin`](reference-pulsar-admin.md#persistent-stats) tool:
-
-```bash
-$ pulsar-admin persistent stats persistent://public/default/my-topic
-```
-
-### Monitor
-
-The default monitoring stack for Pulsar on Kubernetes consists of 
[Prometheus](#prometheus), [Grafana](#grafana), and the [Pulsar 
dashbaord](administration-dashboard.md).
-
-> If you deploy the cluster to Minikube, the following monitoring ports are 
mapped at the minikube VM:
->
-> - Prometheus port: 30003
-> - Grafana port: 30004
-> - Dashboard port: 30005
->
-> You can use `minikube ip` to find the IP address of the minikube VM, and 
then use their mapped ports
-> to access corresponding services. For example, you can access Pulsar 
dashboard at `http://$(minikube ip):30005`.
-
-#### Prometheus
-
-A [Prometheus](https://prometheus.io) instance running inside the cluster can 
collect all Pulsar metrics in Kubernetes. Typically, you do not have to access 
Prometheus directly. Instead, you can use the [Grafana interface](#grafana) 
that displays the data stored in Prometheus.
-
-#### Grafana
-
-In your Kubernetes cluster, you can use [Grafana](https://grafana.com) to view 
dashbaords for Pulsar [namespaces](reference-terminology.md#namespace) (message 
rates, latency, and storage), JVM stats, 
[ZooKeeper](https://zookeeper.apache.org), and 
[BookKeeper](reference-terminology.md#bookkeeper). You can get access to the 
pod serving Grafana using the 
[`port-forward`](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster)
 command of `kubectl`:
-
-```bash
-$ kubectl port-forward \
-  $(kubectl get pods -l component=grafana -o 
jsonpath='{.items[*].metadata.name}') 3000
-```
-
-You can then access the dashboard in your web browser at 
[localhost:3000](http://localhost:3000).
-
-#### Pulsar dashboard
-
-While Grafana and Prometheus are used to provide graphs with historical data, 
[Pulsar dashboard](administration-dashboard.md) reports more detailed current 
data for individual [topics](reference-terminology.md#topic).
-
-For example, you can have sortable tables showing all namespaces, topics, and 
broker stats, with details on the IP address for consumers, how long they have 
been connected, and much more.
-
-You can access to the pod serving the Pulsar dashboard using the 
[`port-forward`](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster)
 command of `kubectl`:
-
-```bash
-$ kubectl port-forward \
-  $(kubectl get pods -l component=dashboard -o 
jsonpath='{.items[*].metadata.name}') 8080:80
-```
-
-You can then access the dashboard in your web browser at 
[localhost:8080](http://localhost:8080).
-
-### Client connections
-
-> If you deploy the cluster to Minikube, the proxy ports are mapped at the 
minikube VM:
->
-> - Http port: 30001
-> - Pulsar binary protocol port: 30002
->
-> You can use `minikube ip` to find the IP address of the minikube VM, and 
then use their mapped ports
-> to access corresponding services. For example, pulsar webservice url is at 
`http://$(minikube ip):30001`.
-
-Once your Pulsar cluster is running on Kubernetes, you can connect to it using 
a Pulsar client. You can fetch the IP address for the Pulsar proxy running in 
your Kubernetes cluster using `kubectl`:
-
-```bash
-$ kubectl get service broker-proxy \
-  --output=jsonpath='{.status.loadBalancer.ingress[*].ip}'
-```
-
-If the IP address for the proxy is, for example, 35.12.13.198, you can connect 
to Pulsar using `pulsar://35.12.13.198:6650`.
-
-You can find client documentation for:
-
-* [Java](client-libraries-java.md)
-* [Python](client-libraries-python.md)
-* [C++](client-libraries-cpp.md)
-
-
-## Deploy Pulsar components (helm)
-
-Pulsar also provides a [Helm](https://docs.helm.sh/) chart for deploying a 
Pulsar cluster to Kubernetes. Before you start, make sure you follow [Helm 
documentation](https://docs.helm.sh/using_helm) to install helm.
-
-> Assume you clone a pulsar repo under a `PULSAR_HOME` directory.
-
-### Minikube
-
-1. Go to Pulsar helm chart directory
-    ```shell
-    cd ${PULSAR_HOME}/deployment/kubernetes/helm
-    ```
-1. Install helm chart to a K8S cluster on Minikube.
-    ```shell
-    helm install --values pulsar/values-mini.yaml ./pulsar
-    ```
-
-Once the helm chart is completed on installation, you can access the cluster 
via:
-
-- Web service url: `http://$(minikube ip):30001/`
-- Pulsar service url: `pulsar://$(minikube ip):30002/`
+For those looking to configure and install a Pulsar cluster on Kubernetes for 
production usage, you should follow the complete [Installation 
Guide](helm-install.md).
diff --git a/site2/website/versioned_docs/version-2.5.0/getting-started-helm.md 
b/site2/website/versioned_docs/version-2.5.0/getting-started-helm.md
new file mode 100644
index 0000000..70a4f2f
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/getting-started-helm.md
@@ -0,0 +1,334 @@
+---
+id: version-2.5.0-kubernetes-helm
+title: Get started in Kubernetes
+sidebar_label: Run Pulsar in Kubernetes
+original_id: kubernetes-helm
+---
+
+This section guides you through every step of installing and running
+Apache Pulsar with Helm on Kubernetes quickly, including:
+
+- Install Apache Pulsar on Kubernetes using Helm
+- Start and stop Apache Pulsar
+- Create topics using `pulsar-admin`
+- Produce and consume messages using Pulsar clients
+- Monitor Apache Pulsar status with Prometheus and Grafana
+
+For deploying a Pulsar cluster for production usage, please read the 
documentation on [how to configure and install a Pulsar Helm 
chart](helm-deploy.md).
+
+## Prerequisites
+
+- Kubernetes server 1.14.0+
+- kubectl 1.14.0+
+- Helm 3.0+
+
+> **Tip**
+>
+> For the following steps, step 2 and step 3 are for developers and step 4 and step 5 are for administrators.
+
+## Step 0: Prepare a Kubernetes cluster
+
+Before installing a Pulsar Helm chart, you have to create a Kubernetes 
cluster. You can follow [the instructions](helm-prepare.md) to prepare a 
Kubernetes cluster.
+
+We use [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) 
in this quick start guide.
+
+1. Create a Kubernetes cluster on Minikube.
+
+    ```bash
+    minikube start --memory=8192 --cpus=4 --kubernetes-version=<k8s-version>
+    ```
+
+    The `<k8s-version>` can be any [Kubernetes version supported by your minikube installation](https://minikube.sigs.k8s.io/docs/reference/configuration/kubernetes/). Example: `v1.16.1`.
+
+2. Set `kubectl` to use Minikube.
+
+    ```bash
+    kubectl config use-context minikube
+    ```
+
+3. In order to use the [Kubernetes Dashboard](https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/) with the local Kubernetes cluster on Minikube, enter the command below:
+
+    ```bash
+    minikube dashboard
+    ```
+    The command automatically opens a webpage in your browser.
+
+## Step 1: Install Pulsar Helm Chart
+
+1. Clone the Pulsar Helm chart repository.
+
+    ```bash
+    git clone https://github.com/apache/pulsar
+    cd pulsar/deployment/kubernetes/helm/
+    ```
+
+2. Run `prepare_helm_release.sh` to create the secrets required for installing the Apache Pulsar Helm chart. The username `pulsar` and password `pulsar` are used for logging into the Grafana dashboard and Pulsar Manager.
+
+    ```bash
+    ./scripts/pulsar/prepare_helm_release.sh \
+        -n pulsar \
+        -k pulsar-mini \
+        --control-center-admin pulsar \
+        --control-center-password pulsar \
+        -c
+    ```
+
+3. Use the Pulsar Helm chart to install a Pulsar cluster to Kubernetes.
+
+    ```bash
+    helm install \
+        --values examples/values-minikube.yaml \
+        pulsar-mini pulsar
+    ```
+
+4. Check the status of all pods.
+
+    ```bash
+    kubectl get pods -n pulsar
+    ```
+
+    If all pods start up successfully, you can see that `STATUS` changes to `Running` or `Completed`.
+
+    **Output**
+
+    ```bash
+    NAME                                         READY   STATUS      RESTARTS  
 AGE
+    pulsar-mini-bookie-0                         1/1     Running     0         
 9m27s
+    pulsar-mini-bookie-init-5gphs                0/1     Completed   0         
 9m27s
+    pulsar-mini-broker-0                         1/1     Running     0         
 9m27s
+    pulsar-mini-grafana-6b7bcc64c7-4tkxd         1/1     Running     0         
 9m27s
+    pulsar-mini-prometheus-5fcf5dd84c-w8mgz      1/1     Running     0         
 9m27s
+    pulsar-mini-proxy-0                          1/1     Running     0         
 9m27s
+    pulsar-mini-pulsar-init-t7cqt                0/1     Completed   0         
 9m27s
+    pulsar-mini-pulsar-manager-9bcbb4d9f-htpcs   1/1     Running     0         
 9m27s
+    pulsar-mini-toolset-0                        1/1     Running     0         
 9m27s
+    pulsar-mini-zookeeper-0                      1/1     Running     0         
 9m27s
+    ```
+
+5. Check the status of all services in the namespace `pulsar`.
+
+    ```bash
+    kubectl get services -n pulsar
+    ```
+
+    **Output**
+    
+    ```bash
+    NAME                         TYPE           CLUSTER-IP       EXTERNAL-IP   
PORT(S)                       AGE
+    pulsar-mini-bookie           ClusterIP      None             <none>        
3181/TCP,8000/TCP             11m
+    pulsar-mini-broker           ClusterIP      None             <none>        
8080/TCP,6650/TCP             11m
+    pulsar-mini-grafana          LoadBalancer   10.106.141.246   <pending>     
3000:31905/TCP                11m
+    pulsar-mini-prometheus       ClusterIP      None             <none>        
9090/TCP                      11m
+    pulsar-mini-proxy            LoadBalancer   10.97.240.109    <pending>     
80:32305/TCP,6650:31816/TCP   11m
+    pulsar-mini-pulsar-manager   LoadBalancer   10.103.192.175   <pending>     
9527:30190/TCP                11m
+    pulsar-mini-toolset          ClusterIP      None             <none>        
<none>                        11m
+    pulsar-mini-zookeeper        ClusterIP      None             <none>        
2888/TCP,3888/TCP,2181/TCP    11m
+    ```
+
+## Step 2: Use pulsar-admin to create Pulsar tenants/namespaces/topics
+
+`pulsar-admin` is the CLI tool for Pulsar. In this step, you can use 
`pulsar-admin` to create resources including tenants, namespaces, and topics.
+
+1. Enter the `toolset` container.
+
+    ```bash
+    kubectl exec -it -n pulsar pulsar-mini-toolset-0 -- /bin/bash
+    ```
+
+2. In the `toolset` container, create a tenant named `apache`.
+
+    ```bash
+    bin/pulsar-admin tenants create apache
+    ```
+
+    Then you can list the tenants to see if the tenant is created successfully.
+
+    ```bash
+    bin/pulsar-admin tenants list
+    ```
+
+    You should see output similar to the following. The tenant `apache` has been successfully created.
+
+    ```bash
+    "apache"
+    "public"
+    "pulsar"
+    ```
+
+3. In the `toolset` container, create a namespace named `pulsar` in the tenant 
`apache`.
+
+    ```bash
+    bin/pulsar-admin namespaces create apache/pulsar
+    ```
+
+    Then you can list the namespaces of tenant `apache` to see if the 
namespace is created successfully.
+
+    ```bash
+    bin/pulsar-admin namespaces list apache
+    ```
+
+    You should see output similar to the following. The namespace `apache/pulsar` has been successfully created.
+
+    ```bash
+    "apache/pulsar"
+    ```
+
+4. In the `toolset` container, create a topic `test-topic` with `4` partitions 
in the namespace `apache/pulsar`.
+
+    ```bash
+    bin/pulsar-admin topics create-partitioned-topic apache/pulsar/test-topic 
-p 4
+    ```
+
+5. In the `toolset` container, list all the partitioned topics in the 
namespace `apache/pulsar`.
+
+    ```bash
+    bin/pulsar-admin topics list-partitioned-topics apache/pulsar
+    ```
+
+    Then you can see all the partitioned topics in the namespace 
`apache/pulsar`.
+
+    ```bash
+    "persistent://apache/pulsar/test-topic"
+    ```
+
+## Step 3: Use Pulsar client to produce and consume messages
+
+You can use the Pulsar client to create producers and consumers to produce and 
consume messages.
+
+By default, the Helm chart exposes the Pulsar cluster through a Kubernetes `LoadBalancer`. In Minikube, you can use the following command to get the IP address of the proxy service.
+
+```bash
+kubectl get services -n pulsar | grep pulsar-mini-proxy
+```
+
+You will see output similar to the following.
+
+```bash
+pulsar-mini-proxy            LoadBalancer   10.97.240.109    <pending>     
80:32305/TCP,6650:31816/TCP   28m
+```
+
+This output shows the node ports to which the Pulsar cluster's binary port and HTTP port are exposed. The port after `80:` is the HTTP port, while the port after `6650:` is the binary port.
+
+Then you can find the IP address of your Minikube server by running the following command.
+
+```bash
+minikube ip
+```
+
+At this point, you can assemble the service URLs for connecting your Pulsar client.
+
+```properties
+webServiceUrl=http://$(minikube ip):<exposed-http-port>/
+brokerServiceUrl=pulsar://$(minikube ip):<exposed-binary-port>/
+```
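+
+As a sketch, you can also resolve these URLs non-interactively, assuming the `pulsar-mini-proxy` service shown above:
+
+```bash
+HTTP_PORT=$(kubectl get svc pulsar-mini-proxy -n pulsar -o jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
+BINARY_PORT=$(kubectl get svc pulsar-mini-proxy -n pulsar -o jsonpath='{.spec.ports[?(@.port==6650)].nodePort}')
+echo "webServiceUrl=http://$(minikube ip):${HTTP_PORT}/"
+echo "brokerServiceUrl=pulsar://$(minikube ip):${BINARY_PORT}/"
+```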
+
+Then proceed with the following steps:
+
+1. Download the Apache Pulsar tarball from the [downloads page](https://pulsar.apache.org/en/download/).
+
+2. Decompress the tarball based on your download file.
+
+    ```bash
+    tar -xf <file-name>.tar.gz
+    ```
+
+3. Expose `PULSAR_HOME`.
+
+    (1) Enter the directory of the decompressed download file.
+
+    (2) Expose `PULSAR_HOME` as the environment variable.
+
+    ```bash
+    export PULSAR_HOME=$(pwd)
+    ```
+
+4. Configure the Pulsar client.
+
+    In the `${PULSAR_HOME}/conf/client.conf` file, replace `webServiceUrl` and `brokerServiceUrl` with the service URLs you got from the above steps.
+
+5. Create a subscription to consume messages from `apache/pulsar/test-topic`.
+
+    ```bash
+    bin/pulsar-client consume -s sub apache/pulsar/test-topic  -n 0
+    ```
+
+6. Open a new terminal. In the new terminal, create a producer and send 10 
messages to the `test-topic` topic.
+
+    ```bash
+    bin/pulsar-client produce apache/pulsar/test-topic  -m "---------hello 
apache pulsar-------" -n 10
+    ```
+
+7. Verify the results.
+
+    - From producer side
+
+        **Output**
+        
+        The messages have been produced successfully.
+
+        ```bash
+        18:15:15.489 [main] INFO  
org.apache.pulsar.client.cli.PulsarClientTool - 10 messages successfully 
produced
+        ```
+
+    - From consumer side
+
+        **Output**
+
+        At the same time, you can receive the messages as below.
+
+        ```bash
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ----- got message -----
+        ---------hello apache pulsar-------
+        ```
+
+## Step 4: Use Pulsar Manager to manage the cluster
+
+[Pulsar Manager](administration-pulsar-manager.md) is a web-based GUI 
management tool for managing and monitoring Pulsar.
+
+1. By default, the `Pulsar Manager` is exposed as a separate `LoadBalancer`. 
You can open the Pulsar Manager UI using the following command:
+
+    ```bash
+    minikube service pulsar-mini-pulsar-manager -n pulsar
+    ```
+
+2. The Pulsar Manager UI opens in your browser. You can use the username `pulsar` and password `pulsar` to log into Pulsar Manager.
+
+3. In the Pulsar Manager UI, you can create an environment:
+
+    - Click the `New Environment` button in the top-left corner.
+    - Type `pulsar-mini` for the field `Environment Name` in the popup window.
+    - Type `http://pulsar-mini-broker:8080` for the field `Service URL` in the popup window.
+    - Click the `Confirm` button in the popup window.
+
+4. After successfully creating an environment, you are redirected to the `tenants` page of that environment. Then you can create `tenants`, `namespaces`, and `topics` using Pulsar Manager.
+
+## Step 5: Use Prometheus and Grafana to monitor the cluster
+
+Grafana is an open-source visualization tool, which can be used to visualize time series data in dashboards.
+
+1. By default, Grafana is exposed as a separate `LoadBalancer`. You can open the Grafana UI using the following command:
+
+    ```bash
+    minikube service pulsar-mini-grafana -n pulsar
+    ```
+
+2. The Grafana UI opens in your browser. You can use the username `pulsar` and password `pulsar` to log into the Grafana dashboard.
+
+3. You will be able to view dashboards for different components of a Pulsar 
cluster.
\ No newline at end of file
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-deploy.md 
b/site2/website/versioned_docs/version-2.5.0/helm-deploy.md
new file mode 100644
index 0000000..c879249
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-deploy.md
@@ -0,0 +1,385 @@
+---
+id: version-2.5.0-helm-deploy
+title: Deploying a Pulsar cluster using Helm
+sidebar_label: Deployment
+original_id: helm-deploy
+---
+
+Before running `helm install`, you need to make some decisions about how you 
will run Pulsar.
+Options can be specified using Helm's `--set option.name=value` command line 
option.
+
+## Selecting configuration options
+
+In each section, collect the options that you will combine to use with `helm install`.
+
+### Kubernetes Namespace
+
+By default, the chart is installed to a namespace called `pulsar`.
+
+```yaml
+namespace: pulsar
+```
+
+If you decide to install the chart into a different k8s namespace, you can 
include this option in your Helm install command:
+
+```bash
+--set namespace=<different-k8s-namespace>
+```
+
+By default, the chart doesn't create the namespace.
+
+```yaml
+namespaceCreate: false
+```
+
+If you want the chart to create the k8s namespace automatically, you can 
include this option in your Helm install command.
+
+```bash
+--set namespaceCreate=true
+```
+
+### Persistence
+
+By default, the chart creates Volume Claims with the expectation that a dynamic provisioner will create the underlying Persistent Volumes.
+
+```yaml
+volumes:
+  persistence: true
+  # configure the components to use local persistent volume
+  # the local provisioner should be installed prior to enabling local persistent volumes
+  local_storage: false
+```
+
+If you would like to use local persistent volumes as the persistent storage 
for your Helm release, you can install 
[local-storage-provisioner](#install-local-storage-provisioner) and include the 
following option in your Helm install command. 
+
+```bash
+--set volumes.local_storage=true
+```
+
+> **Important**: After initial installation, making changes to your storage 
settings requires manually editing Kubernetes objects,
+> so it's best to plan ahead before installing your production instance of 
Pulsar to avoid extra storage migration work.
+
+This chart is designed for production use. To use this chart in a development environment (e.g. Minikube), you can disable persistence by including this option in your Helm install command.
+
+```bash
+--set volumes.persistence=false
+```
+
+### Affinity 
+
+By default, `anti-affinity` is turned on to ensure that pods of the same component run on different nodes.
+
+```yaml
+affinity:
+  anti_affinity: true
+```
+
+If you are planning to use this chart in a development environment (e.g. Minikube), you can disable `anti-affinity` by including this option in your Helm install command.
+
+```bash
+--set affinity.anti_affinity=false
+```
+
+### Components
+
+This chart is designed for production usage. It deploys a production-ready 
Pulsar cluster including Pulsar core components and monitoring components.
+
+You can customize the components to deploy by turning on/off individual 
components.
+
+```yaml
+## Components
+##
+## Control what components of Apache Pulsar to deploy for the cluster
+components:
+  # zookeeper
+  zookeeper: true
+  # bookkeeper
+  bookkeeper: true
+  # bookkeeper - autorecovery
+  autorecovery: true
+  # broker
+  broker: true
+  # functions
+  functions: true
+  # proxy
+  proxy: true
+  # toolset
+  toolset: true
+  # pulsar manager
+  pulsar_manager: true
+
+## Monitoring Components
+##
+## Control what components of the monitoring stack to deploy for the cluster
+monitoring:
+  # monitoring - prometheus
+  prometheus: true
+  # monitoring - grafana
+  grafana: true
+```
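+
+For example, a minimal sketch of trimming optional components from the command line, using the toggles shown above:
+
+```bash
+--set components.pulsar_manager=false \
+--set monitoring.grafana=false \
+--set monitoring.prometheus=false
+```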
+
+### Docker Images
+
+This chart is designed to enable controlled upgrades, so it provides the capability to configure independent image versions for components. You can customize the image for each component individually.
+
+```yaml
+## Images
+##
+## Control what images to use for each component
+images:
+  zookeeper:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  bookie:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  autorecovery:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  broker:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  proxy:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+    pullPolicy: IfNotPresent
+  functions:
+    repository: apachepulsar/pulsar-all
+    tag: 2.5.0
+  prometheus:
+    repository: prom/prometheus
+    tag: v1.6.3
+    pullPolicy: IfNotPresent
+  grafana:
+    repository: streamnative/apache-pulsar-grafana-dashboard-k8s
+    tag: 0.0.4
+    pullPolicy: IfNotPresent
+  pulsar_manager:
+    repository: apachepulsar/pulsar-manager
+    tag: v0.1.0
+    pullPolicy: IfNotPresent
+    hasCommand: false
+```
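+
+For example, during a controlled upgrade you could override the image tag of a single component on the command line. The tag `2.5.1` below is purely illustrative:
+
+```bash
+--set images.broker.tag=2.5.1
+```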
+
+### TLS
+
+This Pulsar chart can be configured to enable TLS to protect all the traffic between components. Before you enable TLS, you have to provision TLS certificates for the components you have configured to use TLS.
+
+- [Provision TLS certs using 
`cert-manager`](#provision-tls-certs-using-cert-manager)
+
+#### Provision TLS certs using cert-manager
+
+To use `cert-manager` to provision the TLS certificates, you have to install [cert-manager](#install-cert-manager) before installing the Pulsar chart. After successfully installing cert-manager, you can set `certs.internal_issuer.enabled` to `true`. The Pulsar chart will then use `cert-manager` to generate `selfsigning` TLS certs for the configured components.
+
+```yaml
+certs:
+  internal_issuer:
+    enabled: false
+    component: internal-cert-issuer
+    type: selfsigning
+```
+
+You can also customize the generated TLS certificates by configuring the following fields.
+
+```yaml
+tls:
+  # common settings for generating certs
+  common:
+    # 90d
+    duration: 2160h
+    # 15d
+    renewBefore: 360h
+    organization:
+      - pulsar
+    keySize: 4096
+    keyAlgorithm: rsa
+    keyEncoding: pkcs8
+```
+
+#### Enable TLS
+
+After installing `cert-manager`, you can then set `tls.enabled` to `true` to 
enable TLS encryption for the entire cluster.
+
+```yaml
+tls:
+  enabled: false
+```
+
+You can also control whether to enable TLS encryption for each component individually.
+
+```yaml
+tls:
+  # settings for generating certs for proxy
+  proxy:
+    enabled: false
+    cert_name: tls-proxy
+  # settings for generating certs for broker
+  broker:
+    enabled: false
+    cert_name: tls-broker
+  # settings for generating certs for bookies
+  bookie:
+    enabled: false
+    cert_name: tls-bookie
+  # settings for generating certs for zookeeper
+  zookeeper:
+    enabled: false
+    cert_name: tls-zookeeper
+  # settings for generating certs for recovery
+  autorecovery:
+    cert_name: tls-recovery
+  # settings for generating certs for toolset
+  toolset:
+    cert_name: tls-toolset
+```
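+
+For example, to enable TLS for the proxies and brokers with self-signed certificates from `cert-manager`, you could combine the settings above into the following flags (a sketch, assuming cert-manager is already installed):
+
+```bash
+--set tls.enabled=true \
+--set tls.proxy.enabled=true \
+--set tls.broker.enabled=true \
+--set certs.internal_issuer.enabled=true
+```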
+
+### Authentication
+
+Authentication is disabled by default. You can set `auth.authentication.enabled` to `true` to turn on authentication.
+Currently, this chart only supports the JWT authentication provider. You can set `auth.authentication.provider` to `jwt` to use the JWT authentication provider.
+
+```yaml
+# Enable or disable broker authentication and authorization.
+auth:
+  authentication:
+    enabled: false
+    provider: "jwt"
+    jwt:
+      # Enable JWT authentication
+      # If the token is generated by a secret key, set usingSecretKey to true.
+      # If the token is generated by a private key, set usingSecretKey to false.
+      usingSecretKey: false
+  superUsers:
+    # broker to broker communication
+    broker: "broker-admin"
+    # proxy to broker communication
+    proxy: "proxy-admin"
+    # pulsar-admin client to broker/proxy communication
+    client: "admin"
+```
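+
+For example, the following flags enable JWT authentication with an asymmetric key pair, matching the fields documented above:
+
+```bash
+--set auth.authentication.enabled=true \
+--set auth.authentication.provider=jwt \
+--set auth.authentication.jwt.usingSecretKey=false
+```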
+
+If you decide to enable authentication, you can run the [prepare helm release](#prepare-the-helm-release) script to generate token secret keys and tokens for the three super users specified in the `auth.superUsers` field. The generated token keys and super-user tokens are uploaded and stored as Kubernetes secrets prefixed with `<pulsar-release-name>-token-`. You can use the following command to find those secrets.
+
+```bash
+kubectl get secrets -n <k8s-namespace>
+```
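+
+Since the generated secrets share the `<pulsar-release-name>-token-` prefix, you can narrow the listing with a standard `grep`. The release name `pulsar` below is illustrative:
+
+```bash
+kubectl get secrets -n <k8s-namespace> | grep pulsar-token-
+```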
+
+### Authorization
+
+Authorization is disabled by default. Authorization can be enabled only if authentication is enabled.
+
+```yaml
+auth:
+  authorization:
+    enabled: false
+```
+
+You can include this option to turn on authorization.
+
+```bash
+--set auth.authorization.enabled=true
+```
+
+### CPU and RAM resource requirements
+
+The resource requests and number of replicas for the Pulsar components in this chart are set by default to be adequate for a small production deployment. If you are trying to deploy a non-production instance, you can reduce the defaults to fit into a smaller cluster.
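+
+To review every value the chart exposes, including resource requests and replica counts, before deciding what to reduce, you can dump the chart's defaults. This sketch assumes you are in the `deployment/kubernetes/helm` directory of a Pulsar checkout (cloned in the steps below):
+
+```bash
+helm show values pulsar > defaults.yaml
+```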
+
+Once you have collected all of your configuration options, install the dependent charts before proceeding to install the Pulsar chart.
+
+## Install Dependent Charts
+
+### Install Local Storage Provisioner
+
+If you decide to use local persistent volumes as the persistent storage, you 
need to [install a storage provisioner for local persistent 
volumes](https://kubernetes.io/blog/2019/04/04/kubernetes-1.14-local-persistent-volumes-ga/).
+
+One of the easiest ways to get started is to use the local storage provisioner provided along with the Pulsar Helm chart.
+
+```bash
+helm repo add streamnative https://charts.streamnative.io
+helm repo update
+helm install pulsar-storage-provisioner streamnative/local-storage-provisioner
+```
+
+### Install Cert Manager
+
+The Pulsar Chart uses [cert-manager](https://github.com/jetstack/cert-manager) 
to automate provisioning and managing TLS certificates. If you decide to enable 
TLS encryption for brokers or proxies, you need to install cert-manager first.
+
+You can follow the [official 
instructions](https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm)
 to install cert-manager.
+
+Alternatively, we provide a bash script 
[install-cert-manager.sh](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/cert-manager/install-cert-manager.sh)
 to install a cert-manager release to namespace `cert-manager`.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+./scripts/cert-manager/install-cert-manager.sh
+```
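+
+You can verify that cert-manager is running before proceeding. Since the script installs it into the `cert-manager` namespace, its pods should appear there:
+
+```bash
+kubectl get pods -n cert-manager
+```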
+
+## Prepare the Helm Release
+
+Once you have installed all the dependent charts and collected all of your configuration options, you can run [prepare_helm_release.sh](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/prepare_helm_release.sh) to prepare the Helm release.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+./scripts/pulsar/prepare_helm_release.sh -n <k8s-namespace> -k 
<helm-release-name>
+```
+
+The `prepare_helm_release.sh` script creates the following resources:
+
+- A k8s namespace for installing the Pulsar release
+- A secret for storing the username and password of the control center administrator. The username and password can be passed to `prepare_helm_release.sh` through the flags `--control-center-admin` and `--control-center-password`. They are used for logging into the Grafana dashboard and Pulsar Manager.
+- The JWT secret keys and tokens for three superusers: `broker-admin`, `proxy-admin`, and `admin`. By default, it generates an asymmetric public/private key pair. You can choose to generate a symmetric secret key by specifying `--symmetric`.
+    - `proxy-admin` role is used for proxies to communicate to brokers.
+    - `broker-admin` role is used for inter-broker communications.
+    - `admin` role is used by the admin tools.
+
+## Deploy using Helm
+
+Once you have done the following three things, you can proceed to install a 
Helm release.
+
+- Collect all of your configuration options
+- Install dependent charts
+- Prepare the Helm release
+
+In this example, we've named our Helm release `pulsar`.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+helm upgrade --install pulsar pulsar \
+    --timeout 600s \
+    --set [your configuration options]
+```
+
+You can also use the `--version <installation version>` option if you would like to install a specific version of the Pulsar Helm chart.
+
+## Monitoring the Deployment
+
+Once the deployment finishes, which may take 5-10 minutes, the install command outputs the list of resources it installed.
+
+The status of the deployment can be checked by running `helm status pulsar`. You can also run this command in another terminal while the deployment is taking place.
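+
+You can also watch the pods come up directly, using the namespace you passed to the prepare script:
+
+```bash
+kubectl get pods -n <k8s-namespace> -w
+```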
+
+## Accessing the Pulsar Cluster
+
+The default values create a `ClusterIP` service for each of the following resources, which you can use to interact with the cluster.
+
+- Proxy: You can use the IP address to produce and consume messages to the 
installed Pulsar cluster.
+- Pulsar Manager: You can access the Pulsar Manager UI at `http://<pulsar-manager-ip>:9527`.
+- Grafana Dashboard: You can access the Grafana dashboard at 
`http://<grafana-dashboard-ip>:3000`.
+
+To find the IP addresses of those components, use:
+
+```bash
+kubectl get service -n <k8s-namespace>
+```
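+
+To extract a single address, such as the proxy's, you can use a `jsonpath` query. The service name `<pulsar-release-name>-proxy` is an assumption; confirm the actual name from the listing produced by the previous command:
+
+```bash
+kubectl get service <pulsar-release-name>-proxy -n <k8s-namespace> \
+    -o jsonpath='{.spec.clusterIP}'
+```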
\ No newline at end of file
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-install.md 
b/site2/website/versioned_docs/version-2.5.0/helm-install.md
new file mode 100644
index 0000000..0a9cb57
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-install.md
@@ -0,0 +1,40 @@
+---
+id: version-2.5.0-helm-install
+title: Install Apache Pulsar using Helm
+sidebar_label: Install 
+original_id: helm-install
+---
+
+Install Apache Pulsar on Kubernetes with the official Pulsar Helm chart.
+
+## Requirements
+
+In order to deploy Apache Pulsar on Kubernetes, the following are required.
+
+1. kubectl 1.14 or higher, compatible with your cluster ([+/- 1 minor release 
from your 
cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin))
+2. Helm v3 (3.0.2 or higher)
+3. A Kubernetes cluster, version 1.14 or higher.
+
+## Environment setup
+
+Before proceeding to deploy Pulsar, you need to prepare your environment.
+
+### Tools
+
+`helm` and `kubectl` need to be [installed on your computer](helm-tools.md).
+
+## Cloud cluster preparation
+
+> NOTE: Kubernetes 1.14 or higher is required, due to the usage of certain 
Kubernetes features.
+
+Follow the instructions to create and connect to the Kubernetes cluster of 
your choice:
+
+- [Google Kubernetes Engine](helm-prepare.md#google-kubernetes-engine)
+
+## Deploy Pulsar
+
+With the environment set up and configuration generated, you can now proceed 
to the [deployment of Pulsar](helm-deploy.md).
+
+## Upgrade Pulsar
+
+If you are upgrading an existing Kubernetes installation, follow the [upgrade 
documentation](helm-upgrade.md) instead.
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-overview.md 
b/site2/website/versioned_docs/version-2.5.0/helm-overview.md
new file mode 100644
index 0000000..1930871
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-overview.md
@@ -0,0 +1,111 @@
+---
+id: version-2.5.0-helm-overview
+title: Apache Pulsar Helm Chart
+sidebar_label: Overview
+original_id: helm-overview
+---
+
+This is the officially supported Helm chart for installing Apache Pulsar in a cloud-native environment. It was enhanced based on StreamNative's [Helm Chart](https://github.com/streamnative/charts).
+
+## Introduction
+
+The Apache Pulsar Helm chart is one of the most convenient ways 
+to operate Pulsar on Kubernetes. This chart contains all the required 
components to get started and can scale to large deployments.
+
+This chart includes all the components for a complete experience, but each 
part can be configured to install separately.
+
+- Pulsar core components:
+    - ZooKeeper
+    - Bookies
+    - Brokers
+    - Function workers
+    - Proxies
+- Control Center:
+    - Pulsar Manager
+    - Prometheus
+    - Grafana
+    - Alert Manager
+
+It includes support for:
+
+- Security
+    - Automatically provisioned TLS certs, using 
[Jetstack](https://www.jetstack.io/)'s 
[cert-manager](https://cert-manager.io/docs/)
+        - self-signed
+        - [Let's Encrypt](https://letsencrypt.org/)
+    - TLS Encryption
+        - Proxy
+        - Broker
+        - Toolset
+        - Bookie
+        - ZooKeeper
+    - Authentication
+        - JWT
+    - Authorization
+- Storage
+    - Non-persistent storage
+    - Persistent Volumes
+    - Local Persistent Volumes
+- Functions
+    - Kubernetes Runtime
+    - Process Runtime
+    - Thread Runtime
+- Operations
+    - Independent Image Versions for all components, enabling controlled 
upgrades
+
+## Pulsar Helm chart quick start
+
+For those looking to get up and running with these charts as fast
+as possible, in a **non-production** use case, we provide
+a [quick start guide](getting-started-helm.md) for Proof of Concept (PoC) 
deployments.
+
+This guide walks the user through deploying these charts with default
+values & features, but *does not* meet production-ready requirements.
+If you wish to deploy these charts into production under sustained load,
+you should follow the complete [Installation Guide](helm-install.md).
+
+## Troubleshooting
+
+We've done our best to make these charts as seamless as possible, but occasionally trouble does surface outside of our control. We've collected tips and tricks for troubleshooting common issues. Please examine these first before raising an [issue](https://github.com/apache/pulsar/issues/new/choose), and feel free to add to them by raising a [Pull Request](https://github.com/apache/pulsar/compare)!
+
+## Installation
+
+The Apache Pulsar Helm chart contains all required dependencies.
+
+If you are just looking to deploy a Proof of Concept for testing,
+we strongly suggest you follow our [Quick Start 
Guide](getting-started-helm.md) for your first iteration.
+
+1. [Preparation](helm-prepare.md)
+2. [Deployment](helm-deploy.md)
+
+## Upgrading
+
+Once your Pulsar Chart is installed, configuration changes and chart
+updates should be done using `helm upgrade`.
+
+```bash
+git clone https://github.com/apache/pulsar
+cd pulsar/deployment/kubernetes/helm
+helm get values <pulsar-release-name> > pulsar.yaml
+helm upgrade <pulsar-release-name> pulsar -f pulsar.yaml
+```
+
+For more detailed information, see [Upgrading](helm-upgrade.md).
+
+## Uninstall
+
+To uninstall the Pulsar Chart, run the following command:
+
+```bash
+helm delete <pulsar-release-name>
+```
+
+For the purposes of continuity, these charts have some Kubernetes objects that are not removed when performing `helm delete`.
+We require you to *consciously* remove these items, as they affect re-deployment should you choose to do so.
+
+* PVCs for stateful data, which you must *consciously* remove:
+    - ZooKeeper: This is your metadata.
+    - BookKeeper: This is your data.
+    - Prometheus: This is your metrics data, which can be safely removed.
+* Secrets, if generated by our [prepare release 
script](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/prepare_helm_release.sh).
 They contain secret keys, tokens, etc. You can use [cleanup release 
script](https://github.com/apache/pulsar/blob/master/deployment/kubernetes/helm/scripts/pulsar/cleanup_helm_release.sh)
 to remove these secrets and tokens as needed.
\ No newline at end of file
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-prepare.md 
b/site2/website/versioned_docs/version-2.5.0/helm-prepare.md
new file mode 100644
index 0000000..373589c
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-prepare.md
@@ -0,0 +1,78 @@
+---
+id: version-2.5.0-helm-prepare
+title: Preparing Kubernetes resources
+sidebar_label: Prepare
+original_id: helm-prepare
+---
+
+For a fully functional Pulsar cluster, you will need a few resources before 
deploying the Apache Pulsar Helm chart. The following provides instructions to 
prepare the Kubernetes cluster before deploying the Pulsar Helm chart.
+
+- [Google Kubernetes Engine](#google-kubernetes-engine)
+
+## Google Kubernetes Engine
+
+To make getting started easier, a script is provided to automate the cluster creation. Alternatively, a cluster can be created manually.
+
+- [Manual cluster creation](#manual-cluster-creation)
+- [Scripted cluster creation](#scripted-cluster-creation)
+
+### Manual cluster creation
+
+To provision a Kubernetes cluster manually, follow the [GKE 
instructions](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster).
+
+Alternatively, you can use the [instructions](#scripted-cluster-creation) below to provision a GKE cluster as needed.
+
+### Scripted cluster creation
+
+A [bootstrap 
script](https://github.com/streamnative/charts/tree/master/scripts/pulsar/gke_bootstrap_script.sh)
 has been created to automate much of the setup process for users on GCP/GKE.
+
+The script will:
+
+1. Create a new GKE cluster.
+2. Allow the cluster to modify DNS records.
+3. Set up `kubectl` and connect it to the cluster.
+
+Google Cloud SDK is a dependency of this script, so make sure it's [set up 
correctly](helm-tools.md#connect-to-a-gke-cluster) in order for the script to 
work.
+
+The script reads various parameters from environment variables and an argument 
`up` or `down` for bootstrap and clean-up respectively.
+
+The table below describes all variables.
+
+| **Variable** | **Description** | **Default value** |
+| ------------ | --------------- | ----------------- |
+| PROJECT | The ID of your GCP project | No default; this is required |
+| CLUSTER_NAME | The name of the GKE cluster | `pulsar-dev` |
+| CONFDIR | The configuration directory to store the Kubernetes config | `${HOME}/.config/streamnative` |
+| INT_NETWORK | The IP space to use within this cluster | `default` |
+| LOCAL_SSD_COUNT | The number of local SSDs | `4` |
+| MACHINE_TYPE | The type of machine to use for nodes | `n1-standard-4` |
+| NUM_NODES | The number of nodes to be created in each of the cluster's zones | `4` |
+| PREEMPTIBLE | Create nodes using preemptible VM instances in the new cluster | `false` |
+| REGION | The compute region for the cluster | `us-east1` |
+| USE_LOCAL_SSD | Whether to create a cluster with local SSDs | `false` |
+| ZONE | The compute zone for the cluster | `us-east1-b` |
+| ZONE_EXTENSION | The extension (`a`, `b`, `c`) of the zone name of the cluster | `b` |
+| EXTRA_CREATE_ARGS | Extra arguments passed to the create command | |
+
+Run the script by passing in your desired parameters. It can work with the default parameters, except for `PROJECT`, which is required:
+
+```bash
+PROJECT=<gcloud project id> scripts/pulsar/gke_bootstrap_script.sh up
+```
+
+The script can also be used to clean up the created GKE resources:
+
+```bash
+PROJECT=<gcloud project id> scripts/pulsar/gke_bootstrap_script.sh down
+```
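+
+Any of the variables in the table can be combined in a single invocation. For example, a sketch of a smaller, preemptible development cluster:
+
+```bash
+PROJECT=<gcloud project id> \
+MACHINE_TYPE=n1-standard-2 \
+NUM_NODES=2 \
+PREEMPTIBLE=true \
+scripts/pulsar/gke_bootstrap_script.sh up
+```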
+
+#### Create a cluster with local SSDs
+
+If you are planning to install a Pulsar Helm chart using local persistent volumes, you need to create a GKE cluster with local SSDs. You can do so using the provided script by setting `USE_LOCAL_SSD` to `true`. A sample command is listed below:
+
+```bash
+PROJECT=<gcloud project id> USE_LOCAL_SSD=true 
LOCAL_SSD_COUNT=<local-ssd-count> scripts/pulsar/gke_bootstrap_script.sh up
+```
+
+## Next Steps
+
+Continue with the [installation of the chart](helm-deploy.md) once you have 
the cluster up and running.
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-tools.md 
b/site2/website/versioned_docs/version-2.5.0/helm-tools.md
new file mode 100644
index 0000000..47785cf
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-tools.md
@@ -0,0 +1,43 @@
+---
+id: version-2.5.0-helm-tools
+title: Required tools for deploying Pulsar Helm Chart
+sidebar_label: Required Tools
+original_id: helm-tools
+---
+
+Before deploying Pulsar to your Kubernetes cluster, there are some tools you 
must have installed locally.
+
+## kubectl
+
+kubectl is the tool that talks to the Kubernetes API. kubectl 1.14 or higher 
is required and it needs to be compatible with your cluster ([+/- 1 minor 
release from your 
cluster](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin)).
+
+[Install kubectl locally by following the Kubernetes 
documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl).
+
+The server version of kubectl cannot be obtained until we connect to a 
cluster. Proceed with setting up Helm.
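+
+You can confirm the installed client version with:
+
+```bash
+kubectl version --client
+```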
+
+## Helm
+
+Helm is the package manager for Kubernetes. The Apache Pulsar Helm Chart is 
tested and supported with Helm v3.
+
+### Get Helm
+
+You can get Helm from the project's [releases 
page](https://github.com/helm/helm/releases), or follow other options under the 
official documentation of [installing 
Helm](https://helm.sh/docs/intro/install/).
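+
+Once installed, verify that you are running Helm v3:
+
+```bash
+helm version
+```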
+
+### Next steps
+
+Once kubectl and Helm are configured, you can continue to configure your [Kubernetes cluster](helm-prepare.md).
+
+## Additional information
+
+### Templates
+
+Templating in Helm is done via golang's 
[text/template](https://golang.org/pkg/text/template/) and 
[sprig](https://godoc.org/github.com/Masterminds/sprig).
+
+Some information on how the inner workings behave:
+
+- [Functions and 
Pipelines](https://helm.sh/docs/chart_template_guide/functions_and_pipelines/)
+- [Subcharts and 
Globals](https://helm.sh/docs/chart_template_guide/subcharts_and_globals/)
+
+### Tips and tricks
+
+The Helm repository has some additional information on developing with Helm in its [tips and tricks section](https://helm.sh/docs/howto/charts_tips_and_tricks/).
\ No newline at end of file
diff --git a/site2/website/versioned_docs/version-2.5.0/helm-upgrade.md 
b/site2/website/versioned_docs/version-2.5.0/helm-upgrade.md
new file mode 100644
index 0000000..fc762b8
--- /dev/null
+++ b/site2/website/versioned_docs/version-2.5.0/helm-upgrade.md
@@ -0,0 +1,35 @@
+---
+id: version-2.5.0-helm-upgrade
+title: Upgrade a Pulsar Helm release
+sidebar_label: Upgrade
+original_id: helm-upgrade
+---
+
+Before upgrading your Pulsar installation, you need to check the changelog 
corresponding to the specific release you want to upgrade
+to and look for any release notes that might pertain to the new Pulsar chart 
version.
+
+We also recommend that you provide all values using the `helm upgrade --set key=value` syntax or `-f values.yaml` instead of using `--reuse-values`, because some of the current values might be deprecated.
+
+> **NOTE**:
+>
+> You can retrieve your previous `--set` arguments cleanly, with `helm get values <release-name>`. If you direct this into a file (`helm get values <release-name> > pulsar.yaml`), you can safely
+> pass this file via `-f`. Thus `helm upgrade <release-name> pulsar -f pulsar.yaml` safely replaces the behavior of `--reuse-values`.
+
+## Steps
+
+The following are the steps to upgrade Apache Pulsar to a newer version:
+
+1. Check the change log for the specific version you would like to upgrade to
+2. Go through [deployment documentation](helm-deploy.md) step by step
+3. Extract your previous `--set` arguments with
+    ```bash
+    helm get values <release-name> > pulsar.yaml
+    ```
+4. Decide on all the values you need to set
+5. Perform the upgrade, with all `--set` arguments extracted in step 3
+    ```bash
+    helm upgrade <release-name> pulsar \
+        --version <new version> \
+        -f pulsar.yaml \
+        --set ...
+    ```
\ No newline at end of file
diff --git a/site2/website/versioned_sidebars/version-2.5.0-sidebars.json 
b/site2/website/versioned_sidebars/version-2.5.0-sidebars.json
index 50c92fd..5270e22 100644
--- a/site2/website/versioned_sidebars/version-2.5.0-sidebars.json
+++ b/site2/website/versioned_sidebars/version-2.5.0-sidebars.json
@@ -4,6 +4,7 @@
       "version-2.5.0-pulsar-2.0",
       "version-2.5.0-standalone",
       "version-2.5.0-standalone-docker",
+      "version-2.5.0-kubernetes-helm",
       "version-2.5.0-client-libraries"
     ],
     "Concepts and Architecture": [
@@ -49,6 +50,14 @@
       "version-2.5.0-sql-deployment-configurations",
       "version-2.5.0-sql-rest-api"
     ],
+    "Kubernetes (Helm)": [
+      "version-2.5.0-helm-overview",
+      "version-2.5.0-helm-prepare",
+      "version-2.5.0-helm-install",
+      "version-2.5.0-helm-deploy",
+      "version-2.5.0-helm-upgrade",
+      "version-2.5.0-helm-tools"
+    ],
     "Deployment": [
       "version-2.5.0-deploy-aws",
       "version-2.5.0-deploy-kubernetes",
