This is an automated email from the ASF dual-hosted git repository.

ywkim pushed a commit to branch cnb
in repository https://gitbox.apache.org/repos/asf/bigtop.git


The following commit(s) were added to refs/heads/cnb by this push:
     new 248b1cb  BIGTOP-3260: Add hostpath provisioner and NFS exports
248b1cb is described below

commit 248b1cbd7b6064200d9c1905137061b44187270e
Author: Youngwoo Kim <yw...@apache.org>
AuthorDate: Tue Oct 22 15:32:57 2019 +0900

    BIGTOP-3260: Add hostpath provisioner and NFS exports
---
 README.md                          | 37 +++++++++++++++++++++++++++++++---
 kubectl/plugin/kubectl-bigtop      | 21 ++++++++++++++++---
 storage/hostpath/README.md         | 16 +++++++++++++++
 storage/hostpath/hostpath-pvc.yaml | 11 ++++++++++
 storage/rook/nfs/nfs-ceph.yaml     | 41 ++++++++++++++++++++++++++++++++++++++
 storage/rook/nfs/nfs-hostpath.yaml | 35 ++++++++++++++++++++++++++++++++
 storage/rook/nfs/nfs.yaml          | 37 ++++++++++++++++++++++++++++++++++
 storage/rook/nfs/storageclass.yaml | 13 ++++++++++++
 8 files changed, 205 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 79b9e3a..114029c 100755
--- a/README.md
+++ b/README.md
@@ -115,7 +115,7 @@ $ vagrant ssh k8s-1
 ```
 ```
 k8s-1$ kubectl plugin list
-k8s-1$ kubectl bigtop kubectl-config && kubectl bigtop helm-install
+k8s-1$ kubectl bigtop kubectl-config && kubectl bigtop helm-deploy
 
 ```
 
@@ -176,8 +176,39 @@ $ kubectl get storageclass
 rook-ceph-block
 ```
 
+Mark the ```rook-ceph-block``` StorageClass as the default:
+```
+kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+
+```
+
+### NFS
+
+```
+$ kubectl create -f dl/rook-1.1.2/cluster/examples/kubernetes/nfs/operator.yaml
+$ kubectl -n rook-nfs-system get pod
+
+# NFS backed by the default StorageClass
+$ kubectl create -f storage/rook/nfs/nfs.yaml
+
+# NFS backed by Ceph RBD volumes
+$ kubectl create -f storage/rook/nfs/nfs-ceph.yaml
+
+# NFS backed by the hostpath StorageClass
+$ kubectl create -f storage/rook/nfs/nfs-hostpath.yaml
+
+# StorageClass backed by the NFS export
+$ kubectl create -f storage/rook/nfs/storageclass.yaml
+```
+
+To mark the ```rook-nfs-share1``` StorageClass as the default:
+```
+$ kubectl patch storageclass rook-nfs-share1 -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+$ kubectl get storageclass
+```
+
 ### Rook Minio
-Create Minio operator:
+Deploy the Minio operator:
 ```
 $ kubectl create -f dl/rook-1.1.2/cluster/examples/kubernetes/minio/operator.yaml
 
@@ -265,7 +296,7 @@ $ kubectl -n bigtop exec kafka-client -- kafka-topics \
 ```
 
 ### Schema Registry
-Optionally, You can create schema registry service for Kafka:
+Optionally, you can deploy a Schema Registry service for Kafka:
 ```
 helm install --name kafka-schema-registry --namespace bigtop -f kafka/schema-registry/values.yaml \
 --set kafkaStore.overrideBootstrapServers="kafka-0.kafka-headless:9092\,kafka-1.kafka-headless:9092\,kafka-2.kafka-headless:9092" \
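
As a quick sanity check of the default-class patches above, a claim that omits storageClassName should be bound by whichever StorageClass carries the storageclass.kubernetes.io/is-default-class annotation. A minimal sketch, not part of this commit (the claim name default-class-test is illustrative):
```
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: default-class-test
spec:
  # no storageClassName: the cluster's default StorageClass is used
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
kubectl get pvc default-class-test
```
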
diff --git a/kubectl/plugin/kubectl-bigtop b/kubectl/plugin/kubectl-bigtop
index 1291376..a59beb3 100755
--- a/kubectl/plugin/kubectl-bigtop
+++ b/kubectl/plugin/kubectl-bigtop
@@ -49,7 +49,7 @@ if [[ "$1" == "cluster-info" ]]; then
 fi
 
 # Install Helm
-if [[ "$1" == "helm-install" ]]; then
+if [[ "$1" == "helm-deploy" ]]; then
     curl -L https://git.io/get_helm.sh | bash
     helm version -c
     kubectl --namespace kube-system create serviceaccount tiller
@@ -63,12 +63,14 @@ if [[ "$1" == "helm-install" ]]; then
 fi
 
 # Install Rook-Ceph
-if [[ "$1" == "rook-ceph-install" ]]; then
+if [[ "$1" == "rook-ceph-deploy" ]]; then
 
     kubectl create -f $BIGTOP_HOME/dl/rook-1.1.2/cluster/examples/kubernetes/ceph/common.yaml
     kubectl create -f $BIGTOP_HOME/dl/rook-1.1.2/cluster/examples/kubernetes/ceph/operator.yaml
     kubectl -n rook-ceph get pod
 
+    sleep 10s
+
     # Ceph cluster
     # test
     kubectl create -f $BIGTOP_HOME/storage/rook/ceph/cluster-test.yaml
@@ -82,6 +84,8 @@ if [[ "$1" == "rook-ceph-install" ]]; then
 
     # StorageClass: Ceph RBD
     kubectl create -f $BIGTOP_HOME/dl/rook-1.1.2/cluster/examples/kubernetes/ceph/csi/rbd/storageclass.yaml
+    # Mark the StorageClass as the default
+    kubectl patch storageclass rook-ceph-block -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
     kubectl get storageclass
     exit 0
 fi
@@ -92,8 +96,19 @@ if [[ "$1" == "rook-ceph-toolbox" ]]; then
     exit 0
 fi
 
+# Rook NFS
+if [[ "$1" == "rook-nfs-deploy" ]]; then
+    kubectl create -f $BIGTOP_HOME/dl/rook-1.1.2/cluster/examples/kubernetes/nfs/operator.yaml
+    sleep 10s
+    kubectl -n rook-nfs-system get pod
+    kubectl create -f $BIGTOP_HOME/storage/rook/nfs/nfs.yaml
+    # kubectl create -f $BIGTOP_HOME/storage/rook/nfs/nfs-ceph.yaml
+    kubectl create -f $BIGTOP_HOME/storage/rook/nfs/storageclass.yaml
+    kubectl get storageclass
+    exit 0
+fi
+
 # Minio
-if [[ "$1" == "minio-install" ]]; then
+if [[ "$1" == "minio-deploy" ]]; then
     helm install --name bigtop-minio --namespace bigtop -f storage/minio/values.yaml stable/minio
     exit 0
 fi
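
With the subcommands renamed above, the plugin dispatches on its first argument. One possible end-to-end sequence on the k8s-1 node is shown below; the ordering is a suggestion (Helm before Minio, Rook Ceph before the NFS and Minio steps so a default StorageClass exists), not something mandated by this commit:
```
k8s-1$ kubectl bigtop kubectl-config
k8s-1$ kubectl bigtop helm-deploy
k8s-1$ kubectl bigtop rook-ceph-deploy
k8s-1$ kubectl bigtop rook-nfs-deploy
k8s-1$ kubectl bigtop minio-deploy
```
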
diff --git a/storage/hostpath/README.md b/storage/hostpath/README.md
new file mode 100644
index 0000000..58e2818
--- /dev/null
+++ b/storage/hostpath/README.md
@@ -0,0 +1,16 @@
+# Install the dynamic hostpath provisioner Helm chart
+
+```
+$ helm repo add rimusz https://charts.rimusz.net
+$ helm repo update
+$ helm upgrade --install hostpath-provisioner \
+--namespace kube-system \
+--set storageClass.defaultClass=false \
+rimusz/hostpath-provisioner
+
+```
+
+Mark the ```hostpath``` StorageClass as the default:
+```
+kubectl patch storageclass hostpath -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
+```
diff --git a/storage/hostpath/hostpath-pvc.yaml b/storage/hostpath/hostpath-pvc.yaml
new file mode 100644
index 0000000..d7cfc28
--- /dev/null
+++ b/storage/hostpath/hostpath-pvc.yaml
@@ -0,0 +1,11 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: hostpath-pvc
+spec:
+  storageClassName: "hostpath"
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 2Gi
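
The new claim can be exercised on its own once the hostpath provisioner chart from storage/hostpath/README.md is installed; the commands below are a usage sketch rather than part of the commit:
```
$ kubectl create -f storage/hostpath/hostpath-pvc.yaml
$ kubectl get pvc hostpath-pvc
```
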
diff --git a/storage/rook/nfs/nfs-ceph.yaml b/storage/rook/nfs/nfs-ceph.yaml
new file mode 100644
index 0000000..1a259b2
--- /dev/null
+++ b/storage/rook/nfs/nfs-ceph.yaml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name:  rook-nfs
+---
+# A rook ceph cluster must be running
+# Create a rook ceph cluster using examples in rook/cluster/examples/kubernetes/ceph
+# Refer to https://rook.io/docs/rook/master/ceph-quickstart.html for a quick rook cluster setup
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nfs-ceph-claim
+  namespace: rook-nfs
+spec:
+  storageClassName: rook-ceph-block
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 2Gi
+---
+apiVersion: nfs.rook.io/v1alpha1
+kind: NFSServer
+metadata:
+  name: rook-nfs
+  namespace: rook-nfs
+spec:
+  replicas: 1
+  exports:
+  - name: share1
+    server:
+      accessMode: ReadWrite
+      squash: "none"
+    # A PersistentVolumeClaim must be created before creating the NFSServer CRD instance.
+    # Create a Ceph cluster before using this example.
+    # Create a Ceph PVC after creating the Rook Ceph cluster, using ceph-pvc.yaml.
+    persistentVolumeClaim:
+      claimName: nfs-ceph-claim
+  # A key/value list of annotations
+  annotations:
+  #  key: value
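
To confirm the Ceph-backed export comes up, assuming the Rook Ceph cluster and the rook-ceph-block StorageClass from the earlier steps are already in place:
```
$ kubectl create -f storage/rook/nfs/nfs-ceph.yaml
$ kubectl -n rook-nfs get pod,pvc
```
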
diff --git a/storage/rook/nfs/nfs-hostpath.yaml b/storage/rook/nfs/nfs-hostpath.yaml
new file mode 100644
index 0000000..193c767
--- /dev/null
+++ b/storage/rook/nfs/nfs-hostpath.yaml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name:  rook-nfs
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: hostpath-pvc
+  namespace: rook-nfs
+spec:
+  storageClassName: hostpath
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: nfs.rook.io/v1alpha1
+kind: NFSServer
+metadata:
+  name: rook-nfs
+  namespace: rook-nfs
+spec:
+  replicas: 1
+  exports:
+  - name: share1
+    server:
+      accessMode: ReadWrite
+      squash: "none"
+    persistentVolumeClaim:
+      claimName: hostpath-pvc
+  # A key/value list of annotations
+  annotations:
+  #  key: value
diff --git a/storage/rook/nfs/nfs.yaml b/storage/rook/nfs/nfs.yaml
new file mode 100644
index 0000000..aa6a079
--- /dev/null
+++ b/storage/rook/nfs/nfs.yaml
@@ -0,0 +1,37 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name:  rook-nfs
+---
+# A default storageclass must be present
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: nfs-default-claim
+  namespace: rook-nfs
+spec:
+  accessModes:
+  - ReadWriteMany
+  resources:
+    requests:
+      storage: 1Gi
+---
+apiVersion: nfs.rook.io/v1alpha1
+kind: NFSServer
+metadata:
+  name: rook-nfs
+  namespace: rook-nfs
+spec:
+  serviceAccountName: rook-nfs
+  replicas: 1
+  exports:
+  - name: share1
+    server:
+      accessMode: ReadWrite
+      squash: "none"
+    # A PersistentVolumeClaim must be created before creating the NFSServer CRD instance.
+    persistentVolumeClaim:
+      claimName: nfs-default-claim
+  # A key/value list of annotations
+  annotations:
+  #  key: value
diff --git a/storage/rook/nfs/storageclass.yaml b/storage/rook/nfs/storageclass.yaml
new file mode 100644
index 0000000..2f25f3c
--- /dev/null
+++ b/storage/rook/nfs/storageclass.yaml
@@ -0,0 +1,13 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  labels:
+    app: rook-nfs
+  name: rook-nfs-share1
+parameters:
+  exportName: share1
+  nfsServerName: rook-nfs
+  nfsServerNamespace: rook-nfs
+provisioner: rook.io/nfs-provisioner
+reclaimPolicy: Delete
+volumeBindingMode: Immediate
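
Once the rook-nfs-share1 class exists, workloads can consume the NFS export through an ordinary claim. A minimal sketch, assuming the claim is created in the bigtop namespace (both the claim name and the namespace are illustrative, not part of this commit):
```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-share1-claim
  namespace: bigtop
spec:
  # provisioned by rook.io/nfs-provisioner via the rook-nfs-share1 class
  storageClassName: rook-nfs-share1
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```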
