bipinsoniguavus closed pull request #5820: Prod build enablement to go with Guavus platform
URL: https://github.com/apache/incubator-superset/pull/5820
 
 
   

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000..ca3c0aca30
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,8 @@
+**/venv
+**/venv2
+**/install
+**/scripts
+**/superset.egg-info
+**/build
+**/dist
+**/node_modules
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 11929a9b7e..2dd6b019a5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,6 @@ envpy3
 build
 *.db
 tmp
-superset_config.py
 local_config.py
 env
 dist
@@ -43,8 +42,4 @@ venv
 @eaDir/
 
 # docker
-/Dockerfile
-/docker-build.sh
-/docker-compose.yml
-/docker-entrypoint.sh
-/docker-init.sh
+venv2
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000..28fbdd494a
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,41 @@
+FROM python:3.6.0
+
+MAINTAINER Arpit Agarwal <[email protected]>
+
+# Configure environment
+ENV LANG=C.UTF-8 \
+    LC_ALL=C.UTF-8 \
+    HOME=/home/work
+
+RUN apt-get update -y && apt-get install -y build-essential libssl-dev \
+    libffi-dev python3-dev libsasl2-dev libldap2-dev \
+    libsasl2-modules
+    
+RUN mkdir -p $HOME/incubator-superset
+
+WORKDIR $HOME/incubator-superset
+
+COPY ./ ./
+
+RUN pip install --upgrade setuptools pip && pip install wheel && python ./setup.py bdist_wheel && pip install ./dist/*.whl
+
+# Install nodejs for custom build
+# https://github.com/apache/incubator-superset/blob/master/docs/installation.rst#making-your-own-build
+# https://nodejs.org/en/download/package-manager/
+RUN curl -sL https://deb.nodesource.com/setup_8.x | bash -
+RUN apt-get install -y nodejs
+RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -; \
+    echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee 
/etc/apt/sources.list.d/yarn.list; \
+    apt-get update; \
+    apt-get install -y yarn
+
+ENV PATH=/home/work/incubator-superset/superset/bin:$PATH \
+    PYTHONPATH=./superset/:$PYTHONPATH
+
+COPY docker-init.sh /usr/local/bin/
+RUN chmod +x /usr/local/bin/docker-init.sh
+
+EXPOSE 8088
+
+ENTRYPOINT ["docker-init.sh"]
+
diff --git a/cleanup-docker.sh b/cleanup-docker.sh
new file mode 100644
index 0000000000..241c4a85e6
--- /dev/null
+++ b/cleanup-docker.sh
@@ -0,0 +1,6 @@
+# remove all exited containers
+docker rm -v $(docker ps -a -q -f status=exited)
+# remove unused images
+docker rmi $(docker images -f "dangling=true" -q)
+# docker container that removes unwanted/unused volumes
+docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes
\ No newline at end of file
diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile
deleted file mode 100644
index 3d474864f7..0000000000
--- a/contrib/docker/Dockerfile
+++ /dev/null
@@ -1,60 +0,0 @@
-FROM python:3.6
-
-MAINTAINER Xiao Hanyu <[email protected]>
-
-# Add a normal user
-RUN useradd --user-group --create-home --shell /bin/bash work
-
-# Configure environment
-ENV LANG=C.UTF-8 \
-    LC_ALL=C.UTF-8 \
-    HOME=/home/work
-
-RUN apt-get update -y
-
-# Install some dependencies
-# http://airbnb.io/superset/installation.html#os-dependencies
-RUN apt-get update -y && apt-get install -y build-essential libssl-dev \
-    libffi-dev python3-dev libsasl2-dev libldap2-dev
-
-RUN apt-get install -y vim less postgresql-client redis-tools
-
-# Install nodejs for custom build
-# https://github.com/apache/incubator-superset/blob/master/docs/installation.rst#making-your-own-build
-# https://nodejs.org/en/download/package-manager/
-RUN curl -sL https://deb.nodesource.com/setup_8.x | bash -
-RUN apt-get install -y nodejs
-RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -; \
-    echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee 
/etc/apt/sources.list.d/yarn.list; \
-    apt-get update; \
-    apt-get install -y yarn
-
-RUN mkdir $HOME/incubator-superset
-
-WORKDIR $HOME/incubator-superset
-
-COPY ./ ./
-
-RUN pip install --upgrade setuptools pip
-RUN pip install -e . && pip install -r requirements-dev.txt
-
-ENV PATH=/home/work/incubator-superset/superset/bin:$PATH \
-    PYTHONPATH=./superset/:$PYTHONPATH
-
-COPY docker-entrypoint.sh /usr/local/bin/
-RUN chmod +x /usr/local/bin/docker-entrypoint.sh
-RUN ln -s usr/local/bin/docker-entrypoint.sh /entrypoint.sh # backwards compat
-
-COPY ./superset ./superset
-RUN chown -R work:work $HOME
-
-USER work
-
-RUN cd superset/assets && yarn
-RUN cd superset/assets && npm run build
-
-HEALTHCHECK CMD ["curl", "-f", "http://localhost:8088/health"]
-
-ENTRYPOINT ["docker-entrypoint.sh"]
-
-EXPOSE 8088
diff --git a/contrib/docker/docker-build.sh b/contrib/docker/docker-build.sh
deleted file mode 100644
index 55f73274d6..0000000000
--- a/contrib/docker/docker-build.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-docker build -t apache/incubator-superset -f Dockerfile .
diff --git a/contrib/docker/docker-init.sh b/contrib/docker/docker-init.sh
deleted file mode 100644
index 940ad4fa34..0000000000
--- a/contrib/docker/docker-init.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-
-set -ex
-
-# Create an admin user (you will be prompted to set username, first and last name before setting a password)
-fabmanager create-admin --app superset
-
-# Initialize the database
-superset db upgrade
-
-# Load some data to play with
-superset load_examples
-
-# Create default roles and permissions
-superset init
-
-# Need to run `npm run build` when enter contains for first time
-cd superset/assets && npm run build && cd ../../
-
-# Start superset worker for SQL Lab
-superset worker &
-
-# To start a development web server, use the -d switch
-superset runserver -d
diff --git a/deployments/postgres-deployment.yaml b/deployments/postgres-deployment.yaml
new file mode 100644
index 0000000000..3729f37d7a
--- /dev/null
+++ b/deployments/postgres-deployment.yaml
@@ -0,0 +1,39 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    service: postgres
+  name: postgres
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        service: postgres
+    spec:
+      containers:
+      - env:
+        - name: POSTGRES_DB
+          value: superset
+        - name: POSTGRES_PASSWORD
+          value: superset
+        - name: POSTGRES_USER
+          value: superset
+        image: postgres:10
+        name: postgres
+        ports:
+        - containerPort: 5432
+        resources: {}
+        volumeMounts:
+        - mountPath: /var/lib/postgresql/data
+          name: postgres-storage
+      restartPolicy: Always
+      volumes:
+      - name: postgres-storage
+        persistentVolumeClaim:
+          claimName: postgres-volume-claim
+status: {}
diff --git a/deployments/postgres-persistent-volume.yaml b/deployments/postgres-persistent-volume.yaml
new file mode 100644
index 0000000000..0bfa733220
--- /dev/null
+++ b/deployments/postgres-persistent-volume.yaml
@@ -0,0 +1,13 @@
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: postgres-volume
+  labels:
+    type: local
+spec:
+  capacity:
+    storage: 100Mi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/postgres/data"
\ No newline at end of file
diff --git a/deployments/postgres-service.yaml b/deployments/postgres-service.yaml
new file mode 100644
index 0000000000..d608d52c50
--- /dev/null
+++ b/deployments/postgres-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    service: postgres
+  name: postgres
+spec:
+  ports:
+  - name: "5432"
+    port: 5432
+    targetPort: 5432
+  selector:
+    service: postgres
+status:
+  loadBalancer: {}
diff --git a/deployments/postgres-volume-claim.yaml b/deployments/postgres-volume-claim.yaml
new file mode 100644
index 0000000000..d937f4836e
--- /dev/null
+++ b/deployments/postgres-volume-claim.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  creationTimestamp: null
+  labels:
+    service: postgres
+  name: postgres-volume-claim
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
+status: {}
diff --git a/deployments/redis-deployment.yaml b/deployments/redis-deployment.yaml
new file mode 100644
index 0000000000..f30c733601
--- /dev/null
+++ b/deployments/redis-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    service: redis
+  name: redis
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        service: redis
+    spec:
+      containers:
+      - image: redis:3.2
+        name: redis
+        ports:
+        - containerPort: 6379
+        resources: {}
+        volumeMounts:
+        - mountPath: /data
+          name: redis-storage
+      restartPolicy: Always
+      volumes:
+      - name: redis-storage
+        persistentVolumeClaim:
+          claimName: redis-volume-claim
+status: {}
diff --git a/deployments/redis-persistent-volume.yaml b/deployments/redis-persistent-volume.yaml
new file mode 100644
index 0000000000..0413132c54
--- /dev/null
+++ b/deployments/redis-persistent-volume.yaml
@@ -0,0 +1,13 @@
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: redis-volume
+  labels:
+    type: local
+spec:
+  capacity:
+    storage: 100Mi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/redis/data"
\ No newline at end of file
diff --git a/deployments/redis-service.yaml b/deployments/redis-service.yaml
new file mode 100644
index 0000000000..c68b66fbcf
--- /dev/null
+++ b/deployments/redis-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    service: redis
+  name: redis
+spec:
+  ports:
+  - name: "6379"
+    port: 6379
+    targetPort: 6379
+  selector:
+    service: redis
+status:
+  loadBalancer: {}
diff --git a/deployments/redis-volume-claim.yaml b/deployments/redis-volume-claim.yaml
new file mode 100644
index 0000000000..5a94d64c09
--- /dev/null
+++ b/deployments/redis-volume-claim.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  creationTimestamp: null
+  labels:
+    service: redis
+  name: redis-volume-claim
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
+status: {}
diff --git a/deployments/superset-deployment.yaml b/deployments/superset-deployment.yaml
new file mode 100644
index 0000000000..efa2576a30
--- /dev/null
+++ b/deployments/superset-deployment.yaml
@@ -0,0 +1,54 @@
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  creationTimestamp: null
+  labels:
+    service: superset
+  name: superset
+spec:
+  replicas: 1
+  strategy:
+    type: Recreate
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        service: superset
+    spec:
+      containers:
+      - args:
+        - tail
+        - -f
+        - /dev/null
+        env:
+        - name: POSTGRES_DB
+          value: superset
+        - name: POSTGRES_HOST
+          value: postgres
+        - name: POSTGRES_PASSWORD
+          value: superset
+        - name: POSTGRES_PORT
+          value: "5432"
+        - name: POSTGRES_USER
+          value: superset
+        - name: REDIS_HOST
+          value: redis
+        - name: REDIS_PORT
+          value: "6379"
+        - name: SUPERSET_ENV
+          value: local
+        image: artifacts.ggn.in.guavus.com:4245/guavus-superset:1.0.4
+        name: superset
+        ports:
+        - containerPort: 8088
+          hostPort: 8088
+        resources: {}
+        volumeMounts:
+        - mountPath: /home/work/incubator-superset/superset/assets/node_modules
+          name: superset-node-modules-storage
+      restartPolicy: Always
+      volumes:
+      - name: superset-node-modules-storage
+        persistentVolumeClaim:
+          claimName: superset-node-modules-volume-claim
+status: {}
diff --git a/deployments/superset-node-modules-volume-claim.yaml b/deployments/superset-node-modules-volume-claim.yaml
new file mode 100644
index 0000000000..654f4b73ed
--- /dev/null
+++ b/deployments/superset-node-modules-volume-claim.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  creationTimestamp: null
+  labels:
+    service: superset-node-modules
+  name: superset-node-modules-volume-claim
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
+status: {}
diff --git a/deployments/superset-persistent-volume.yaml b/deployments/superset-persistent-volume.yaml
new file mode 100644
index 0000000000..e4ece4741a
--- /dev/null
+++ b/deployments/superset-persistent-volume.yaml
@@ -0,0 +1,13 @@
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: superset-node-modules-volume
+  labels:
+    type: local
+spec:
+  capacity:
+    storage: 100Mi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: "/mnt/superset-node_modules/data"
\ No newline at end of file
diff --git a/deployments/superset-service.yaml b/deployments/superset-service.yaml
new file mode 100644
index 0000000000..a70ed776eb
--- /dev/null
+++ b/deployments/superset-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  creationTimestamp: null
+  labels:
+    service: superset
+  name: superset
+spec:
+  ports:
+  - name: "8088"
+    protocol: TCP
+    port: 8088
+    targetPort: 8088
+  selector:
+    service: superset
+status:
+  loadBalancer: {}
diff --git a/docker-build.sh b/docker-build.sh
new file mode 100644
index 0000000000..42d7ce420d
--- /dev/null
+++ b/docker-build.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -ex
+START=$(date +%s)
+docker build -t guavus-superset -f Dockerfile .
+END=$(date +%s)
+DIFF=$(( $END - $START ))
+echo "Build completed in $DIFF seconds"
diff --git a/contrib/docker/docker-compose.yml b/docker-compose.yml
similarity index 92%
rename from contrib/docker/docker-compose.yml
rename to docker-compose.yml
index 9085793c10..4d95c59b5c 100644
--- a/contrib/docker/docker-compose.yml
+++ b/docker-compose.yml
@@ -19,7 +19,7 @@ services:
     volumes:
       - postgres:/var/lib/postgresql/data
   superset:
-    image: apache/incubator-superset
+    image: guavus-superset
     restart: always
     environment:
       POSTGRES_DB: superset
@@ -37,7 +37,6 @@ services:
       - postgres
       - redis
     volumes:
-      - .:/home/work/incubator-superset
      - superset-node-modules:/home/work/incubator-superset/superset/assets/node_modules
 volumes:
   postgres:
diff --git a/contrib/docker/docker-entrypoint.sh b/docker-entrypoint.sh
similarity index 100%
rename from contrib/docker/docker-entrypoint.sh
rename to docker-entrypoint.sh
diff --git a/docker-init.sh b/docker-init.sh
new file mode 100755
index 0000000000..b501c8de99
--- /dev/null
+++ b/docker-init.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -ex
+
+  echo "Initializing database"
+  superset db upgrade
+
+  echo "Creating default roles and permissions"
+  superset init
+
+  cd superset/assets && yarn && yarn run build && cd ../../
+# To start a development web server, use the -d switch
+  superset runserver
diff --git a/requirements.txt b/requirements.txt
index b076fc0c5a..f421933652 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,6 +9,7 @@ flask-appbuilder==1.10.0
 flask-caching==1.4.0
 flask-compress==1.4.0
 flask-migrate==2.1.1
+flask-sqlalchemy==2.1
 flask-wtf==0.14.2
 flower==0.9.2
 future==0.16.0
@@ -24,6 +25,7 @@ pathlib2==2.3.0
 polyline==1.3.2
 pydruid==0.4.4
 pyhive==0.5.1
+psycopg2-binary==2.7.5
 python-dateutil==2.6.1
 python-geohash==0.8.5
 pyyaml==3.12
@@ -39,4 +41,5 @@ thrift-sasl==0.3.0
 unicodecsv==0.14.1
 unidecode==1.0.22
 contextlib2==0.5.5
+elasticsearch==5.5.3
 
diff --git a/setup.py b/setup.py
index 84c0c48405..8334aa1db6 100644
--- a/setup.py
+++ b/setup.py
@@ -62,11 +62,13 @@ def get_git_sha():
         'colorama',
         'contextlib2',
         'cryptography',
+        'elasticsearch',
         'flask<1.0.0',
         'flask-appbuilder==1.10.0',  # known db migration with 1.11+
         'flask-caching',
         'flask-compress',
         'flask-migrate',
+        'flask-sqlalchemy==2.1',
         'flask-wtf',
         'flower',  # deprecated
         'future>=0.16.0, <0.17',
@@ -80,6 +82,7 @@ def get_git_sha():
         'parsedatetime',
         'pathlib2',
         'polyline',
+        'psycopg2-binary',
         'pydruid>=0.4.3',
         'pyhive>=0.4.0',
         'python-dateutil',
diff --git a/superset/cli.py b/superset/cli.py
index 074e12f963..c3e0596e19 100755
--- a/superset/cli.py
+++ b/superset/cli.py
@@ -39,6 +39,16 @@ def init():
     """Inits the Superset application"""
     utils.get_or_create_main_db()
     security_manager.sync_role_definitions()
+    role_admin = security_manager.find_role(security_manager.auth_role_admin)
+    username = firstname = lastname = password = "superset"
+    email = "[email protected]"
+    user = security_manager.find_user(username)
+    if user is None:
+        user = security_manager.add_user(username, firstname, lastname, email, role_admin, password)
+        if user:
+            print(Fore.GREEN+'Admin User {0} created.'.format(username))
+        else:
+            print(Fore.RED + 'No user created, an error occurred')
 
 
 def debug_run(app, port, use_reloader):
diff --git a/superset/config.py b/superset/config.py
index a5e4f2988c..636833f6fe 100644
--- a/superset/config.py
+++ b/superset/config.py
@@ -246,6 +246,7 @@
 DEFAULT_MODULE_DS_MAP = OrderedDict([
     ('superset.connectors.sqla.models', ['SqlaTable']),
     ('superset.connectors.druid.models', ['DruidDatasource']),
+    ('superset.connectors.elastic.models', ['ElasticDatasource']),
 ])
 ADDITIONAL_MODULE_DS_MAP = {}
 ADDITIONAL_MIDDLEWARE = []
diff --git a/superset/connectors/druid/views.py b/superset/connectors/druid/views.py
index cc0cea9a08..cf77b53f6c 100644
--- a/superset/connectors/druid/views.py
+++ b/superset/connectors/druid/views.py
@@ -26,6 +26,8 @@
 )
 from . import models
 
+appbuilder.add_separator('Sources')
+
 
 class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
     datamodel = SQLAInterface(models.DruidColumn)
diff --git a/superset/connectors/elastic/__init__.py b/superset/connectors/elastic/__init__.py
new file mode 100644
index 0000000000..a60249b87b
--- /dev/null
+++ b/superset/connectors/elastic/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+from . import models  # noqa
+from . import views  # noqa
diff --git a/superset/connectors/elastic/models.py b/superset/connectors/elastic/models.py
new file mode 100644
index 0000000000..62710f40e1
--- /dev/null
+++ b/superset/connectors/elastic/models.py
@@ -0,0 +1,583 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=C,R,W
+# pylint: disable=invalid-unary-operand-type
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+
+from datetime import datetime
+import json
+import logging
+
+from elasticsearch import Elasticsearch
+from flask import escape, Markup
+from flask_appbuilder import Model
+from flask_appbuilder.models.decorators import renders
+import pandas as pd
+from six import string_types
+import sqlalchemy as sa
+from sqlalchemy import (Boolean, Column, DateTime, ForeignKey, Integer, String,
+                        Text)
+from sqlalchemy.orm import backref, relationship
+
+from superset import db, import_util, security_manager, utils
+from superset.connectors.base.models import (BaseColumn, BaseDatasource,
+                                             BaseMetric)
+from superset.models.helpers import AuditMixinNullable, QueryResult, set_perm
+from superset.utils import flasher
+
+
+class ElasticCluster(Model, AuditMixinNullable):
+
+    """ORM object referencing the Elastic clusters"""
+
+    __tablename__ = 'elastic_clusters'
+    type = 'elastic'
+
+    id = Column(Integer, primary_key=True)
+    cluster_name = Column(String(250), unique=True)
+    hosts_json = Column(Text)
+    metadata_last_refreshed = Column(DateTime)
+    cache_timeout = Column(Integer)
+
+    def __repr__(self):
+        return self.cluster_name
+
+    @property
+    def data(self):
+        return {
+            'name': self.cluster_name,
+            'backend': 'elastic',
+        }
+
+    @property
+    def hosts(self):
+        return json.loads(self.hosts_json)
+
+    def get_client(self):
+        return Elasticsearch(self.hosts)
+
+    def get_mappings(self):
+        client = self.get_client()
+        return client.indices.get_mapping()
+
+    def refresh_datasources(self, datasource_name=None, merge_flag=False):
+        """Refresh metadata of all datasources in the cluster
+        If ``datasource_name`` is specified, only that datasource is updated
+        """
+        for index_name, index_metadata in self.get_mappings().items():
+            for name, mapping_metadata in index_metadata.get('mappings').items():
+                ElasticDatasource.sync_to_db(
+                    '{}.{}'.format(index_name, name), mapping_metadata, self)
+
+    @property
+    def perm(self):
+        return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
+
+    def get_perm(self):
+        return self.perm
+
+    @property
+    def name(self):
+        return self.cluster_name
+
+    @property
+    def unique_name(self):
+        return self.cluster_name
+
+
+class ElasticColumn(Model, BaseColumn):
+    """ORM model for storing Elastic datasource column metadata"""
+
+    __tablename__ = 'elastic_columns'
+
+    datasource_name = Column(
+        String(255),
+        ForeignKey('elastic_datasources.datasource_name'))
+    # Setting enable_typechecks=False disables polymorphic inheritance.
+    datasource = relationship(
+        'ElasticDatasource',
+        backref=backref('columns', cascade='all, delete-orphan'),
+        enable_typechecks=False)
+    json = Column(Text)
+
+    export_fields = (
+        'datasource_name', 'column_name', 'is_active', 'type', 'groupby',
+        'count_distinct', 'sum', 'avg', 'max', 'min', 'filterable',
+        'description',
+    )
+
+    @property
+    def expression(self):
+        return self.json
+
+    def __repr__(self):
+        return self.column_name
+
+    def generate_metrics(self):
+        """Generate metrics based on the column metadata"""
+        M = ElasticMetric  # noqa
+        metrics = []
+        metrics.append(ElasticMetric(
+            metric_name='count',
+            verbose_name='COUNT(*)',
+            metric_type='count',
+            json=json.dumps({'type': 'count', 'name': 'count'}),
+        ))
+        if self.sum and self.is_num:
+            name = 'sum__' + self.column_name
+            metrics.append(ElasticMetric(
+                metric_name=name,
+                metric_type='sum',
+                verbose_name='SUM({})'.format(self.column_name),
+                json=json.dumps({'sum': {'field': self.column_name}}),
+            ))
+
+        if self.avg and self.is_num:
+            name = 'avg__' + self.column_name
+            metrics.append(ElasticMetric(
+                metric_name=name,
+                metric_type='avg',
+                verbose_name='AVG({})'.format(self.column_name),
+                json=json.dumps({'avg': {'field': self.column_name}}),
+            ))
+
+        if self.min and self.is_num:
+            name = 'min__' + self.column_name
+            metrics.append(ElasticMetric(
+                metric_name=name,
+                metric_type='min',
+                verbose_name='MIN({})'.format(self.column_name),
+                json=json.dumps({'min': {'field': self.column_name}}),
+            ))
+        if self.max and self.is_num:
+            name = 'max__' + self.column_name
+            metrics.append(ElasticMetric(
+                metric_name=name,
+                metric_type='max',
+                verbose_name='MAX({})'.format(self.column_name),
+                json=json.dumps({'max': {'field': self.column_name}}),
+            ))
+        if self.count_distinct:
+            name = 'count_distinct__' + self.column_name
+            metrics.append(ElasticMetric(
+                metric_name=name,
+                verbose_name='COUNT(DISTINCT {})'.format(self.column_name),
+                metric_type='count_distinct',
+                json=json.dumps({'cardinality': {'field': self.column_name}}),
+            ))
+        session = db.session
+        new_metrics = []
+        for metric in metrics:
+            m = (
+                session.query(M)
+                .filter(M.metric_name == metric.metric_name)
+                .filter(M.datasource_name == self.datasource_name)
+                .filter(ElasticCluster.cluster_name == self.datasource.cluster_name)
+                .first()
+            )
+            metric.datasource_name = self.datasource_name
+            if not m:
+                new_metrics.append(metric)
+                session.add(metric)
+                session.flush()
+
+    @classmethod
+    def import_obj(cls, i_column):
+        def lookup_obj(lookup_column):
+            return db.session.query(ElasticColumn).filter(
+                ElasticColumn.datasource_name == lookup_column.datasource_name,
+                ElasticColumn.column_name == lookup_column.column_name).first()
+
+        return import_util.import_simple_obj(db.session, i_column, lookup_obj)
+
+
+class ElasticMetric(Model, BaseMetric):
+
+    """ORM object referencing Elastic metrics for a datasource"""
+
+    __tablename__ = 'elastic_metrics'
+    datasource_name = Column(
+        String(255),
+        ForeignKey('elastic_datasources.datasource_name'))
+    # Setting enable_typechecks=False disables polymorphic inheritance.
+    datasource = relationship(
+        'ElasticDatasource',
+        backref=backref('metrics', cascade='all, delete-orphan'),
+        enable_typechecks=False)
+    json = Column(Text)
+
+    export_fields = (
+        'metric_name', 'verbose_name', 'metric_type', 'datasource_name',
+        'json', 'description', 'is_restricted', 'd3format',
+    )
+
+    @property
+    def expression(self):
+        return self.json
+
+    @property
+    def json_obj(self):
+        try:
+            obj = json.loads(self.json)
+        except Exception:
+            obj = {}
+        return obj
+
+    @property
+    def perm(self):
+        return (
+            '{parent_name}.[{obj.metric_name}](id:{obj.id})'
+        ).format(obj=self,
+                 parent_name=self.datasource.full_name,
+                 ) if self.datasource else None
+
+    @classmethod
+    def import_obj(cls, i_metric):
+        def lookup_obj(lookup_metric):
+            return db.session.query(ElasticMetric).filter(
+                ElasticMetric.datasource_name == lookup_metric.datasource_name,
+                ElasticMetric.metric_name == lookup_metric.metric_name).first()
+        return import_util.import_simple_obj(db.session, i_metric, lookup_obj)
+
+
+class ElasticDatasource(Model, BaseDatasource):
+
+    """ORM object referencing Elastic datasources (tables)"""
+
+    __tablename__ = 'elastic_datasources'
+
+    type = 'elastic'
+    query_langtage = 'json'
+    cluster_class = ElasticCluster
+    metric_class = ElasticMetric
+    column_class = ElasticColumn
+
+    baselink = 'elasticdatasourcemodelview'
+
+    # Columns
+    datasource_name = Column(String(255), unique=True)
+    is_hidden = Column(Boolean, default=False)
+    fetch_values_from = Column(String(100))
+    cluster_name = Column(
+        String(250), ForeignKey('elastic_clusters.cluster_name'))
+    cluster = relationship(
+        'ElasticCluster', backref='datasources', foreign_keys=[cluster_name])
+    user_id = Column(Integer, ForeignKey('ab_user.id'))
+    owner = relationship(
+        security_manager.user_model,
+        backref=backref('elastic_datasources', cascade='all, delete-orphan'),
+        foreign_keys=[user_id])
+
+    export_fields = (
+        'datasource_name', 'is_hidden', 'description', 'default_endpoint',
+        'cluster_name', 'offset', 'cache_timeout', 'params',
+    )
+    slices = relationship(
+        'Slice',
+        primaryjoin=(
+            'ElasticDatasource.id == foreign(Slice.datasource_id) and '
+            'Slice.datasource_type == "elastic"'))
+
+    @property
+    def database(self):
+        return self.cluster
+
+    @property
+    def num_cols(self):
+        return [c.column_name for c in self.columns if c.is_num]
+
+    @property
+    def name(self):
+        return self.datasource_name
+
+    @property
+    def schema(self):
+        ds_name = self.datasource_name or ''
+        name_pieces = ds_name.split('.')
+        if len(name_pieces) > 1:
+            return name_pieces[0]
+        else:
+            return None
+
+    @property
+    def schema_perm(self):
+        """Returns schema permission if present, cluster one otherwise."""
+        return security_manager.get_schema_perm(self.cluster, self.schema)
+
+    def get_perm(self):
+        return (
+            '[{obj.cluster_name}].[{obj.datasource_name}]'
+            '(id:{obj.id})').format(obj=self)
+
+    @property
+    def link(self):
+        name = escape(self.datasource_name)
+        return Markup('<a href="{self.url}">{name}</a>').format(**locals())
+
+    @property
+    def full_name(self):
+        return utils.get_datasource_full_name(
+            self.cluster_name, self.datasource_name)
+
+    @property
+    def time_column_grains(self):
+        return {
+            'time_columns': [
+                'all', '5 seconds', '30 seconds', '1 minute',
+                '5 minutes', '1 hour', '6 hour', '1 day', '7 days',
+                'week', 'week_starting_sunday', 'week_ending_saturday',
+                'month',
+            ],
+            'time_grains': ['now'],
+        }
+
+    def __repr__(self):
+        return self.datasource_name
+
+    @renders('datasource_name')
+    def datasource_link(self):
+        url = '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
+        name = escape(self.datasource_name)
+        return Markup('<a href="{url}">{name}</a>'.format(**locals()))
+
+    def get_metric_obj(self, metric_name):
+        return [
+            m.json_obj for m in self.metrics
+            if m.metric_name == metric_name
+        ][0]
+
+    @classmethod
+    def import_obj(cls, i_datasource, import_time=None):
+        """Imports the datasource from the object to the database.
+
+         Metrics, columns and the datasource will be overridden if they exist.
+         This function can be used to import/export dashboards between multiple
+         superset instances. Audit metadata isn't copied over.
+        """
+        def lookup_datasource(d):
+            return db.session.query(ElasticDatasource).join(ElasticCluster).filter(
+                ElasticDatasource.datasource_name == d.datasource_name,
+                ElasticCluster.cluster_name == d.cluster_name,
+            ).first()
+
+        def lookup_cluster(d):
+            return db.session.query(ElasticCluster).filter_by(
+                cluster_name=d.cluster_name).one()
+        return import_util.import_datasource(
+            db.session, i_datasource, lookup_cluster, lookup_datasource,
+            import_time)
+
+    @staticmethod
+    def version_higher(v1, v2):
+        """is v1 higher than v2
+
+        >>> ElasticDatasource.version_higher('0.8.2', '0.9.1')
+        False
+        >>> ElasticDatasource.version_higher('0.8.2', '0.6.1')
+        True
+        >>> ElasticDatasource.version_higher('0.8.2', '0.8.2')
+        False
+        >>> ElasticDatasource.version_higher('0.8.2', '0.9.BETA')
+        False
+        >>> ElasticDatasource.version_higher('0.8.2', '0.9')
+        False
+        """
+        def int_or_0(v):
+            try:
+                v = int(v)
+            except (TypeError, ValueError):
+                v = 0
+            return v
+        v1nums = [int_or_0(n) for n in v1.split('.')]
+        v2nums = [int_or_0(n) for n in v2.split('.')]
+        v1nums = (v1nums + [0, 0, 0])[:3]
+        v2nums = (v2nums + [0, 0, 0])[:3]
+        return v1nums[0] > v2nums[0] or \
+            (v1nums[0] == v2nums[0] and v1nums[1] > v2nums[1]) or \
+            (v1nums[0] == v2nums[0] and v1nums[1] == v2nums[1] and v1nums[2] > v2nums[2])
+
+    def generate_metrics(self):
+        for col in self.columns:
+            col.generate_metrics()
+
+    def query_str(self):
+        d = {'query': None}
+        return json.dumps(d)
+
+    @classmethod
+    def sync_to_db(cls, name, metadata, cluster):
+        """Fetches metadata for that datasource and merges the Superset db"""
+        logging.info('Syncing Elastic datasource [{}]'.format(name))
+        session = db.session
+        datasource = session.query(cls).filter_by(datasource_name=name).first()
+        if not datasource:
+            datasource = cls(datasource_name=name)
+            session.add(datasource)
+            flasher('Adding new datasource [{}]'.format(name), 'success')
+        else:
+            flasher('Refreshing datasource [{}]'.format(name), 'info')
+        session.flush()
+        datasource.cluster = cluster
+        session.flush()
+
+        for col_name, col_metadata in metadata.get('properties').items():
+            cls.merge_column(col_name, col_metadata, datasource, session)
+
+    @classmethod
+    def merge_column(cls, col_name, col_metadata, datasource, sesh):
+        col_obj = (
+            sesh
+            .query(ElasticColumn)
+            .filter_by(
+                datasource_name=datasource.datasource_name,
+                column_name=col_name)
+            .first()
+        )
+        datatype = col_metadata.get('type')
+        if not col_obj:
+            col_obj = ElasticColumn(
+                datasource_name=datasource.datasource_name,
+                column_name=col_name)
+            sesh.add(col_obj)
+        if datatype == 'string':
+            col_obj.groupby = True
+            col_obj.filterable = True
+        if col_obj.is_num:
+            col_obj.sum = True
+        if col_obj:
+            col_obj.type = datatype
+        sesh.flush()
+        col_obj.datasource = datasource
+        col_obj.generate_metrics()
+        sesh.flush()
+
+    @staticmethod
+    def time_offset(granularity):
+        if granularity == 'week_ending_saturday':
+            return 6 * 24 * 3600 * 1000  # 6 days
+        return 0
+
+    # uses https://en.wikipedia.org/wiki/ISO_8601
+    # http://elastic.io/docs/0.8.0/querying/granularities.html
+    # TODO: pass origin from the UI
+    @staticmethod
+    def granularity(period_name, timezone=None, origin=None):
+        if not period_name or period_name == 'all':
+            return 'all'
+        iso_8601_dict = {
+            '5 seconds': 'PT5S',
+            '30 seconds': 'PT30S',
+            '1 minute': 'PT1M',
+            '5 minutes': 'PT5M',
+            '1 hour': 'PT1H',
+            '6 hour': 'PT6H',
+            'one day': 'P1D',
+            '1 day': 'P1D',
+            '7 days': 'P7D',
+            'week': 'P1W',
+            'week_starting_sunday': 'P1W',
+            'week_ending_saturday': 'P1W',
+            'month': 'P1M',
+        }
+
+        granularity = {'type': 'period'}
+        if timezone:
+            granularity['timeZone'] = timezone
+
+        if origin:
+            dttm = utils.parse_human_datetime(origin)
+            granularity['origin'] = dttm.isoformat()
+
+        if period_name in iso_8601_dict:
+            granularity['period'] = iso_8601_dict[period_name]
+            if period_name in ('week_ending_saturday', 'week_starting_sunday'):
+                # use Sunday as start of the week
+                granularity['origin'] = '2016-01-03T00:00:00'
+        elif not isinstance(period_name, string_types):
+            granularity['type'] = 'duration'
+            granularity['duration'] = period_name
+        elif period_name.startswith('P'):
+            # identify if the string is the iso_8601 period
+            granularity['period'] = period_name
+        else:
+            granularity['type'] = 'duration'
+            granularity['duration'] = utils.parse_human_timedelta(
+                period_name).total_seconds() * 1000
+        return granularity
+
+    def values_for_column(self,
+                          column_name,
+                          limit=10000):
+        """Retrieve some values for the given column"""
+        # TODO
+
+    def get_query_str(self, query_obj, phase=1, client=None):
+        return self.run_query(client=client, phase=phase, **query_obj)
+
+    def run_query(  # noqa / elastic
+            self,
+            groupby, metrics,
+            granularity,
+            from_dttm, to_dttm,
+            filter=None,  # noqa
+            is_timeseries=True,
+            timeseries_limit=None,
+            timeseries_limit_metric=None,
+            row_limit=None,
+            inner_from_dttm=None, inner_to_dttm=None,
+            orderby=None,
+            extras=None,  # noqa
+            select=None,  # noqa
+            columns=None, phase=2, client=None, form_data=None):
+        """Runs a query against Elastic and returns a dataframe.
+        """
+        pass
+
+    @property
+    def index(self):
+        return self.datasource_name.split('.')[0]
+
+    def query(self, query_obj):
+        client = self.cluster.get_client()
+        equery = {}
+
+        # Aggregations
+        equery['aggregations'] = {}
+        for m in self.metrics:
+            if m.metric_name in query_obj.get('metrics'):
+                equery['aggregations'][m.metric_name] = m.json_obj
+
+        data = client.search(index=self.index, body=equery)
+        print('-=' * 20)
+        print('query is: {}'.format(equery))
+        data = data['hits']['hits']
+        data = [k['_source'] for k in data]
+        print('-=' * 20)
+        query_str = self.query_str()
+        qry_start_dttm = datetime.now()
+        df = pd.DataFrame(data)
+        print('-=' * 20)
+        print(df)
+        return QueryResult(
+            df=df,
+            query=query_str,
+            duration=datetime.now() - qry_start_dttm)
+
+    def get_filters(self, raw_filters):  # noqa
+        return
+
+    @classmethod
+    def query_datasources_by_name(
+            cls, session, database, datasource_name, schema=None):
+        return (
+            session.query(cls)
+            .filter_by(cluster_name=database.id)
+            .filter_by(datasource_name=datasource_name)
+            .all()
+        )
+
+
+sa.event.listen(ElasticDatasource, 'after_insert', set_perm)
+sa.event.listen(ElasticDatasource, 'after_update', set_perm)
diff --git a/superset/connectors/elastic/views.py b/superset/connectors/elastic/views.py
new file mode 100644
index 0000000000..efae4f0cf0
--- /dev/null
+++ b/superset/connectors/elastic/views.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=C,R,W
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from datetime import datetime
+import logging
+
+from flask import flash, Markup, redirect
+from flask_appbuilder import CompactCRUDMixin, expose
+from flask_appbuilder.models.sqla.interface import SQLAInterface
+from flask_appbuilder.security.decorators import has_access
+from flask_babel import gettext as __
+from flask_babel import lazy_gettext as _
+import sqlalchemy as sqla
+
+from superset import appbuilder, db, security_manager, utils
+from superset.connectors.connector_registry import ConnectorRegistry
+from superset.views.base import BaseSupersetView
+from superset.views.base import (
+    DatasourceFilter, DeleteMixin, get_datasource_exist_error_msg,
+    ListWidgetWithCheckboxes, SupersetModelView, validate_json)
+from . import models
+
+appbuilder.add_separator('Sources')
+
+
+class ElasticColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
+    datamodel = SQLAInterface(models.ElasticColumn)
+    edit_columns = [
+        'column_name', 'description', 'json', 'datasource',
+        'groupby', 'filterable', 'count_distinct', 'sum', 'min', 'max']
+    add_columns = edit_columns
+    list_columns = [
+        'column_name', 'type', 'groupby', 'filterable', 'count_distinct',
+        'sum', 'min', 'max']
+    can_delete = False
+    page_size = 500
+    label_columns = {
+        'column_name': _('Column'),
+        'type': _('Type'),
+        'datasource': _('Datasource'),
+        'groupby': _('Groupable'),
+        'filterable': _('Filterable'),
+        'count_distinct': _('Count Distinct'),
+        'sum': _('Sum'),
+        'min': _('Min'),
+        'max': _('Max'),
+    }
+    description_columns = {
+        'filterable': _(
+            'Whether this column is exposed in the `Filters` section '
+            'of the explore view.'),
+        'json': utils.markdown(
+            'this field can be used to specify  '
+            'a `dimensionSpec` as documented [here]'
+            '(http://elastic.io/docs/latest/querying/dimensionspecs.html). '
+            'Make sure to input valid JSON and that the '
+            '`outputName` matches the `column_name` defined '
+            'above.',
+            True),
+    }
+
+    def post_update(self, col):
+        col.generate_metrics()
+        utils.validate_json(col.json)
+
+    def post_add(self, col):
+        self.post_update(col)
+
+
+appbuilder.add_view_no_menu(ElasticColumnInlineView)
+
+
+class ElasticMetricInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
+    datamodel = SQLAInterface(models.ElasticMetric)
+    list_columns = ['metric_name', 'verbose_name', 'metric_type']
+    edit_columns = [
+        'metric_name', 'description', 'verbose_name', 'metric_type', 'json',
+        'datasource', 'd3format', 'is_restricted']
+    add_columns = edit_columns
+    page_size = 500
+    validators_columns = {
+        'json': [validate_json],
+    }
+    description_columns = {
+        'metric_type': utils.markdown(
+            'use `postagg` as the metric type if you are defining a '
+            '[Elastic Post Aggregation]'
+            '(http://elastic.io/docs/latest/querying/post-aggregations.html)',
+            True),
+        'is_restricted': _('Whether the access to this metric is restricted '
+                           'to certain roles. Only roles with the permission '
+                           '\'metric access on XXX (the name of this metric)\' '
+                           'are allowed to access this metric'),
+    }
+    label_columns = {
+        'metric_name': _('Metric'),
+        'description': _('Description'),
+        'verbose_name': _('Verbose Name'),
+        'metric_type': _('Type'),
+        'json': _('JSON'),
+        'datasource': _('Elastic Datasource'),
+    }
+
+    def post_add(self, metric):
+        if metric.is_restricted:
+            security_manager.merge_perm('metric_access', metric.get_perm())
+
+    def post_update(self, metric):
+        if metric.is_restricted:
+            security_manager.merge_perm('metric_access', metric.get_perm())
+
+
+appbuilder.add_view_no_menu(ElasticMetricInlineView)
+
+
+class ElasticClusterModelView(SupersetModelView, DeleteMixin):  # noqa
+    datamodel = SQLAInterface(models.ElasticCluster)
+    add_columns = [
+        'cluster_name', 'hosts_json', 'cache_timeout',
+    ]
+    edit_columns = add_columns
+    list_columns = ['cluster_name', 'metadata_last_refreshed']
+    search_columns = ('cluster_name',)
+    label_columns = {
+        'cluster_name': _('Cluster'),
+        'hosts_json': _('Hosts JSON configuration'),
+    }
+    description_columns = {
+        'hosts_json': _(
+            'A JSON string that represents a host, an array of hosts, '
+            'or anything else that ``elasticsearch.Elasticsearch()`` will '
+            'be able to interpret'),
+    }
+
+    def pre_add(self, cluster):
+        security_manager.merge_perm('database_access', cluster.perm)
+
+    def pre_update(self, cluster):
+        self.pre_add(cluster)
+
+    def _delete(self, pk):
+        DeleteMixin._delete(self, pk)
+
+
+appbuilder.add_view(
+    ElasticClusterModelView,
+    name='Elastic Clusters',
+    label=__('Elastic Clusters'),
+    icon='fa-cubes',
+    category='Sources',
+    category_label=__('Sources'),
+    category_icon='fa-database')
+
+
+class ElasticDatasourceModelView(SupersetModelView, DeleteMixin):  # noqa
+    datamodel = SQLAInterface(models.ElasticDatasource)
+    list_widget = ListWidgetWithCheckboxes
+    list_columns = [
+        'datasource_link', 'cluster', 'changed_by_', 'modified']
+    order_columns = [
+        'datasource_link', 'changed_on_', 'offset']
+    related_views = [ElasticColumnInlineView, ElasticMetricInlineView]
+    edit_columns = [
+        'datasource_name', 'cluster', 'slices', 'description', 'owner',
+        'is_hidden',
+        'filter_select_enabled', 'fetch_values_from',
+        'default_endpoint', 'offset', 'cache_timeout']
+    search_columns = (
+        'datasource_name', 'cluster', 'description', 'owner',
+    )
+    add_columns = edit_columns
+    show_columns = add_columns + ['perm']
+    page_size = 500
+    base_order = ('datasource_name', 'asc')
+    description_columns = {
+        'slices': _(
+            'The list of slices associated with this table. By '
+            'altering this datasource, you may change how these associated '
+            'slices behave. '
+            'Also note that slices need to point to a datasource, so '
+            'this form will fail at saving if removing slices from a '
+            'datasource. If you want to change the datasource for a slice, '
+            'overwrite the slice from the \'explore view\''),
+        'offset': _('Timezone offset (in hours) for this datasource'),
+        'description': Markup(
+            "Supports <a href='"
+            "https://daringfireball.net/projects/markdown/'>markdown</a>"),
+        'fetch_values_from': _(
+            'Time expression to use as a predicate when retrieving '
+            'distinct values to populate the filter component. '
+            'Only applies when `Enable Filter Select` is on. If '
+            'you enter `7 days ago`, the distinct list of values in '
+            'the filter will be populated based on the distinct value over '
+            'the past week'),
+        'filter_select_enabled': _(
+            'Whether to populate the filter\'s dropdown in the explore '
+            'view\'s filter section with a list of distinct values fetched '
+            'from the backend on the fly'),
+        'default_endpoint': _(
+            'Redirects to this endpoint when clicking on the datasource '
+            'from the datasource list'),
+    }
+    base_filters = [['id', DatasourceFilter, lambda: []]]
+    label_columns = {
+        'slices': _('Associated Slices'),
+        'datasource_link': _('Data Source'),
+        'cluster': _('Cluster'),
+        'description': _('Description'),
+        'owner': _('Owner'),
+        'is_hidden': _('Is Hidden'),
+        'filter_select_enabled': _('Enable Filter Select'),
+        'default_endpoint': _('Default Endpoint'),
+        'offset': _('Time Offset'),
+        'cache_timeout': _('Cache Timeout'),
+    }
+
+    def pre_add(self, datasource):
+        number_of_existing_datasources = db.session.query(
+            sqla.func.count('*')).filter(
+            models.ElasticDatasource.datasource_name ==
+                datasource.datasource_name,
+            models.ElasticDatasource.cluster_name == datasource.cluster.id,
+        ).scalar()
+
+        # table object is already added to the session
+        if number_of_existing_datasources > 1:
+            raise Exception(get_datasource_exist_error_msg(
+                datasource.full_name))
+
+    def post_add(self, datasource):
+        datasource.generate_metrics()
+        security_manager.merge_perm('datasource_access', datasource.get_perm())
+        if datasource.schema:
+            security_manager.merge_perm('schema_access', datasource.schema_perm)
+
+    def post_update(self, datasource):
+        self.post_add(datasource)
+
+    def _delete(self, pk):
+        DeleteMixin._delete(self, pk)
+
+
+appbuilder.add_view(
+    ElasticDatasourceModelView,
+    'Elastic Datasources',
+    label=__('Elastic Datasources'),
+    category='Sources',
+    category_label=__('Sources'),
+    icon='fa-cube')
+
+
+class Elastic(BaseSupersetView):
+    """The base views for Superset!"""
+
+    @has_access
+    @expose('/refresh_datasources/')
+    def refresh_datasources(self):
+        """endpoint that refreshes elastic datasources metadata"""
+        session = db.session()
+        elastic_cluster = ConnectorRegistry.sources['elastic'].cluster_class
+        for cluster in session.query(elastic_cluster).all():
+            cluster_name = cluster.cluster_name
+            try:
+                cluster.refresh_datasources()
+            except Exception as e:
+                flash(
+                    'Error while processing cluster \'{}\'\n{}'.format(
+                        cluster_name, utils.error_msg_from_exception(e)),
+                    'danger')
+                logging.exception(e)
+                return redirect('/elasticclustermodelview/list/')
+            cluster.metadata_last_refreshed = datetime.now()
+            flash(
+                'Refreshed metadata from cluster '
+                '[' + cluster.cluster_name + ']',
+                'info')
+        session.commit()
+        return redirect('/elasticdatasourcemodelview/list/')
+
+
+appbuilder.add_view_no_menu(Elastic)
+
+appbuilder.add_link(
+    'Refresh Elastic Metadata',
+    label=__('Refresh Elastic Metadata'),
+    href='/elastic/refresh_datasources/',
+    category='Sources',
+    category_label=__('Sources'),
+    category_icon='fa-database',
+    icon='fa-cog')
diff --git a/superset/migrations/versions/8475d45ad108_elasticsearch.py b/superset/migrations/versions/8475d45ad108_elasticsearch.py
new file mode 100644
index 0000000000..155921bfde
--- /dev/null
+++ b/superset/migrations/versions/8475d45ad108_elasticsearch.py
@@ -0,0 +1,114 @@
+"""elasticsearch
+
+Revision ID: 8475d45ad108
+Revises: 0c5070e96b57
+Create Date: 2018-08-13 12:30:10.792053
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '8475d45ad108'
+down_revision = '0c5070e96b57'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.create_table(
+        'elastic_clusters',
+        sa.Column('created_on', sa.DateTime(), nullable=True),
+        sa.Column('changed_on', sa.DateTime(), nullable=True),
+        sa.Column('id', sa.Integer(), nullable=False),
+        sa.Column('cluster_name', sa.String(length=250), nullable=True),
+        sa.Column('hosts_json', sa.Text(), nullable=True),
+        sa.Column('metadata_last_refreshed', sa.DateTime(), nullable=True),
+        sa.Column('cache_timeout', sa.Integer(), nullable=True),
+        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
+        sa.Column('created_by_fk', sa.Integer(), nullable=True),
+        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
+        sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('cluster_name')
+    )
+    op.create_table(
+        'elastic_datasources',
+        sa.Column('created_on', sa.DateTime(), nullable=True),
+        sa.Column('changed_on', sa.DateTime(), nullable=True),
+        sa.Column('id', sa.Integer(), nullable=False),
+        sa.Column('description', sa.Text(), nullable=True),
+        sa.Column('default_endpoint', sa.Text(), nullable=True),
+        sa.Column('is_featured', sa.Boolean(), nullable=True),
+        sa.Column('filter_select_enabled', sa.Boolean(), nullable=True),
+        sa.Column('offset', sa.Integer(), nullable=True),
+        sa.Column('cache_timeout', sa.Integer(), nullable=True),
+        sa.Column('params', sa.String(length=1000), nullable=True),
+        sa.Column('perm', sa.String(length=1000), nullable=True),
+        sa.Column('datasource_name', sa.String(length=255), nullable=True),
+        sa.Column('is_hidden', sa.Boolean(), nullable=True),
+        sa.Column('fetch_values_from', sa.String(length=100), nullable=True),
+        sa.Column('cluster_name', sa.String(length=250), nullable=True),
+        sa.Column('user_id', sa.Integer(), nullable=True),
+        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
+        sa.Column('created_by_fk', sa.Integer(), nullable=True),
+        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['cluster_name'], ['elastic_clusters.cluster_name'], ),
+        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['user_id'], ['ab_user.id'], ),
+        sa.PrimaryKeyConstraint('id'),
+        sa.UniqueConstraint('datasource_name')
+    )
+    op.create_table(
+        'elastic_columns',
+        sa.Column('created_on', sa.DateTime(), nullable=True),
+        sa.Column('changed_on', sa.DateTime(), nullable=True),
+        sa.Column('id', sa.Integer(), nullable=False),
+        sa.Column('column_name', sa.String(length=255), nullable=True),
+        sa.Column('verbose_name', sa.String(length=1024), nullable=True),
+        sa.Column('is_active', sa.Boolean(), nullable=True),
+        sa.Column('type', sa.String(length=32), nullable=True),
+        sa.Column('groupby', sa.Boolean(), nullable=True),
+        sa.Column('count_distinct', sa.Boolean(), nullable=True),
+        sa.Column('sum', sa.Boolean(), nullable=True),
+        sa.Column('avg', sa.Boolean(), nullable=True),
+        sa.Column('max', sa.Boolean(), nullable=True),
+        sa.Column('min', sa.Boolean(), nullable=True),
+        sa.Column('filterable', sa.Boolean(), nullable=True),
+        sa.Column('description', sa.Text(), nullable=True),
+        sa.Column('datasource_name', sa.String(length=255), nullable=True),
+        sa.Column('json', sa.Text(), nullable=True),
+        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
+        sa.Column('created_by_fk', sa.Integer(), nullable=True),
+        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['datasource_name'], ['elastic_datasources.datasource_name'], ),
+        sa.PrimaryKeyConstraint('id')
+    )
+    op.create_table(
+        'elastic_metrics',
+        sa.Column('created_on', sa.DateTime(), nullable=True),
+        sa.Column('changed_on', sa.DateTime(), nullable=True),
+        sa.Column('id', sa.Integer(), nullable=False),
+        sa.Column('metric_name', sa.String(length=512), nullable=True),
+        sa.Column('verbose_name', sa.String(length=1024), nullable=True),
+        sa.Column('metric_type', sa.String(length=32), nullable=True),
+        sa.Column('description', sa.Text(), nullable=True),
+        sa.Column('is_restricted', sa.Boolean(), nullable=True),
+        sa.Column('d3format', sa.String(length=128), nullable=True),
+        sa.Column('datasource_name', sa.String(length=255), nullable=True),
+        sa.Column('json', sa.Text(), nullable=True),
+        sa.Column('warning_text', sa.String(length=255), nullable=True),
+        sa.Column('changed_by_fk', sa.Integer(), nullable=True),
+        sa.Column('created_by_fk', sa.Integer(), nullable=True),
+        sa.ForeignKeyConstraint(['changed_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['created_by_fk'], ['ab_user.id'], ),
+        sa.ForeignKeyConstraint(['datasource_name'], ['elastic_datasources.datasource_name'], ),
+        sa.PrimaryKeyConstraint('id')
+    )
+
+
+def downgrade():
+    op.drop_table('elastic_metrics')
+    op.drop_table('elastic_columns')
+    op.drop_table('elastic_datasources')
+    op.drop_table('elastic_clusters')
diff --git a/contrib/docker/superset_config.py b/superset/superset_config.py
similarity index 100%
rename from contrib/docker/superset_config.py
rename to superset/superset_config.py
diff --git a/tox.ini b/tox.ini
index b73d276841..088fb6ae1e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,6 +9,8 @@ exclude =
     superset/data
     superset/migrations
     superset/templates
+    build
+    venv
 ignore =
     FI12
     FI15


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services
