http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
----------------------------------------------------------------------
diff --git a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java 
b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
index be700c3..4504089 100644
--- a/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
+++ b/livy/src/test/java/org/apache/zeppelin/livy/LivyInterpreterIT.java
@@ -17,14 +17,6 @@
 
 package org.apache.zeppelin.livy;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Properties;
 import org.apache.commons.io.IOUtils;
 import org.apache.livy.test.framework.Cluster;
 import org.apache.livy.test.framework.Cluster$;
@@ -45,6 +37,15 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
 public class LivyInterpreterIT {
   private static final Logger LOGGER = 
LoggerFactory.getLogger(LivyInterpreterIT.class);
   private static Cluster cluster;
@@ -84,6 +85,7 @@ public class LivyInterpreterIT {
     return true;
   }
 
+
   @Test
   public void testSparkInterpreter() throws InterpreterException {
     if (!checkPreCondition()) {
@@ -97,13 +99,12 @@ public class LivyInterpreterIT {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
     sparkInterpreter.open();
 
     LivySparkSQLInterpreter sqlInterpreter = new 
LivySparkSQLInterpreter(properties);
@@ -131,13 +132,12 @@ public class LivyInterpreterIT {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    final InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    final InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
     ;
 
     InterpreterResult result = sparkInterpreter.interpret("sc.parallelize(1 to 
10).sum()", context);
@@ -158,15 +158,18 @@ public class LivyInterpreterIT {
     assertEquals(1, result.message().size());
 
     // multi-line string
-    String multiLineString = "val str = \"\"\"multiple\n" + "line\"\"\"\n" + 
"println(str)";
+    String multiLineString = "val str = \"\"\"multiple\n" +
+        "line\"\"\"\n" +
+        "println(str)";
     result = sparkInterpreter.interpret(multiLineString, context);
     assertEquals(InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
     assertTrue(result.message().get(0).getData().contains("multiple\nline"));
 
     // case class
-    String caseClassCode =
-        "case class Person(id:Int, \n" + "name:String)\n" + "val p=Person(1, 
\"name_a\")";
+    String caseClassCode = "case class Person(id:Int, \n" +
+        "name:String)\n" +
+        "val p=Person(1, \"name_a\")";
     result = sparkInterpreter.interpret(caseClassCode, context);
     assertEquals(InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(1, result.message().size());
@@ -204,78 +207,60 @@ public class LivyInterpreterIT {
 
     // cancel
     if (sparkInterpreter.livyVersion.newerThanEquals(LivyVersion.LIVY_0_3_0)) {
-      Thread cancelThread =
-          new Thread() {
-            @Override
-            public void run() {
-              // invoke cancel after 1 millisecond to wait job starting
-              try {
-                Thread.sleep(1);
-              } catch (InterruptedException e) {
-                e.printStackTrace();
-              }
-              sparkInterpreter.cancel(context);
-            }
-          };
+      Thread cancelThread = new Thread() {
+        @Override
+        public void run() {
+          // invoke cancel after 1 millisecond to wait job starting
+          try {
+            Thread.sleep(1);
+          } catch (InterruptedException e) {
+            e.printStackTrace();
+          }
+          sparkInterpreter.cancel(context);
+        }
+      };
       cancelThread.start();
-      result =
-          sparkInterpreter.interpret(
-              "sc.parallelize(1 to 10).foreach(e=>Thread.sleep(10*1000))", 
context);
+      result = sparkInterpreter
+          .interpret("sc.parallelize(1 to 
10).foreach(e=>Thread.sleep(10*1000))", context);
       assertEquals(InterpreterResult.Code.ERROR, result.code());
       String message = result.message().get(0).getData();
       // 2 possibilities, sometimes livy doesn't return the real cancel 
exception
-      assertTrue(
-          message.contains("cancelled part of cancelled job group")
-              || message.contains("Job is cancelled"));
+      assertTrue(message.contains("cancelled part of cancelled job group") ||
+          message.contains("Job is cancelled"));
     }
   }
 
-  private void testDataFrame(
-      LivySparkInterpreter sparkInterpreter,
-      final LivySparkSQLInterpreter sqlInterpreter,
-      boolean isSpark2)
-      throws LivyException {
+  private void testDataFrame(LivySparkInterpreter sparkInterpreter,
+                             final LivySparkSQLInterpreter sqlInterpreter,
+                             boolean isSpark2) throws LivyException {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    final InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    final InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
 
     InterpreterResult result = null;
     // test DataFrame api
     if (!isSpark2) {
-      result =
-          sparkInterpreter.interpret(
-              "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
-                  + "df.collect()",
-              context);
+      result = sparkInterpreter.interpret(
+          "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+              + "df.collect()", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
-      assertTrue(
-          result
-              .message()
-              .get(0)
-              .getData()
-              .contains("Array[org.apache.spark.sql.Row] = 
Array([hello,20])"));
+      assertTrue(result.message().get(0).getData()
+          .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
     } else {
-      result =
-          sparkInterpreter.interpret(
-              "val 
df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
-                  + "df.collect()",
-              context);
+      result = sparkInterpreter.interpret(
+          "val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", 
\"col_2\")\n"
+              + "df.collect()", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
-      assertTrue(
-          result
-              .message()
-              .get(0)
-              .getData()
-              .contains("Array[org.apache.spark.sql.Row] = 
Array([hello,20])"));
+      assertTrue(result.message().get(0).getData()
+          .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
     }
     sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
     // test LivySparkSQLInterpreter which share the same SparkContext with 
LivySparkInterpreter
@@ -314,70 +299,57 @@ public class LivyInterpreterIT {
 
     // test sql cancel
     if 
(sqlInterpreter.getLivyVersion().newerThanEquals(LivyVersion.LIVY_0_3_0)) {
-      Thread cancelThread =
-          new Thread() {
-            @Override
-            public void run() {
-              sqlInterpreter.cancel(context);
-            }
-          };
+      Thread cancelThread = new Thread() {
+        @Override
+        public void run() {
+          sqlInterpreter.cancel(context);
+        }
+      };
       cancelThread.start();
-      // sleep so that cancelThread performs a cancel.
+      //sleep so that cancelThread performs a cancel.
       try {
         Thread.sleep(1);
       } catch (InterruptedException e) {
         e.printStackTrace();
       }
-      result = sqlInterpreter.interpret("select count(1) from df", context);
+      result = sqlInterpreter
+          .interpret("select count(1) from df", context);
       if (result.code().equals(InterpreterResult.Code.ERROR)) {
         String message = result.message().get(0).getData();
         // 2 possibilities, sometimes livy doesn't return the real cancel 
exception
-        assertTrue(
-            message.contains("cancelled part of cancelled job group")
-                || message.contains("Job is cancelled"));
+        assertTrue(message.contains("cancelled part of cancelled job group") ||
+            message.contains("Job is cancelled"));
       }
     }
 
     // test result string truncate
     if (!isSpark2) {
-      result =
-          sparkInterpreter.interpret(
-              "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
-                  + ".toDF(\"col_1\", \"col_2\")\n"
-                  + "df.collect()",
-              context);
+      result = sparkInterpreter.interpret(
+          "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
+              + ".toDF(\"col_1\", \"col_2\")\n"
+              + "df.collect()", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
-      assertTrue(
-          result
-              .message()
-              .get(0)
-              .getData()
-              .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
+      assertTrue(result.message().get(0).getData()
+          .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
     } else {
-      result =
-          sparkInterpreter.interpret(
-              "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
-                  + ".toDF(\"col_1\", \"col_2\")\n"
-                  + "df.collect()",
-              context);
+      result = sparkInterpreter.interpret(
+          "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
+              + ".toDF(\"col_1\", \"col_2\")\n"
+              + "df.collect()", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
-      assertTrue(
-          result
-              .message()
-              .get(0)
-              .getData()
-              .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
+      assertTrue(result.message().get(0).getData()
+          .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
     }
     sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
     // test LivySparkSQLInterpreter which share the same SparkContext with 
LivySparkInterpreter
-    result =
-        sqlInterpreter.interpret(
-            "select * from df where col_1='12characters12characters'", 
context);
+    result = sqlInterpreter.interpret("select * from df where 
col_1='12characters12characters'",
+        context);
     assertEquals(InterpreterResult.Code.SUCCESS, result.code());
     assertEquals(InterpreterResult.Type.TABLE, 
result.message().get(0).getType());
     assertEquals("col_1\tcol_2\n12characters12cha...\t20", 
result.message().get(0).getData());
+
   }
 
   @Test
@@ -391,13 +363,12 @@ public class LivyInterpreterIT {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    final InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    final InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
     pysparkInterpreter.open();
 
     // test traceback msg
@@ -405,8 +376,8 @@ public class LivyInterpreterIT {
       pysparkInterpreter.getLivyVersion();
       // for livy version >=0.3 , input some erroneous spark code, check the 
shown result is more
       // than one line
-      InterpreterResult result =
-          pysparkInterpreter.interpret("sc.parallelize(wrongSyntax(1, 
2)).count()", context);
+      InterpreterResult result = pysparkInterpreter.interpret(
+          "sc.parallelize(wrongSyntax(1, 2)).count()", context);
       assertEquals(InterpreterResult.Code.ERROR, result.code());
       assertTrue(result.message().get(0).getData().split("\n").length > 1);
       assertTrue(result.message().get(0).getData().contains("Traceback"));
@@ -426,7 +397,7 @@ public class LivyInterpreterIT {
     assertEquals(InterpreterResult.Code.SUCCESS, reslt.code());
     assertTrue(reslt.message().get(0).getData().contains(utf8Str));
 
-    // test special characters
+    //test special characters
     String charStr = "açñiñíûÑoç";
     InterpreterResult res = pysparkInterpreter.interpret("print(\"" + charStr 
+ "\")", context);
     assertEquals(InterpreterResult.Code.SUCCESS, res.code());
@@ -447,34 +418,28 @@ public class LivyInterpreterIT {
 
       // test DataFrame api
       if (!isSpark2) {
-        pysparkInterpreter.interpret(
-            "from pyspark.sql import SQLContext\n" + "sqlContext = 
SQLContext(sc)", context);
-        result =
-            pysparkInterpreter.interpret(
-                "df=sqlContext.createDataFrame([(\"hello\",20)])\n" + 
"df.collect()", context);
+        pysparkInterpreter.interpret("from pyspark.sql import SQLContext\n"
+            + "sqlContext = SQLContext(sc)", context);
+        result = 
pysparkInterpreter.interpret("df=sqlContext.createDataFrame([(\"hello\",20)])\n"
+            + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        // python2 has u and python3 don't have u
-        assertTrue(
-            result.message().get(0).getData().contains("[Row(_1=u'hello', 
_2=20)]")
-                || 
result.message().get(0).getData().contains("[Row(_1='hello', _2=20)]"));
+        //python2 has u and python3 don't have u
+        
assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', 
_2=20)]")
+            || result.message().get(0).getData().contains("[Row(_1='hello', 
_2=20)]"));
       } else {
-        result =
-            pysparkInterpreter.interpret(
-                "df=spark.createDataFrame([(\"hello\",20)])\n" + 
"df.collect()", context);
+        result = 
pysparkInterpreter.interpret("df=spark.createDataFrame([(\"hello\",20)])\n"
+            + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        // python2 has u and python3 don't have u
-        assertTrue(
-            result.message().get(0).getData().contains("[Row(_1=u'hello', 
_2=20)]")
-                || 
result.message().get(0).getData().contains("[Row(_1='hello', _2=20)]"));
+        //python2 has u and python3 don't have u
+        
assertTrue(result.message().get(0).getData().contains("[Row(_1=u'hello', 
_2=20)]")
+            || result.message().get(0).getData().contains("[Row(_1='hello', 
_2=20)]"));
       }
 
       // test magic api
-      pysparkInterpreter.interpret(
-          "t = [{\"name\":\"userA\", \"role\":\"roleA\"},"
-              + "{\"name\":\"userB\", \"role\":\"roleB\"}]",
-          context);
+      pysparkInterpreter.interpret("t = [{\"name\":\"userA\", 
\"role\":\"roleA\"},"
+          + "{\"name\":\"userB\", \"role\":\"roleB\"}]", context);
       result = pysparkInterpreter.interpret("%table t", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
@@ -489,29 +454,27 @@ public class LivyInterpreterIT {
 
       // cancel
       if 
(pysparkInterpreter.livyVersion.newerThanEquals(LivyVersion.LIVY_0_3_0)) {
-        Thread cancelThread =
-            new Thread() {
-              @Override
-              public void run() {
-                // invoke cancel after 1 millisecond to wait job starting
-                try {
-                  Thread.sleep(1);
-                } catch (InterruptedException e) {
-                  e.printStackTrace();
-                }
-                pysparkInterpreter.cancel(context);
-              }
-            };
+        Thread cancelThread = new Thread() {
+          @Override
+          public void run() {
+            // invoke cancel after 1 millisecond to wait job starting
+            try {
+              Thread.sleep(1);
+            } catch (InterruptedException e) {
+              e.printStackTrace();
+            }
+            pysparkInterpreter.cancel(context);
+          }
+        };
         cancelThread.start();
-        result =
-            pysparkInterpreter.interpret(
-                "import time\n" + "sc.range(1, 10).foreach(lambda a: 
time.sleep(10))", context);
+        result = pysparkInterpreter
+            .interpret("import time\n" +
+                "sc.range(1, 10).foreach(lambda a: time.sleep(10))", context);
         assertEquals(InterpreterResult.Code.ERROR, result.code());
         String message = result.message().get(0).getData();
         // 2 possibilities, sometimes livy doesn't return the real cancel 
exception
-        assertTrue(
-            message.contains("cancelled part of cancelled job group")
-                || message.contains("Job is cancelled"));
+        assertTrue(message.contains("cancelled part of cancelled job group") ||
+            message.contains("Job is cancelled"));
       }
     } finally {
       pysparkInterpreter.close();
@@ -537,13 +500,12 @@ public class LivyInterpreterIT {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
     sparkInterpreter.open();
 
     LivySparkSQLInterpreter sqlInterpreter = new 
LivySparkSQLInterpreter(properties2);
@@ -573,43 +535,28 @@ public class LivyInterpreterIT {
       boolean isSpark2 = isSpark2(sparkInterpreter, context);
 
       if (!isSpark2) {
-        result =
-            sparkInterpreter.interpret(
-                "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
-                    + ".toDF(\"col_1\", \"col_2\")\n"
-                    + "df.collect()",
-                context);
+        result = sparkInterpreter.interpret(
+            "val 
df=sqlContext.createDataFrame(Seq((\"12characters12characters\",20)))"
+                + ".toDF(\"col_1\", \"col_2\")\n"
+                + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(2, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains(
-                    "Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
+        assertTrue(result.message().get(0).getData()
+            .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
       } else {
-        result =
-            sparkInterpreter.interpret(
-                "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
-                    + ".toDF(\"col_1\", \"col_2\")\n"
-                    + "df.collect()",
-                context);
+        result = sparkInterpreter.interpret(
+            "val 
df=spark.createDataFrame(Seq((\"12characters12characters\",20)))"
+                + ".toDF(\"col_1\", \"col_2\")\n"
+                + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(2, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains(
-                    "Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
+        assertTrue(result.message().get(0).getData()
+            .contains("Array[org.apache.spark.sql.Row] = 
Array([12characters12characters,20])"));
       }
       sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
       // test LivySparkSQLInterpreter which share the same SparkContext with 
LivySparkInterpreter
-      result =
-          sqlInterpreter.interpret(
-              "select * from df where col_1='12characters12characters'", 
context);
+      result = sqlInterpreter.interpret("select * from df where 
col_1='12characters12characters'",
+          context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(InterpreterResult.Type.TABLE, 
result.message().get(0).getType());
       assertEquals("col_1\tcol_2\n12characters12characters\t20", 
result.message().get(0).getData());
@@ -637,13 +584,12 @@ public class LivyInterpreterIT {
     AuthenticationInfo authInfo = new AuthenticationInfo("user1");
     MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
     InterpreterOutput output = new InterpreterOutput(outputListener);
-    final InterpreterContext context =
-        InterpreterContext.builder()
-            .setNoteId("noteId")
-            .setParagraphId("paragraphId")
-            .setAuthenticationInfo(authInfo)
-            .setInterpreterOut(output)
-            .build();
+    final InterpreterContext context = InterpreterContext.builder()
+        .setNoteId("noteId")
+        .setParagraphId("paragraphId")
+        .setAuthenticationInfo(authInfo)
+        .setInterpreterOut(output)
+        .build();
     sparkRInterpreter.open();
 
     try {
@@ -658,36 +604,30 @@ public class LivyInterpreterIT {
         assertTrue(result.message().get(0).getData().contains("eruptions 
waiting"));
 
         // cancel
-        Thread cancelThread =
-            new Thread() {
-              @Override
-              public void run() {
-                // invoke cancel after 1 millisecond to wait job starting
-                try {
-                  Thread.sleep(1);
-                } catch (InterruptedException e) {
-                  e.printStackTrace();
-                }
-                sparkRInterpreter.cancel(context);
-              }
-            };
+        Thread cancelThread = new Thread() {
+          @Override
+          public void run() {
+            // invoke cancel after 1 millisecond to wait job starting
+            try {
+              Thread.sleep(1);
+            } catch (InterruptedException e) {
+              e.printStackTrace();
+            }
+            sparkRInterpreter.cancel(context);
+          }
+        };
         cancelThread.start();
-        result =
-            sparkRInterpreter.interpret(
-                "df <- as.DataFrame(faithful)\n"
-                    + "df1 <- dapplyCollect(df, function(x) "
-                    + "{ Sys.sleep(10); x <- cbind(x, x$waiting * 60) })",
-                context);
+        result = sparkRInterpreter.interpret("df <- as.DataFrame(faithful)\n" +
+            "df1 <- dapplyCollect(df, function(x) " +
+            "{ Sys.sleep(10); x <- cbind(x, x$waiting * 60) })", context);
         assertEquals(InterpreterResult.Code.ERROR, result.code());
         String message = result.message().get(0).getData();
         // 2 possibilities, sometimes livy doesn't return the real cancel 
exception
-        assertTrue(
-            message.contains("cancelled part of cancelled job group")
-                || message.contains("Job is cancelled"));
+        assertTrue(message.contains("cancelled part of cancelled job group") ||
+            message.contains("Job is cancelled"));
       } else {
-        result =
-            sparkRInterpreter.interpret(
-                "df <- createDataFrame(sqlContext, faithful)" + "\nhead(df)", 
context);
+        result = sparkRInterpreter.interpret("df <- 
createDataFrame(sqlContext, faithful)" +
+            "\nhead(df)", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("eruptions 
waiting"));
@@ -710,12 +650,12 @@ public class LivyInterpreterIT {
     }
     InterpreterGroup interpreterGroup = new InterpreterGroup("group_1");
     interpreterGroup.put("session_1", new ArrayList<Interpreter>());
-    LazyOpenInterpreter sparkInterpreter =
-        new LazyOpenInterpreter(new LivySparkInterpreter(properties));
+    LazyOpenInterpreter sparkInterpreter = new LazyOpenInterpreter(
+        new LivySparkInterpreter(properties));
     sparkInterpreter.setInterpreterGroup(interpreterGroup);
     interpreterGroup.get("session_1").add(sparkInterpreter);
-    LazyOpenInterpreter sqlInterpreter =
-        new LazyOpenInterpreter(new LivySparkSQLInterpreter(properties));
+    LazyOpenInterpreter sqlInterpreter = new LazyOpenInterpreter(
+        new LivySparkSQLInterpreter(properties));
     interpreterGroup.get("session_1").add(sqlInterpreter);
     sqlInterpreter.setInterpreterGroup(interpreterGroup);
     sqlInterpreter.open();
@@ -724,13 +664,12 @@ public class LivyInterpreterIT {
       AuthenticationInfo authInfo = new AuthenticationInfo("user1");
       MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
       InterpreterOutput output = new InterpreterOutput(outputListener);
-      InterpreterContext context =
-          InterpreterContext.builder()
-              .setNoteId("noteId")
-              .setParagraphId("paragraphId")
-              .setAuthenticationInfo(authInfo)
-              .setInterpreterOut(output)
-              .build();
+      InterpreterContext context = InterpreterContext.builder()
+          .setNoteId("noteId")
+          .setParagraphId("paragraphId")
+          .setAuthenticationInfo(authInfo)
+          .setInterpreterOut(output)
+          .build();
 
       String p1 = 
IOUtils.toString(getClass().getResourceAsStream("/livy_tutorial_1.scala"));
       InterpreterResult result = sparkInterpreter.interpret(p1, context);
@@ -753,28 +692,28 @@ public class LivyInterpreterIT {
     }
     InterpreterGroup interpreterGroup = new InterpreterGroup("group_1");
     interpreterGroup.put("session_1", new ArrayList<Interpreter>());
-    LazyOpenInterpreter sparkInterpreter =
-        new LazyOpenInterpreter(new LivySparkInterpreter(properties));
+    LazyOpenInterpreter sparkInterpreter = new LazyOpenInterpreter(
+        new LivySparkInterpreter(properties));
     sparkInterpreter.setInterpreterGroup(interpreterGroup);
     interpreterGroup.get("session_1").add(sparkInterpreter);
 
-    LazyOpenInterpreter sqlInterpreter =
-        new LazyOpenInterpreter(new LivySparkSQLInterpreter(properties));
+    LazyOpenInterpreter sqlInterpreter = new LazyOpenInterpreter(
+        new LivySparkSQLInterpreter(properties));
     interpreterGroup.get("session_1").add(sqlInterpreter);
     sqlInterpreter.setInterpreterGroup(interpreterGroup);
 
-    LazyOpenInterpreter pysparkInterpreter =
-        new LazyOpenInterpreter(new LivyPySparkInterpreter(properties));
+    LazyOpenInterpreter pysparkInterpreter = new LazyOpenInterpreter(
+        new LivyPySparkInterpreter(properties));
     interpreterGroup.get("session_1").add(pysparkInterpreter);
     pysparkInterpreter.setInterpreterGroup(interpreterGroup);
 
-    LazyOpenInterpreter sparkRInterpreter =
-        new LazyOpenInterpreter(new LivySparkRInterpreter(properties));
+    LazyOpenInterpreter sparkRInterpreter = new LazyOpenInterpreter(
+        new LivySparkRInterpreter(properties));
     interpreterGroup.get("session_1").add(sparkRInterpreter);
     sparkRInterpreter.setInterpreterGroup(interpreterGroup);
 
-    LazyOpenInterpreter sharedInterpreter =
-        new LazyOpenInterpreter(new LivySharedInterpreter(properties));
+    LazyOpenInterpreter sharedInterpreter = new LazyOpenInterpreter(
+        new LivySharedInterpreter(properties));
     interpreterGroup.get("session_1").add(sharedInterpreter);
     sharedInterpreter.setInterpreterGroup(interpreterGroup);
 
@@ -787,91 +726,68 @@ public class LivyInterpreterIT {
       AuthenticationInfo authInfo = new AuthenticationInfo("user1");
       MyInterpreterOutputListener outputListener = new 
MyInterpreterOutputListener();
       InterpreterOutput output = new InterpreterOutput(outputListener);
-      InterpreterContext context =
-          InterpreterContext.builder()
-              .setNoteId("noteId")
-              .setParagraphId("paragraphId")
-              .setAuthenticationInfo(authInfo)
-              .setInterpreterOut(output)
-              .build();
+      InterpreterContext context = InterpreterContext.builder()
+          .setNoteId("noteId")
+          .setParagraphId("paragraphId")
+          .setAuthenticationInfo(authInfo)
+          .setInterpreterOut(output)
+          .build();
       // detect spark version
       InterpreterResult result = sparkInterpreter.interpret("sc.version", 
context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
 
-      boolean isSpark2 =
-          isSpark2((BaseLivyInterpreter) 
sparkInterpreter.getInnerInterpreter(), context);
+      boolean isSpark2 = isSpark2((BaseLivyInterpreter) 
sparkInterpreter.getInnerInterpreter(),
+          context);
 
       if (!isSpark2) {
-        result =
-            sparkInterpreter.interpret(
-                "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
-                    + "df.collect()",
-                context);
+        result = sparkInterpreter.interpret(
+            "val 
df=sqlContext.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
+                + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains("Array[org.apache.spark.sql.Row] = 
Array([hello,20])"));
+        assertTrue(result.message().get(0).getData()
+            .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
         sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
 
         // access table from pyspark
-        result =
-            pysparkInterpreter.interpret("sqlContext.sql(\"select * from 
df\").show()", context);
+        result = pysparkInterpreter.interpret("sqlContext.sql(\"select * from 
df\").show()",
+            context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains(
-                    "+-----+-----+\n"
-                        + "|col_1|col_2|\n"
-                        + "+-----+-----+\n"
-                        + "|hello|   20|\n"
-                        + "+-----+-----+"));
+        assertTrue(result.message().get(0).getData()
+            .contains("+-----+-----+\n" +
+                "|col_1|col_2|\n" +
+                "+-----+-----+\n" +
+                "|hello|   20|\n" +
+                "+-----+-----+"));
 
         // access table from sparkr
-        result =
-            sparkRInterpreter.interpret("head(sql(sqlContext, \"select * from 
df\"))", context);
+        result = sparkRInterpreter.interpret("head(sql(sqlContext, \"select * 
from df\"))",
+            context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
         assertTrue(result.message().get(0).getData().contains("col_1 col_2\n1 
hello    20"));
       } else {
-        result =
-            sparkInterpreter.interpret(
-                "val 
df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", \"col_2\")\n"
-                    + "df.collect()",
-                context);
+        result = sparkInterpreter.interpret(
+            "val df=spark.createDataFrame(Seq((\"hello\",20))).toDF(\"col_1\", 
\"col_2\")\n"
+                + "df.collect()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains("Array[org.apache.spark.sql.Row] = 
Array([hello,20])"));
+        assertTrue(result.message().get(0).getData()
+            .contains("Array[org.apache.spark.sql.Row] = Array([hello,20])"));
         sparkInterpreter.interpret("df.registerTempTable(\"df\")", context);
 
         // access table from pyspark
         result = pysparkInterpreter.interpret("spark.sql(\"select * from 
df\").show()", context);
         assertEquals(InterpreterResult.Code.SUCCESS, result.code());
         assertEquals(1, result.message().size());
-        assertTrue(
-            result
-                .message()
-                .get(0)
-                .getData()
-                .contains(
-                    "+-----+-----+\n"
-                        + "|col_1|col_2|\n"
-                        + "+-----+-----+\n"
-                        + "|hello|   20|\n"
-                        + "+-----+-----+"));
+        assertTrue(result.message().get(0).getData()
+            .contains("+-----+-----+\n" +
+                "|col_1|col_2|\n" +
+                "+-----+-----+\n" +
+                "|hello|   20|\n" +
+                "+-----+-----+"));
 
         // access table from sparkr
         result = sparkRInterpreter.interpret("head(sql(\"select * from 
df\"))", context);
@@ -881,28 +797,27 @@ public class LivyInterpreterIT {
       }
 
       // test plotting of python
-      result =
-          pysparkInterpreter.interpret(
-              "import matplotlib.pyplot as plt\n"
-                  + "plt.switch_backend('agg')\n"
-                  + "data=[1,2,3,4]\n"
-                  + "plt.figure()\n"
-                  + "plt.plot(data)\n"
-                  + "%matplot plt",
-              context);
+      result = pysparkInterpreter.interpret(
+          "import matplotlib.pyplot as plt\n" +
+              "plt.switch_backend('agg')\n" +
+              "data=[1,2,3,4]\n" +
+              "plt.figure()\n" +
+              "plt.plot(data)\n" +
+              "%matplot plt", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
       assertEquals(InterpreterResult.Type.IMG, 
result.message().get(0).getType());
 
       // test plotting of R
-      result = sparkRInterpreter.interpret("hist(mtcars$mpg)", context);
+      result = sparkRInterpreter.interpret(
+          "hist(mtcars$mpg)", context);
       assertEquals(InterpreterResult.Code.SUCCESS, result.code());
       assertEquals(1, result.message().size());
       assertEquals(InterpreterResult.Type.IMG, 
result.message().get(0).getType());
 
       // test code completion
-      List<InterpreterCompletion> completionResult =
-          sparkInterpreter.completion("df.sho", 6, context);
+      List<InterpreterCompletion> completionResult = sparkInterpreter
+          .completion("df.sho", 6, context);
       assertEquals(1, completionResult.size());
       assertEquals("show", completionResult.get(0).name);
 
@@ -934,12 +849,17 @@ public class LivyInterpreterIT {
 
   public static class MyInterpreterOutputListener implements 
InterpreterOutputListener {
     @Override
-    public void onAppend(int index, InterpreterResultMessageOutput out, byte[] 
line) {}
+    public void onAppend(int index, InterpreterResultMessageOutput out, byte[] 
line) {
+    }
 
     @Override
-    public void onUpdate(int index, InterpreterResultMessageOutput out) {}
+    public void onUpdate(int index, InterpreterResultMessageOutput out) {
+
+    }
 
     @Override
-    public void onUpdateAll(InterpreterOutput out) {}
+    public void onUpdateAll(InterpreterOutput out) {
+
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/livy/src/test/java/org/apache/zeppelin/livy/LivySQLInterpreterTest.java
----------------------------------------------------------------------
diff --git 
a/livy/src/test/java/org/apache/zeppelin/livy/LivySQLInterpreterTest.java 
b/livy/src/test/java/org/apache/zeppelin/livy/LivySQLInterpreterTest.java
index 8e1a83d..8821a86 100644
--- a/livy/src/test/java/org/apache/zeppelin/livy/LivySQLInterpreterTest.java
+++ b/livy/src/test/java/org/apache/zeppelin/livy/LivySQLInterpreterTest.java
@@ -21,12 +21,15 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
-import java.util.List;
-import java.util.Properties;
 import org.junit.Before;
 import org.junit.Test;
 
-/** Unit test for LivySQLInterpreter. */
+import java.util.List;
+import java.util.Properties;
+
+/**
+ * Unit test for LivySQLInterpreter.
+ */
 public class LivySQLInterpreterTest {
 
   private LivySparkSQLInterpreter sqlInterpreter;
@@ -55,11 +58,14 @@ public class LivySQLInterpreterTest {
     //    |  a|  b|
     //    +---+---+
     //    +---+---+
-    List<String> rows =
-        sqlInterpreter.parseSQLOutput("+---+---+\n" + "|  a|  b|\n" + 
"+---+---+\n" + "+---+---+");
+    List<String> rows = sqlInterpreter.parseSQLOutput("+---+---+\n" +
+                                  "|  a|  b|\n" +
+                                  "+---+---+\n" +
+                                  "+---+---+");
     assertEquals(1, rows.size());
     assertEquals("a\tb", rows.get(0));
 
+
     //  sql output with 2 rows
     //    +---+---+
     //    |  a|  b|
@@ -67,19 +73,18 @@ public class LivySQLInterpreterTest {
     //    |  1| 1a|
     //    |  2| 2b|
     //    +---+---+
-    rows =
-        sqlInterpreter.parseSQLOutput(
-            "+---+---+\n"
-                + "|  a|  b|\n"
-                + "+---+---+\n"
-                + "|  1| 1a|\n"
-                + "|  2| 2b|\n"
-                + "+---+---+");
+    rows = sqlInterpreter.parseSQLOutput("+---+---+\n" +
+        "|  a|  b|\n" +
+        "+---+---+\n" +
+        "|  1| 1a|\n" +
+        "|  2| 2b|\n" +
+        "+---+---+");
     assertEquals(3, rows.size());
     assertEquals("a\tb", rows.get(0));
     assertEquals("1\t1a", rows.get(1));
     assertEquals("2\t2b", rows.get(2));
 
+
     //  sql output with 3 rows and showing "only showing top 3 rows"
     //    +---+---+
     //    |  a|  b|
@@ -89,22 +94,21 @@ public class LivySQLInterpreterTest {
     //    |  3| 3c|
     //    +---+---+
     //    only showing top 3 rows
-    rows =
-        sqlInterpreter.parseSQLOutput(
-            "+---+---+\n"
-                + "|  a|  b|\n"
-                + "+---+---+\n"
-                + "|  1| 1a|\n"
-                + "|  2| 2b|\n"
-                + "|  3| 3c|\n"
-                + "+---+---+\n"
-                + "only showing top 3 rows");
+    rows = sqlInterpreter.parseSQLOutput("+---+---+\n" +
+        "|  a|  b|\n" +
+        "+---+---+\n" +
+        "|  1| 1a|\n" +
+        "|  2| 2b|\n" +
+        "|  3| 3c|\n" +
+        "+---+---+\n" +
+        "only showing top 3 rows");
     assertEquals(4, rows.size());
     assertEquals("a\tb", rows.get(0));
     assertEquals("1\t1a", rows.get(1));
     assertEquals("2\t2b", rows.get(2));
     assertEquals("3\t3c", rows.get(3));
 
+
     //  sql output with 1 rows and showing "only showing top 1 rows"
     //    +---+
     //    |  a|
@@ -112,13 +116,17 @@ public class LivySQLInterpreterTest {
     //    |  1|
     //    +---+
     //    only showing top 1 rows
-    rows =
-        sqlInterpreter.parseSQLOutput(
-            "+---+\n" + "|  a|\n" + "+---+\n" + "|  1|\n" + "+---+\n" + "only 
showing top 1 rows");
+    rows = sqlInterpreter.parseSQLOutput("+---+\n" +
+        "|  a|\n" +
+        "+---+\n" +
+        "|  1|\n" +
+        "+---+\n" +
+        "only showing top 1 rows");
     assertEquals(2, rows.size());
     assertEquals("a", rows.get(0));
     assertEquals("1", rows.get(1));
 
+
     //  sql output with 3 rows, 3 columns, showing "only showing top 3 rows" 
with a line break in
     //  the data
     //    +---+---+---+
@@ -130,22 +138,21 @@ public class LivySQLInterpreterTest {
     //    | 3a| 3b| 3c|
     //    +---+---+---+
     //    only showing top 3 rows
-    rows =
-        sqlInterpreter.parseSQLOutput(
-            "+---+----+---+\n"
-                + "|  a|   b|  c|\n"
-                + "+---+----+---+\n"
-                + "| 1a|  1b| 1c|\n"
-                + "| 2a| 2\nb| 2c|\n"
-                + "| 3a|  3b| 3c|\n"
-                + "+---+---+---+\n"
-                + "only showing top 3 rows");
+    rows = sqlInterpreter.parseSQLOutput("+---+----+---+\n" +
+            "|  a|   b|  c|\n" +
+            "+---+----+---+\n" +
+            "| 1a|  1b| 1c|\n" +
+            "| 2a| 2\nb| 2c|\n" +
+            "| 3a|  3b| 3c|\n" +
+            "+---+---+---+\n" +
+            "only showing top 3 rows");
     assertEquals(4, rows.size());
     assertEquals("a\tb\tc", rows.get(0));
     assertEquals("1a\t1b\t1c", rows.get(1));
     assertEquals("2a\t2\\nb\t2c", rows.get(2));
     assertEquals("3a\t3b\t3c", rows.get(3));
 
+
     //  sql output with 2 rows and one containing a tab
     //    +---+---+
     //    |  a|  b|
@@ -153,14 +160,12 @@ public class LivySQLInterpreterTest {
     //    |  1| \ta|
     //    |  2| 2b|
     //    +---+---+
-    rows =
-        sqlInterpreter.parseSQLOutput(
-            "+---+---+\n"
-                + "|  a|  b|\n"
-                + "+---+---+\n"
-                + "|  1| \ta|\n"
-                + "|  2| 2b|\n"
-                + "+---+---+");
+    rows = sqlInterpreter.parseSQLOutput("+---+---+\n" +
+            "|  a|  b|\n" +
+            "+---+---+\n" +
+            "|  1| \ta|\n" +
+            "|  2| 2b|\n" +
+            "+---+---+");
     assertEquals(3, rows.size());
     assertEquals("a\tb", rows.get(0));
     assertEquals("1\t\\ta", rows.get(1));

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/pom.xml
----------------------------------------------------------------------
diff --git a/markdown/pom.xml b/markdown/pom.xml
index dce5157..79cabc1 100644
--- a/markdown/pom.xml
+++ b/markdown/pom.xml
@@ -93,6 +93,13 @@
       <plugin>
         <artifactId>maven-resources-plugin</artifactId>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <skip>false</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 </project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown.java
----------------------------------------------------------------------
diff --git a/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown.java
index 233dd16..83b4069 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown.java
@@ -17,8 +17,12 @@
 
 package org.apache.zeppelin.markdown;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.List;
 import java.util.Properties;
+
 import org.apache.zeppelin.interpreter.Interpreter;
 import org.apache.zeppelin.interpreter.InterpreterContext;
 import org.apache.zeppelin.interpreter.InterpreterResult;
@@ -27,16 +31,18 @@ import org.apache.zeppelin.interpreter.InterpreterUtils;
 import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion;
 import org.apache.zeppelin.scheduler.Scheduler;
 import org.apache.zeppelin.scheduler.SchedulerFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
-/** MarkdownInterpreter interpreter for Zeppelin. */
+/**
+ * MarkdownInterpreter interpreter for Zeppelin.
+ */
 public class Markdown extends Interpreter {
   private static final Logger LOGGER = LoggerFactory.getLogger(Markdown.class);
 
   private MarkdownParser parser;
 
-  /** Markdown Parser Type. */
+  /**
+   * Markdown Parser Type.
+   */
   public enum MarkdownParserType {
     PEGDOWN {
       @Override
@@ -79,7 +85,8 @@ public class Markdown extends Interpreter {
   }
 
   @Override
-  public void close() {}
+  public void close() {
+  }
 
   @Override
   public InterpreterResult interpret(String markdownText, InterpreterContext 
interpreterContext) {
@@ -96,7 +103,8 @@ public class Markdown extends Interpreter {
   }
 
   @Override
-  public void cancel(InterpreterContext context) {}
+  public void cancel(InterpreterContext context) {
+  }
 
   @Override
   public FormType getFormType() {
@@ -115,8 +123,8 @@ public class Markdown extends Interpreter {
   }
 
   @Override
-  public List<InterpreterCompletion> completion(
-      String buf, int cursor, InterpreterContext interpreterContext) {
+  public List<InterpreterCompletion> completion(String buf, int cursor,
+      InterpreterContext interpreterContext) {
     return null;
   }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown4jParser.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown4jParser.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown4jParser.java
index 77fd4a3..215540d 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown4jParser.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/Markdown4jParser.java
@@ -17,10 +17,13 @@
 
 package org.apache.zeppelin.markdown;
 
-import java.io.IOException;
 import org.markdown4j.Markdown4jProcessor;
 
-/** Markdown Parser using markdown4j processor. */
+import java.io.IOException;
+
+/**
+ * Markdown Parser using markdown4j processor.
+ */
 public class Markdown4jParser implements MarkdownParser {
   private Markdown4jProcessor processor;
 

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/MarkdownParser.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/main/java/org/apache/zeppelin/markdown/MarkdownParser.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/MarkdownParser.java
index 056ca26..2f8717e 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/MarkdownParser.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/MarkdownParser.java
@@ -17,7 +17,9 @@
 
 package org.apache.zeppelin.markdown;
 
-/** Abstract Markdown Parser. */
+/**
+ * Abstract Markdown Parser.
+ */
 public interface MarkdownParser {
   String render(String markdownText);
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/ParamVar.java
----------------------------------------------------------------------
diff --git a/markdown/src/main/java/org/apache/zeppelin/markdown/ParamVar.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/ParamVar.java
index 37b864e..14828e0 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/ParamVar.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/ParamVar.java
@@ -17,9 +17,10 @@
 
 package org.apache.zeppelin.markdown;
 
+import org.parboiled.support.Var;
+
 import java.util.HashMap;
 import java.util.Map;
-import org.parboiled.support.Var;
 
 /**
  * Implementation of Var to support parameter parsing.

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownParser.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownParser.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownParser.java
index 3152f99..fb99f05 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownParser.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownParser.java
@@ -21,7 +21,9 @@ import org.pegdown.Extensions;
 import org.pegdown.PegDownProcessor;
 import org.pegdown.plugins.PegDownPlugins;
 
-/** Markdown Parser using pegdown processor. */
+/**
+ * Markdown Parser using pegdown processor.
+ */
 public class PegdownParser implements MarkdownParser {
   private PegDownProcessor processor;
 
@@ -29,11 +31,10 @@ public class PegdownParser implements MarkdownParser {
   public static final int OPTIONS = Extensions.ALL_WITH_OPTIONALS - 
Extensions.ANCHORLINKS;
 
   public PegdownParser() {
-    PegDownPlugins plugins =
-        new PegDownPlugins.Builder()
-            .withPlugin(PegdownYumlPlugin.class)
-            .withPlugin(PegdownWebSequencelPlugin.class)
-            .build();
+    PegDownPlugins plugins = new PegDownPlugins.Builder()
+        .withPlugin(PegdownYumlPlugin.class)
+        .withPlugin(PegdownWebSequencelPlugin.class)
+        .build();
     processor = new PegDownProcessor(OPTIONS, PARSING_TIMEOUT_AS_MILLIS, 
plugins);
   }
 
@@ -52,7 +53,9 @@ public class PegdownParser implements MarkdownParser {
     return html;
   }
 
-  /** wrap with markdown class div to styling DOM using css. */
+  /**
+   * wrap with markdown class div to styling DOM using css.
+   */
   public static String wrapWithMarkdownClassDiv(String html) {
     return new StringBuilder()
         .append("<div class=\"markdown-body\">\n")

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownWebSequencelPlugin.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownWebSequencelPlugin.java
 
b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownWebSequencelPlugin.java
index 88184df..6238f95 100644
--- 
a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownWebSequencelPlugin.java
+++ 
b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownWebSequencelPlugin.java
@@ -17,14 +17,6 @@
 
 package org.apache.zeppelin.markdown;
 
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.net.URL;
-import java.net.URLConnection;
-import java.net.URLEncoder;
-import java.nio.charset.StandardCharsets;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.parboiled.BaseParser;
@@ -36,17 +28,29 @@ import org.pegdown.ast.TextNode;
 import org.pegdown.plugins.BlockPluginParser;
 import org.pegdown.plugins.PegDownPlugins;
 
-/** Pegdown plugin for Websequence diagram. */
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.net.URL;
+import java.net.URLConnection;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Pegdown plugin for Websequence diagram.
+ */
 public class PegdownWebSequencelPlugin extends Parser implements 
BlockPluginParser {
   private static final String WEBSEQ_URL = 
"http://www.websequencediagrams.com";;
 
   public PegdownWebSequencelPlugin() {
-    super(
-        PegdownParser.OPTIONS, PegdownParser.PARSING_TIMEOUT_AS_MILLIS, 
DefaultParseRunnerProvider);
+    super(PegdownParser.OPTIONS,
+        PegdownParser.PARSING_TIMEOUT_AS_MILLIS,
+        DefaultParseRunnerProvider);
   }
 
-  public PegdownWebSequencelPlugin(
-      Integer opts, Long millis, ParseRunnerProvider provider, PegDownPlugins 
plugins) {
+  public PegdownWebSequencelPlugin(Integer opts, Long millis, 
ParseRunnerProvider provider,
+                                   PegDownPlugins plugins) {
     super(opts, millis, provider, plugins);
   }
 
@@ -70,14 +74,16 @@ public class PegdownWebSequencelPlugin extends Parser 
implements BlockPluginPars
 
     return NodeSequence(
         startMarker(),
-        Optional(String("style="), Sequence(OneOrMore(Letter()), 
style.append(match()), Spn1())),
+        Optional(
+            String("style="),
+            Sequence(OneOrMore(Letter()), style.append(match()), Spn1())),
         Sequence(body(), body.append(match())),
         endMarker(),
         push(
-            new ExpImageNode(
-                "title",
+            new ExpImageNode("title",
                 createWebsequenceUrl(style.getString(), body.getString()),
-                new TextNode(""))));
+                new TextNode("")))
+    );
   }
 
   public static String createWebsequenceUrl(String style, String content) {
@@ -89,14 +95,13 @@ public class PegdownWebSequencelPlugin extends Parser 
implements BlockPluginPars
     String webSeqUrl = "";
 
     try {
-      String query =
-          new StringBuilder()
-              .append("style=")
-              .append(style)
-              .append("&message=")
-              .append(URLEncoder.encode(content, "UTF-8"))
-              .append("&apiVersion=1")
-              .toString();
+      String query = new StringBuilder()
+          .append("style=")
+          .append(style)
+          .append("&message=")
+          .append(URLEncoder.encode(content, "UTF-8"))
+          .append("&apiVersion=1")
+          .toString();
 
       URL url = new URL(WEBSEQ_URL);
       URLConnection conn = url.openConnection();
@@ -106,8 +111,8 @@ public class PegdownWebSequencelPlugin extends Parser 
implements BlockPluginPars
       writer.flush();
 
       StringBuilder response = new StringBuilder();
-      reader =
-          new BufferedReader(new InputStreamReader(conn.getInputStream(), 
StandardCharsets.UTF_8));
+      reader = new BufferedReader(
+          new InputStreamReader(conn.getInputStream(), 
StandardCharsets.UTF_8));
       String line;
       while ((line = reader.readLine()) != null) {
         response.append(line);
@@ -136,6 +141,6 @@ public class PegdownWebSequencelPlugin extends Parser 
implements BlockPluginPars
 
   @Override
   public Rule[] blockPluginRules() {
-    return new Rule[] {blockRule()};
+    return new Rule[]{blockRule()};
   }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownYumlPlugin.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownYumlPlugin.java 
b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownYumlPlugin.java
index e9ac9ad..c9e942a 100644
--- a/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownYumlPlugin.java
+++ b/markdown/src/main/java/org/apache/zeppelin/markdown/PegdownYumlPlugin.java
@@ -19,9 +19,6 @@ package org.apache.zeppelin.markdown;
 
 import static org.apache.commons.lang3.StringUtils.defaultString;
 
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.util.Map;
 import org.parboiled.BaseParser;
 import org.parboiled.Rule;
 import org.parboiled.support.StringBuilderVar;
@@ -31,18 +28,24 @@ import org.pegdown.ast.TextNode;
 import org.pegdown.plugins.BlockPluginParser;
 import org.pegdown.plugins.PegDownPlugins;
 
-/** Pegdown plugin for YUML. */
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.util.Map;
+
+/**
+ * Pegdown plugin for YUML.
+ */
 public class PegdownYumlPlugin extends Parser implements BlockPluginParser {
   public PegdownYumlPlugin() {
-    super(
-        PegdownParser.OPTIONS, PegdownParser.PARSING_TIMEOUT_AS_MILLIS, 
DefaultParseRunnerProvider);
+    super(PegdownParser.OPTIONS,
+        PegdownParser.PARSING_TIMEOUT_AS_MILLIS,
+        DefaultParseRunnerProvider);
   }
 
-  public PegdownYumlPlugin(
-      Integer options,
-      Long maxParsingTimeInMillis,
-      ParseRunnerProvider parseRunnerProvider,
-      PegDownPlugins plugins) {
+  public PegdownYumlPlugin(Integer options,
+                           Long maxParsingTimeInMillis,
+                           ParseRunnerProvider parseRunnerProvider,
+                           PegDownPlugins plugins) {
     super(options, maxParsingTimeInMillis, parseRunnerProvider, plugins);
   }
 
@@ -74,21 +77,19 @@ public class PegdownYumlPlugin extends Parser implements 
BlockPluginParser {
         startMarker(),
         ZeroOrMore(
             Sequence(
-                parameterName(),
-                name.append(match()),
+                parameterName(), name.append(match()),
                 String("="),
-                OneOrMore(Alphanumeric()),
-                value.append(match())),
+                OneOrMore(Alphanumeric()), value.append(match())),
             Sp(),
             params.put(name.getString(), value.getString()),
-            name.clear(),
-            value.clear()),
+            name.clear(), value.clear()),
         body(),
         body.append(match()),
         endMarker(),
         push(
             new ExpImageNode(
-                "title", createYumlUrl(params.get(), body.getString()), new 
TextNode(""))));
+                "title", createYumlUrl(params.get(), body.getString()), new 
TextNode("")))
+    );
   }
 
   public static String createYumlUrl(Map<String, String> params, String body) {
@@ -136,6 +137,6 @@ public class PegdownYumlPlugin extends Parser implements 
BlockPluginParser {
 
   @Override
   public Rule[] blockPluginRules() {
-    return new Rule[] {blockRule()};
+    return new Rule[]{blockRule()};
   }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/test/java/org/apache/zeppelin/markdown/Markdown4jParserTest.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/test/java/org/apache/zeppelin/markdown/Markdown4jParserTest.java 
b/markdown/src/test/java/org/apache/zeppelin/markdown/Markdown4jParserTest.java
index 444f42d..fe381ee 100644
--- 
a/markdown/src/test/java/org/apache/zeppelin/markdown/Markdown4jParserTest.java
+++ 
b/markdown/src/test/java/org/apache/zeppelin/markdown/Markdown4jParserTest.java
@@ -19,12 +19,14 @@ package org.apache.zeppelin.markdown;
 
 import static org.junit.Assert.assertEquals;
 
-import java.util.Properties;
-import org.apache.zeppelin.interpreter.InterpreterResult;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.util.Properties;
+
+import org.apache.zeppelin.interpreter.InterpreterResult;
+
 public class Markdown4jParserTest {
   Markdown md;
 

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/markdown/src/test/java/org/apache/zeppelin/markdown/PegdownParserTest.java
----------------------------------------------------------------------
diff --git 
a/markdown/src/test/java/org/apache/zeppelin/markdown/PegdownParserTest.java 
b/markdown/src/test/java/org/apache/zeppelin/markdown/PegdownParserTest.java
index a07c470..a608a05 100644
--- a/markdown/src/test/java/org/apache/zeppelin/markdown/PegdownParserTest.java
+++ b/markdown/src/test/java/org/apache/zeppelin/markdown/PegdownParserTest.java
@@ -17,13 +17,11 @@
 
 package org.apache.zeppelin.markdown;
 
-import static 
org.apache.zeppelin.markdown.PegdownParser.wrapWithMarkdownClassDiv;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 
-import java.util.ArrayList;
-import java.util.Properties;
-import org.apache.zeppelin.interpreter.InterpreterResult;
+import static 
org.apache.zeppelin.markdown.PegdownParser.wrapWithMarkdownClassDiv;
+
 import org.hamcrest.CoreMatchers;
 import org.junit.After;
 import org.junit.Before;
@@ -33,11 +31,17 @@ import org.junit.rules.ErrorCollector;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.ArrayList;
+import java.util.Properties;
+
+import org.apache.zeppelin.interpreter.InterpreterResult;
+
 public class PegdownParserTest {
   Logger logger = LoggerFactory.getLogger(PegdownParserTest.class);
   Markdown md;
 
-  @Rule public ErrorCollector collector = new ErrorCollector();
+  @Rule
+  public ErrorCollector collector = new ErrorCollector();
 
   @Before
   public void setUp() {
@@ -56,18 +60,18 @@ public class PegdownParserTest {
   public void testMultipleThread() {
     ArrayList<Thread> arrThreads = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
-      Thread t =
-          new Thread() {
-            public void run() {
-              String r1 = null;
-              try {
-                r1 = md.interpret("# H1", null).code().name();
-              } catch (Exception e) {
-                logger.error("testTestMultipleThread failed to interpret", e);
-              }
-              collector.checkThat("SUCCESS", CoreMatchers.containsString(r1));
-            }
-          };
+      Thread t = new Thread() {
+        public void run() {
+          String r1 = null;
+          try {
+            r1 = md.interpret("# H1", null).code().name();
+          } catch (Exception e) {
+            logger.error("testTestMultipleThread failed to interpret", e);
+          }
+          collector.checkThat("SUCCESS",
+              CoreMatchers.containsString(r1));
+        }
+      };
       t.start();
       arrThreads.add(t);
     }
@@ -113,7 +117,7 @@ public class PegdownParserTest {
     InterpreterResult result = md.interpret("This is ~~deleted~~ text", null);
     assertEquals(
         wrapWithMarkdownClassDiv("<p>This is <del>deleted</del> text</p>"),
-        result.message().get(0).getData());
+            result.message().get(0).getData());
   }
 
   @Test
@@ -121,7 +125,7 @@ public class PegdownParserTest {
     InterpreterResult result = md.interpret("This is *italics* text", null);
     assertEquals(
         wrapWithMarkdownClassDiv("<p>This is <em>italics</em> text</p>"),
-        result.message().get(0).getData());
+            result.message().get(0).getData());
   }
 
   @Test
@@ -182,7 +186,7 @@ public class PegdownParserTest {
             .append("\n")
             .append(
                 "[I'm an inline-style link with title](https://www.google.com "
-                    + "\"Google's Homepage\")\n")
+                        + "\"Google's Homepage\")\n")
             .append("\n")
             .append("[I'm a reference-style link][Arbitrary case-insensitive 
reference text]\n")
             .append("\n")
@@ -209,23 +213,23 @@ public class PegdownParserTest {
                 "<p><a href=\"https://www.google.com\";>I&rsquo;m an 
inline-style link</a></p>\n")
             .append(
                 "<p><a href=\"https://www.google.com\"; title=\"Google&#39;s 
Homepage\">I&rsquo;m "
-                    + "an inline-style link with title</a></p>\n")
+                        + "an inline-style link with title</a></p>\n")
             .append(
                 "<p><a href=\"https://www.mozilla.org\";>I&rsquo;m a 
reference-style link</a></p>\n")
             .append(
                 "<p><a href=\"../blob/master/LICENSE\">I&rsquo;m a relative 
reference to a "
-                    + "repository file</a></p>\n")
+                        + "repository file</a></p>\n")
             .append(
                 "<p><a href=\"http://slashdot.org\">You can use numbers for 
reference-style link "
-                    + "definitions</a></p>\n")
+                        + "definitions</a></p>\n")
             .append(
                 "<p>Or leave it empty and use the <a 
href=\"http://www.reddit.com\">link text "
-                    + "itself</a>.</p>\n")
+                        + "itself</a>.</p>\n")
             .append(
                 "<p>URLs and URLs in angle brackets will automatically get 
turned into links."
-                    + "<br/><a 
href=\"http://www.example.com\";>http://www.example.com</a> or "
-                    + "<a 
href=\"http://www.example.com\";>http://www.example.com</a> and "
-                    + "sometimes<br/>example.com (but not on Github, for 
example).</p>\n")
+                        + "<br/><a 
href=\"http://www.example.com\";>http://www.example.com</a> or "
+                        + "<a 
href=\"http://www.example.com\";>http://www.example.com</a> and "
+                        + "sometimes<br/>example.com (but not on Github, for 
example).</p>\n")
             .append("<p>Some text to show that the reference links can follow 
later.</p>")
             .toString();
 
@@ -252,26 +256,26 @@ public class PegdownParserTest {
     assertEquals(
         wrapWithMarkdownClassDiv(
             "<blockquote>\n"
-                + "  <p>Blockquotes are very handy in email to emulate reply 
text.<br/>This "
-                + "line is part of the same quote.</p>\n"
-                + "</blockquote>"),
+                    + "  <p>Blockquotes are very handy in email to emulate 
reply text.<br/>This "
+                    + "line is part of the same quote.</p>\n"
+                    + "</blockquote>"),
         r1.message().get(0).getData());
 
     InterpreterResult r2 =
         md.interpret(
             "> This is a very long line that will still be quoted properly 
when it "
-                + "wraps. Oh boy let's keep writing to make sure this is long 
enough to "
-                + "actually wrap for everyone. Oh, you can *put* 
**MarkdownInterpreter** "
-                + "into a blockquote. ",
+                    + "wraps. Oh boy let's keep writing to make sure this is 
long enough to "
+                    + "actually wrap for everyone. Oh, you can *put* 
**MarkdownInterpreter** "
+                    + "into a blockquote. ",
             null);
     assertEquals(
         wrapWithMarkdownClassDiv(
             "<blockquote>\n"
-                + "  <p>This is a very long line that will still be quoted 
properly when "
-                + "it wraps. Oh boy let&rsquo;s keep writing to make sure this 
is long enough "
-                + "to actually wrap for everyone. Oh, you can <em>put</em> "
-                + "<strong>MarkdownInterpreter</strong> into a blockquote. 
</p>\n"
-                + "</blockquote>"),
+                    + "  <p>This is a very long line that will still be quoted 
properly when "
+                    + "it wraps. Oh boy let&rsquo;s keep writing to make sure 
this is long enough "
+                    + "to actually wrap for everyone. Oh, you can <em>put</em> 
"
+                    + "<strong>MarkdownInterpreter</strong> into a blockquote. 
</p>\n"
+                    + "</blockquote>"),
         r2.message().get(0).getData());
   }
 
@@ -375,32 +379,25 @@ public class PegdownParserTest {
     // CoreMatchers.containsString("<img 
src=\"http://www.websequencediagrams.com/?png="));
 
     System.err.println(result.message().get(0).getData());
-    if (!result
-        .message()
-        .get(0)
-        .getData()
-        .contains("<img src=\"http://www.websequencediagrams.com/?png=")) {
-      logger.error(
-          "Expected {} but found {}",
-          "<img src=\"http://www.websequencediagrams.com/?png=",
-          result.message().get(0).getData());
+    if (!result.message().get(0).getData().contains(
+        "<img src=\"http://www.websequencediagrams.com/?png=")) {
+      logger.error("Expected {} but found {}",
+          "<img src=\"http://www.websequencediagrams.com/?png=", 
result.message().get(0).getData());
     }
   }
 
   @Test
   public void testYumlPlugin() {
-    String input =
-        new StringBuilder()
-            .append("\n \n %%% yuml style=nofunky scale=120 format=svg\n")
-            .append("[Customer]<>-orders>[Order]\n")
-            .append("[Order]++-0..>[LineItem]\n")
-            .append("[Order]-[note:Aggregate root.]\n")
-            .append("  %%%  ")
-            .toString();
+    String input = new StringBuilder()
+        .append("\n \n %%% yuml style=nofunky scale=120 format=svg\n")
+        .append("[Customer]<>-orders>[Order]\n")
+        .append("[Order]++-0..>[LineItem]\n")
+        .append("[Order]-[note:Aggregate root.]\n")
+        .append("  %%%  ")
+        .toString();
 
     InterpreterResult result = md.interpret(input, null);
-    assertThat(
-        result.message().get(0).getData(),
-        CoreMatchers.containsString("<img src=\"http://yuml.me/diagram/"));
+    assertThat(result.message().get(0).getData(),
+            CoreMatchers.containsString("<img src=\"http://yuml.me/diagram/"));
   }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/neo4j/pom.xml
----------------------------------------------------------------------
diff --git a/neo4j/pom.xml b/neo4j/pom.xml
index 2bf9e9a..906939c 100644
--- a/neo4j/pom.xml
+++ b/neo4j/pom.xml
@@ -138,6 +138,13 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <skip>false</skip>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jConnectionManager.java
----------------------------------------------------------------------
diff --git 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jConnectionManager.java
 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jConnectionManager.java
index 2e57570..208d142 100644
--- 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jConnectionManager.java
+++ 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jConnectionManager.java
@@ -17,17 +17,7 @@
 
 package org.apache.zeppelin.graph.neo4j;
 
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import org.apache.commons.lang.StringUtils;
-import org.apache.zeppelin.interpreter.InterpreterContext;
-import org.apache.zeppelin.resource.Resource;
-import org.apache.zeppelin.resource.ResourcePool;
 import org.neo4j.driver.v1.AuthToken;
 import org.neo4j.driver.v1.AuthTokens;
 import org.neo4j.driver.v1.Config;
@@ -38,10 +28,24 @@ import org.neo4j.driver.v1.StatementResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-/** Neo4j connection manager for Zeppelin. */
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.zeppelin.interpreter.InterpreterContext;
+import org.apache.zeppelin.resource.Resource;
+import org.apache.zeppelin.resource.ResourcePool;
+
+/**
+ * Neo4j connection manager for Zeppelin.
+ */
 public class Neo4jConnectionManager {
   static final Logger LOGGER = 
LoggerFactory.getLogger(Neo4jConnectionManager.class);
-
+  
   public static final String NEO4J_SERVER_URL = "neo4j.url";
   public static final String NEO4J_AUTH_TYPE = "neo4j.auth.type";
   public static final String NEO4J_AUTH_USER = "neo4j.auth.user";
@@ -62,18 +66,16 @@ public class Neo4jConnectionManager {
 
   private final AuthToken authToken;
 
-  /** Enum type for the AuthToken. */
-  public enum Neo4jAuthType {
-    NONE,
-    BASIC
-  }
+  /**
+   * Enum type for the AuthToken.
+   */
+  public enum Neo4jAuthType {NONE, BASIC}
 
   public Neo4jConnectionManager(Properties properties) {
     this.neo4jUrl = properties.getProperty(NEO4J_SERVER_URL);
-    this.config =
-        Config.build()
-            
.withMaxIdleSessions(Integer.parseInt(properties.getProperty(NEO4J_MAX_CONCURRENCY)))
-            .toConfig();
+    this.config = Config.build()
+          
.withMaxIdleSessions(Integer.parseInt(properties.getProperty(NEO4J_MAX_CONCURRENCY)))
+          .toConfig();
     String authType = properties.getProperty(NEO4J_AUTH_TYPE);
     switch (Neo4jAuthType.valueOf(authType.toUpperCase())) {
       case BASIC:
@@ -109,7 +111,8 @@ public class Neo4jConnectionManager {
     return getDriver().session();
   }
 
-  public StatementResult execute(String cypherQuery, InterpreterContext 
interpreterContext) {
+  public StatementResult execute(String cypherQuery,
+      InterpreterContext interpreterContext) {
     Map<String, Object> params = new HashMap<>();
     if (interpreterContext != null) {
       ResourcePool resourcePool = interpreterContext.getResourcePool();
@@ -125,8 +128,8 @@ public class Neo4jConnectionManager {
     LOGGER.debug("Executing cypher query {} with params {}", cypherQuery, 
params);
     StatementResult result;
     try (Session session = getSession()) {
-      result =
-          params.isEmpty() ? getSession().run(cypherQuery) : 
getSession().run(cypherQuery, params);
+      result = params.isEmpty()
+            ? getSession().run(cypherQuery) : getSession().run(cypherQuery, 
params);
     }
     return result;
   }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jCypherInterpreter.java
----------------------------------------------------------------------
diff --git 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jCypherInterpreter.java
 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jCypherInterpreter.java
index 6999b5f..bcb9d7b 100644
--- 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jCypherInterpreter.java
+++ 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/Neo4jCypherInterpreter.java
@@ -17,7 +17,17 @@
 
 package org.apache.zeppelin.graph.neo4j;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.commons.lang.StringUtils;
+import org.neo4j.driver.internal.types.InternalTypeSystem;
+import org.neo4j.driver.internal.util.Iterables;
+import org.neo4j.driver.v1.Record;
+import org.neo4j.driver.v1.StatementResult;
+import org.neo4j.driver.v1.Value;
+import org.neo4j.driver.v1.types.Node;
+import org.neo4j.driver.v1.types.Relationship;
+import org.neo4j.driver.v1.types.TypeSystem;
+import org.neo4j.driver.v1.util.Pair;
+
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
@@ -27,7 +37,9 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
-import org.apache.commons.lang.StringUtils;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+
 import org.apache.zeppelin.graph.neo4j.utils.Neo4jConversionUtils;
 import org.apache.zeppelin.interpreter.Interpreter;
 import org.apache.zeppelin.interpreter.InterpreterContext;
@@ -36,17 +48,10 @@ import 
org.apache.zeppelin.interpreter.InterpreterResult.Code;
 import org.apache.zeppelin.interpreter.graph.GraphResult;
 import org.apache.zeppelin.scheduler.Scheduler;
 import org.apache.zeppelin.scheduler.SchedulerFactory;
-import org.neo4j.driver.internal.types.InternalTypeSystem;
-import org.neo4j.driver.internal.util.Iterables;
-import org.neo4j.driver.v1.Record;
-import org.neo4j.driver.v1.StatementResult;
-import org.neo4j.driver.v1.Value;
-import org.neo4j.driver.v1.types.Node;
-import org.neo4j.driver.v1.types.Relationship;
-import org.neo4j.driver.v1.types.TypeSystem;
-import org.neo4j.driver.v1.util.Pair;
 
-/** Neo4j interpreter for Zeppelin. */
+/**
+ * Neo4j interpreter for Zeppelin.
+ */
 public class Neo4jCypherInterpreter extends Interpreter {
   private static final String TABLE = "%table";
   public static final String NEW_LINE = "\n";
@@ -57,9 +62,9 @@ public class Neo4jCypherInterpreter extends Interpreter {
   private Map<String, String> labels;
 
   private Set<String> types;
-
+  
   private final Neo4jConnectionManager neo4jConnectionManager;
-
+  
   private final ObjectMapper jsonMapper = new ObjectMapper();
 
   public Neo4jCypherInterpreter(Properties properties) {
@@ -79,8 +84,8 @@ public class Neo4jCypherInterpreter extends Interpreter {
 
   public Map<String, String> getLabels(boolean refresh) {
     if (labels == null || refresh) {
-      Map<String, String> old =
-          labels == null ? new LinkedHashMap<String, String>() : new 
LinkedHashMap<>(labels);
+      Map<String, String> old = labels == null ?
+          new LinkedHashMap<String, String>() : new LinkedHashMap<>(labels);
       labels = new LinkedHashMap<>();
       StatementResult result = this.neo4jConnectionManager.execute("CALL 
db.labels()");
       Set<String> colors = new HashSet<>();
@@ -117,7 +122,8 @@ public class Neo4jCypherInterpreter extends Interpreter {
       return new InterpreterResult(Code.SUCCESS);
     }
     try {
-      StatementResult result = 
this.neo4jConnectionManager.execute(cypherQuery, interpreterContext);
+      StatementResult result = this.neo4jConnectionManager.execute(cypherQuery,
+              interpreterContext);
       Set<Node> nodes = new HashSet<>();
       Set<Relationship> relationships = new HashSet<>();
       List<String> columns = new ArrayList<>();
@@ -135,8 +141,8 @@ public class Neo4jCypherInterpreter extends Interpreter {
             nodes.addAll(Iterables.asList(field.value().asPath().nodes()));
             
relationships.addAll(Iterables.asList(field.value().asPath().relationships()));
           } else {
-            setTabularResult(
-                field.key(), field.value(), columns, line, 
InternalTypeSystem.TYPE_SYSTEM);
+            setTabularResult(field.key(), field.value(), columns, line,
+                    InternalTypeSystem.TYPE_SYSTEM);
           }
         }
         if (!line.isEmpty()) {
@@ -154,19 +160,15 @@ public class Neo4jCypherInterpreter extends Interpreter {
     }
   }
 
-  private void setTabularResult(
-      String key, Object obj, List<String> columns, List<String> line, 
TypeSystem typeSystem) {
+  private void setTabularResult(String key, Object obj, List<String> columns, 
List<String> line,
+      TypeSystem typeSystem) {
     if (obj instanceof Value) {
       Value value = (Value) obj;
       if (value.hasType(typeSystem.MAP())) {
         Map<String, Object> map = value.asMap();
         for (Entry<String, Object> entry : map.entrySet()) {
-          setTabularResult(
-              String.format(MAP_KEY_TEMPLATE, key, entry.getKey()),
-              entry.getValue(),
-              columns,
-              line,
-              typeSystem);
+          setTabularResult(String.format(MAP_KEY_TEMPLATE, key, 
entry.getKey()), entry.getValue(),
+                columns, line, typeSystem);
         }
       } else {
         addValueToLine(key, columns, line, value);
@@ -174,12 +176,8 @@ public class Neo4jCypherInterpreter extends Interpreter {
     } else if (obj instanceof Map) {
       Map<String, Object> map = (Map<String, Object>) obj;
       for (Entry<String, Object> entry : map.entrySet()) {
-        setTabularResult(
-            String.format(MAP_KEY_TEMPLATE, key, entry.getKey()),
-            entry.getValue(),
-            columns,
-            line,
-            typeSystem);
+        setTabularResult(String.format(MAP_KEY_TEMPLATE, key, entry.getKey()), 
entry.getValue(),
+                columns, line, typeSystem);
       }
     } else {
       addValueToLine(key, columns, line, obj);
@@ -239,7 +237,8 @@ public class Neo4jCypherInterpreter extends Interpreter {
     return new InterpreterResult(Code.SUCCESS, msg.toString());
   }
 
-  private InterpreterResult renderGraph(Set<Node> nodes, Set<Relationship> 
relationships) {
+  private InterpreterResult renderGraph(Set<Node> nodes,
+      Set<Relationship> relationships) {
     logger.info("Executing renderGraph method");
     List<org.apache.zeppelin.tabledata.Node> nodesList = new ArrayList<>();
     List<org.apache.zeppelin.tabledata.Relationship> relsList = new 
ArrayList<>();
@@ -250,15 +249,14 @@ public class Neo4jCypherInterpreter extends Interpreter {
     for (Node node : nodes) {
       nodesList.add(Neo4jConversionUtils.toZeppelinNode(node, labels));
     }
-    return new GraphResult(
-        Code.SUCCESS, new GraphResult.Graph(nodesList, relsList, labels, 
getTypes(true), true));
+    return new GraphResult(Code.SUCCESS,
+        new GraphResult.Graph(nodesList, relsList, labels, getTypes(true), 
true));
   }
 
   @Override
   public Scheduler getScheduler() {
     return SchedulerFactory.singleton()
-        .createOrGetParallelScheduler(
-            Neo4jCypherInterpreter.class.getName() + this.hashCode(),
+        .createOrGetParallelScheduler(Neo4jCypherInterpreter.class.getName() + 
this.hashCode(),
             
Integer.parseInt(getProperty(Neo4jConnectionManager.NEO4J_MAX_CONCURRENCY)));
   }
 
@@ -273,5 +271,6 @@ public class Neo4jCypherInterpreter extends Interpreter {
   }
 
   @Override
-  public void cancel(InterpreterContext context) {}
+  public void cancel(InterpreterContext context) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/zeppelin/blob/0d746fa2/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/utils/Neo4jConversionUtils.java
----------------------------------------------------------------------
diff --git 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/utils/Neo4jConversionUtils.java
 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/utils/Neo4jConversionUtils.java
index ddbc283..571afa9 100644
--- 
a/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/utils/Neo4jConversionUtils.java
+++ 
b/neo4j/src/main/java/org/apache/zeppelin/graph/neo4j/utils/Neo4jConversionUtils.java
@@ -17,22 +17,25 @@
 
 package org.apache.zeppelin.graph.neo4j.utils;
 
+import org.neo4j.driver.v1.types.Node;
+import org.neo4j.driver.v1.types.Relationship;
+
 import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
-import org.neo4j.driver.v1.types.Node;
-import org.neo4j.driver.v1.types.Relationship;
 
-/** Neo4jConversionUtils. */
+/**
+ * Neo4jConversionUtils.
+ */
 public class Neo4jConversionUtils {
   private Neo4jConversionUtils() {}
-
+  
   private static final String[] LETTERS = "0123456789ABCDEF".split("");
 
   public static final String COLOR_GREY = "#D3D3D3";
-
-  public static org.apache.zeppelin.tabledata.Node toZeppelinNode(
-      Node n, Map<String, String> graphLabels) {
+  
+  public static org.apache.zeppelin.tabledata.Node toZeppelinNode(Node n,
+      Map<String, String> graphLabels) {
     Set<String> labels = new LinkedHashSet<>();
     String firstLabel = null;
     for (String label : n.labels()) {
@@ -41,12 +44,13 @@ public class Neo4jConversionUtils {
       }
       labels.add(label);
     }
-    return new org.apache.zeppelin.tabledata.Node(n.id(), n.asMap(), labels);
+    return new org.apache.zeppelin.tabledata.Node(n.id(), n.asMap(),
+        labels);
   }
-
+  
   public static org.apache.zeppelin.tabledata.Relationship 
toZeppelinRelationship(Relationship r) {
-    return new org.apache.zeppelin.tabledata.Relationship(
-        r.id(), r.asMap(), r.startNodeId(), r.endNodeId(), r.type());
+    return new org.apache.zeppelin.tabledata.Relationship(r.id(), r.asMap(),
+        r.startNodeId(), r.endNodeId(), r.type());
   }
 
   public static String getRandomLabelColor() {

Reply via email to