HIVE-19161: Add authorizations to information schema (Daniel Dai, reviewed by Thejas Nair)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7584b327
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7584b327
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7584b327

Branch: refs/heads/master
Commit: 7584b3276bebf64aa006eaa162c0a6264d8fcb56
Parents: 88d224f
Author: Daniel Dai <dai...@gmail.com>
Authored: Mon May 7 09:55:06 2018 -0700
Committer: Daniel Dai <dai...@gmail.com>
Committed: Mon May 7 09:55:06 2018 -0700

----------------------------------------------------------------------
 .../hive/accumulo/HiveAccumuloHelper.java       |    3 +-
 .../org/apache/hadoop/hive/accumulo/Utils.java  |  350 ---
 .../serde/CompositeAccumuloRowIdFactory.java    |    4 +-
 .../serde/DefaultAccumuloRowIdFactory.java      |    4 +-
 .../org/apache/hadoop/hive/conf/HiveConf.java   |   11 +-
 .../listener/DummyRawStoreFailEvent.java        |    7 +
 itests/hive-unit/pom.xml                        |    5 +
 .../apache/hive/jdbc/TestRestrictedList.java    |    2 +
 .../TestInformationSchemaWithPrivilege.java     |  605 ++++
 .../org/apache/hive/storage/jdbc/JdbcSerDe.java |   31 +-
 .../hive/storage/jdbc/JdbcStorageHandler.java   |   31 +
 .../hive/storage/jdbc/dao/DatabaseAccessor.java |    2 -
 .../jdbc/dao/DatabaseAccessorFactory.java       |   12 +
 .../jdbc/dao/GenericJdbcDatabaseAccessor.java   |   66 +-
 .../storage/jdbc/dao/JdbcRecordIterator.java    |   55 +-
 .../storage/jdbc/dao/MsSqlDatabaseAccessor.java |   41 +
 .../jdbc/dao/OracleDatabaseAccessor.java        |   46 +
 .../jdbc/dao/PostgresDatabaseAccessor.java      |   39 +
 .../upgrade/hive/hive-schema-3.0.0.hive.sql     |  184 +-
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |    2 +
 .../hadoop/hive/ql/metadata/JarUtils.java       |  344 +++
 .../HiveAuthorizationProvider.java              |    9 +
 .../authorization/PrivilegeSynchonizer.java     |  204 ++
 .../plugin/HiveResourceACLsImpl.java            |   61 +
 .../hadoop/hive/ql/session/SessionState.java    |    7 +
 .../ql/udf/generic/GenericUDFCurrentGroups.java |   98 +
 .../GenericUDFRestrictInformationSchema.java    |  129 +
 .../clientpositive/llap/jdbc_handler.q.out      |    2 +-
 .../clientpositive/llap/resourceplan.q.out      |  476 +--
 .../results/clientpositive/show_functions.q.out |    3 +
 .../apache/hive/service/server/HiveServer2.java |   71 +-
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  431 +++
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  141 +
 .../ThriftHiveMetastore_server.skeleton.cpp     |    5 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 2754 ++++++++++++------
 .../gen-php/metastore/ThriftHiveMetastore.php   |  275 ++
 .../hive_metastore/ThriftHiveMetastore-remote   |    7 +
 .../hive_metastore/ThriftHiveMetastore.py       |  225 ++
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |   63 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   18 +
 .../hive/metastore/HiveMetaStoreClient.java     |   25 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   10 +
 .../hadoop/hive/metastore/ObjectStore.java      |   84 +
 .../apache/hadoop/hive/metastore/RawStore.java  |    6 +-
 .../hive/metastore/cache/CachedStore.java       |    7 +
 .../src/main/thrift/hive_metastore.thrift       |    2 +
 .../DummyRawStoreControlledCommit.java          |    6 +
 .../DummyRawStoreForJdoConnection.java          |    7 +
 .../HiveMetaStoreClientPreCatalog.java          |   26 +
 49 files changed, 5426 insertions(+), 1570 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java
index 9fccb49..32a4f30 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/HiveAccumuloHelper.java
@@ -40,6 +40,7 @@ import org.apache.accumulo.start.Main;
 import org.apache.accumulo.trace.instrument.Tracer;
 import org.apache.accumulo.core.client.impl.AuthenticationTokenIdentifier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.ql.metadata.JarUtils;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -323,7 +324,7 @@ public class HiveAccumuloHelper {
       log.warn("Failed to load class for HTrace jar, trying to continue", e);
     }
     try {
-      Utils.addDependencyJars(conf, classesToLoad);
+      JarUtils.addDependencyJars(conf, classesToLoad);
     } catch (IOException e) {
       log.error("Could not add necessary Accumulo dependencies to classpath", 
e);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java
deleted file mode 100644
index 3a2facf..0000000
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hive.accumulo;
-
-import java.io.BufferedOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.URL;
-import java.net.URLDecoder;
-import java.text.MessageFormat;
-import java.util.Enumeration;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.jar.JarFile;
-import java.util.jar.JarOutputStream;
-import java.util.jar.Manifest;
-import java.util.zip.ZipEntry;
-import java.util.zip.ZipFile;
-import java.util.zip.ZipOutputStream;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Accumulo doesn't have a TableMapReduceUtil.addDependencyJars method like HBase which is very
- * helpful
- */
-public class Utils {
-  private static final Logger log = LoggerFactory.getLogger(Utils.class);
-
-  // Thanks, HBase
-  public static void addDependencyJars(Configuration conf, List<Class<?>> classes) throws IOException {
-    FileSystem localFs = FileSystem.getLocal(conf);
-    Set<String> jars = new HashSet<String>();
-    // Add jars that are already in the tmpjars variable
-    jars.addAll(conf.getStringCollection("tmpjars"));
-
-    // add jars as we find them to a map of contents jar name so that we can
-    // avoid
-    // creating new jars for classes that have already been packaged.
-    Map<String,String> packagedClasses = new HashMap<String,String>();
-
-    // Add jars containing the specified classes
-    for (Class<?> clazz : classes) {
-      if (clazz == null) {
-        continue;
-      }
-      Path path = findOrCreateJar(clazz, localFs, packagedClasses);
-      if (path == null) {
-        log.warn("Could not find jar for class " + clazz + " in order to ship 
it to the cluster.");
-        continue;
-      }
-      if (!localFs.exists(path)) {
-        log.warn("Could not validate jar file " + path + " for class " + 
clazz);
-        continue;
-      }
-      jars.add(path.toString());
-    }
-    if (!jars.isEmpty()) {
-      conf.set("tmpjars", StringUtils.join(jars, ","));
-    }
-  }
-
-  /**
-   * If org.apache.hadoop.util.JarFinder is available (0.23+ hadoop), finds the Jar for a class or
-   * creates it if it doesn't exist. If the class is in a directory in the classpath, it creates a
-   * Jar on the fly with the contents of the directory and returns the path to that Jar. If a Jar is
-   * created, it is created in the system temporary directory. Otherwise, returns an existing jar
-   * that contains a class of the same name. Maintains a mapping from jar contents to the tmp jar
-   * created.
-   *
-   * @param my_class
-   *          the class to find.
-   * @param fs
-   *          the FileSystem with which to qualify the returned path.
-   * @param packagedClasses
-   *          a map of class name to path.
-   * @return a jar file that contains the class.
-   * @throws IOException
-   */
-  @SuppressWarnings("deprecation")
-  private static Path findOrCreateJar(Class<?> my_class, FileSystem fs,
-      Map<String,String> packagedClasses) throws IOException {
-    // attempt to locate an existing jar for the class.
-    String jar = findContainingJar(my_class, packagedClasses);
-    if (StringUtils.isEmpty(jar)) {
-      jar = getJar(my_class);
-      updateMap(jar, packagedClasses);
-    }
-
-    if (StringUtils.isEmpty(jar)) {
-      return null;
-    }
-
-    log.debug("For class {}, using jar {}", my_class.getName(), jar);
-    return new Path(jar).makeQualified(fs);
-  }
-
-  /**
-   * Add entries to <code>packagedClasses</code> corresponding to class files contained in
-   * <code>jar</code>.
-   *
-   * @param jar
-   *          The jar who's content to list.
-   * @param packagedClasses
-   *          map[class -> jar]
-   */
-  private static void updateMap(String jar, Map<String,String> packagedClasses) throws IOException {
-    if (StringUtils.isEmpty(jar)) {
-      return;
-    }
-    ZipFile zip = null;
-    try {
-      zip = new ZipFile(jar);
-      for (Enumeration<? extends ZipEntry> iter = zip.entries(); iter.hasMoreElements();) {
-        ZipEntry entry = iter.nextElement();
-        if (entry.getName().endsWith("class")) {
-          packagedClasses.put(entry.getName(), jar);
-        }
-      }
-    } finally {
-      if (null != zip)
-        zip.close();
-    }
-  }
-
-  /**
-   * Find a jar that contains a class of the same name, if any. It will return a jar file, even if
-   * that is not the first thing on the class path that has a class with the same name. Looks first
-   * on the classpath and then in the <code>packagedClasses</code> map.
-   *
-   * @param my_class
-   *          the class to find.
-   * @return a jar file that contains the class, or null.
-   * @throws IOException
-   */
-  private static String findContainingJar(Class<?> my_class, Map<String,String> packagedClasses)
-      throws IOException {
-    ClassLoader loader = my_class.getClassLoader();
-    String class_file = my_class.getName().replaceAll("\\.", "/") + ".class";
-
-    // first search the classpath
-    for (Enumeration<URL> itr = loader.getResources(class_file); itr.hasMoreElements();) {
-      URL url = itr.nextElement();
-      if ("jar".equals(url.getProtocol())) {
-        String toReturn = url.getPath();
-        if (toReturn.startsWith("file:")) {
-          toReturn = toReturn.substring("file:".length());
-        }
-        // URLDecoder is a misnamed class, since it actually decodes
-        // x-www-form-urlencoded MIME type rather than actual
-        // URL encoding (which the file path has). Therefore it would
-        // decode +s to ' 's which is incorrect (spaces are actually
-        // either unencoded or encoded as "%20"). Replace +s first, so
-        // that they are kept sacred during the decoding process.
-        toReturn = toReturn.replaceAll("\\+", "%2B");
-        toReturn = URLDecoder.decode(toReturn, "UTF-8");
-        return toReturn.replaceAll("!.*$", "");
-      }
-    }
-
-    // now look in any jars we've packaged using JarFinder. Returns null
-    // when
-    // no jar is found.
-    return packagedClasses.get(class_file);
-  }
-
-  /**
-   * Invoke 'getJar' on a JarFinder implementation. Useful for some job configuration contexts
-   * (HBASE-8140) and also for testing on MRv2. First check if we have HADOOP-9426. Lacking that,
-   * fall back to the backport.
-   *
-   * @param my_class
-   *          the class to find.
-   * @return a jar file that contains the class, or null.
-   */
-  private static String getJar(Class<?> my_class) {
-    String ret = null;
-    String hadoopJarFinder = "org.apache.hadoop.util.JarFinder";
-    Class<?> jarFinder = null;
-    try {
-      log.debug("Looking for: {}", hadoopJarFinder);
-      jarFinder = JavaUtils.loadClass(hadoopJarFinder);
-      log.debug("Found: {}", hadoopJarFinder);
-      Method getJar = jarFinder.getMethod("getJar", Class.class);
-      ret = (String) getJar.invoke(null, my_class);
-    } catch (ClassNotFoundException e) {
-      log.debug("Using backported JarFinder.");
-      ret = jarFinderGetJar(my_class);
-    } catch (InvocationTargetException e) {
-      // function was properly called, but threw it's own exception.
-      // Unwrap it
-      // and pass it on.
-      throw new RuntimeException(e.getCause());
-    } catch (Exception e) {
-      // toss all other exceptions, related to reflection failure
-      throw new RuntimeException("getJar invocation failed.", e);
-    }
-
-    return ret;
-  }
-
-  /**
-   * Returns the full path to the Jar containing the class. It always return a JAR.
-   *
-   * @param klass
-   *          class.
-   *
-   * @return path to the Jar containing the class.
-   */
-  @SuppressWarnings("rawtypes")
-  public static String jarFinderGetJar(Class klass) {
-    Preconditions.checkNotNull(klass, "klass");
-    ClassLoader loader = klass.getClassLoader();
-    if (loader != null) {
-      String class_file = klass.getName().replaceAll("\\.", "/") + ".class";
-      try {
-        for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) {
-          URL url = (URL) itr.nextElement();
-          String path = url.getPath();
-          if (path.startsWith("file:")) {
-            path = path.substring("file:".length());
-          }
-          path = URLDecoder.decode(path, "UTF-8");
-          if ("jar".equals(url.getProtocol())) {
-            path = URLDecoder.decode(path, "UTF-8");
-            return path.replaceAll("!.*$", "");
-          } else if ("file".equals(url.getProtocol())) {
-            String klassName = klass.getName();
-            klassName = klassName.replace(".", "/") + ".class";
-            path = path.substring(0, path.length() - klassName.length());
-            File baseDir = new File(path);
-            File testDir = new File(System.getProperty("test.build.dir", "target/test-dir"));
-            testDir = testDir.getAbsoluteFile();
-            if (!testDir.exists()) {
-              testDir.mkdirs();
-            }
-            File tempJar = File.createTempFile("hadoop-", "", testDir);
-            tempJar = new File(tempJar.getAbsolutePath() + ".jar");
-            createJar(baseDir, tempJar);
-            return tempJar.getAbsolutePath();
-          }
-        }
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-    return null;
-  }
-
-  private static void copyToZipStream(InputStream is, ZipEntry entry, ZipOutputStream zos)
-      throws IOException {
-    zos.putNextEntry(entry);
-    IOUtils.copy(is, zos);
-    is.close();
-    zos.closeEntry();
-  }
-
-  public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException {
-    Preconditions.checkNotNull(relativePath, "relativePath");
-    Preconditions.checkNotNull(zos, "zos");
-
-    // by JAR spec, if there is a manifest, it must be the first entry in
-    // the
-    // ZIP.
-    File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
-    ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
-    if (!manifestFile.exists()) {
-      zos.putNextEntry(manifestEntry);
-      new Manifest().write(new BufferedOutputStream(zos));
-      zos.closeEntry();
-    } else {
-      InputStream is = new FileInputStream(manifestFile);
-      copyToZipStream(is, manifestEntry, zos);
-    }
-    zos.closeEntry();
-    zipDir(dir, relativePath, zos, true);
-    zos.close();
-  }
-
-  private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start)
-      throws IOException {
-    String[] dirList = dir.list();
-    for (String aDirList : dirList) {
-      File f = new File(dir, aDirList);
-      if (!f.isHidden()) {
-        if (f.isDirectory()) {
-          if (!start) {
-            ZipEntry dirEntry = new ZipEntry(relativePath + f.getName() + "/");
-            zos.putNextEntry(dirEntry);
-            zos.closeEntry();
-          }
-          String filePath = f.getPath();
-          File file = new File(filePath);
-          zipDir(file, relativePath + f.getName() + "/", zos, false);
-        } else {
-          String path = relativePath + f.getName();
-          if (!path.equals(JarFile.MANIFEST_NAME)) {
-            ZipEntry anEntry = new ZipEntry(path);
-            InputStream is = new FileInputStream(f);
-            copyToZipStream(is, anEntry, zos);
-          }
-        }
-      }
-    }
-  }
-
-  private static void createJar(File dir, File jarFile) throws IOException {
-    Preconditions.checkNotNull(dir, "dir");
-    Preconditions.checkNotNull(jarFile, "jarFile");
-    File jarDir = jarFile.getParentFile();
-    if (!jarDir.exists()) {
-      if (!jarDir.mkdirs()) {
-        throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir));
-      }
-    }
-    JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
-    jarDir(dir, StringUtils.EMPTY, zos);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java
index d8b9aa3..a3be5a3 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java
@@ -24,7 +24,7 @@ import java.util.Collections;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.Utils;
+import org.apache.hadoop.hive.ql.metadata.JarUtils;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
@@ -58,7 +58,7 @@ public class CompositeAccumuloRowIdFactory<T extends AccumuloCompositeRowId> ext
   public void addDependencyJars(Configuration jobConf) throws IOException {
     // Make sure the jar containing the custom CompositeRowId is included
     // in the mapreduce job's classpath (libjars)
-    Utils.addDependencyJars(jobConf, Collections.<Class<?>> singletonList(keyClass));
+    JarUtils.addDependencyJars(jobConf, Collections.<Class<?>> singletonList(keyClass));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java
----------------------------------------------------------------------
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java
index bae2930..f873601 100644
--- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java
@@ -23,9 +23,9 @@ import java.util.Collections;
 import java.util.Properties;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.accumulo.Utils;
 import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding;
 import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping;
+import org.apache.hadoop.hive.ql.metadata.JarUtils;
 import org.apache.hadoop.hive.serde2.ByteStream;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazyFactory;
@@ -62,7 +62,7 @@ public class DefaultAccumuloRowIdFactory implements AccumuloRowIdFactory {
 
   @Override
   public void addDependencyJars(Configuration conf) throws IOException {
-    Utils.addDependencyJars(conf, Collections.<Class<?>> singletonList(getClass()));
+    JarUtils.addDependencyJars(conf, Collections.<Class<?>> singletonList(getClass()));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index c58ffab..b872827 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -2397,7 +2397,8 @@ public class HiveConf extends Configuration {
         "2. When HiveServer2 supports service discovery via Zookeeper.\n" +
         "3. For delegation token storage if zookeeper store is used, if\n" +
         "hive.cluster.delegation.token.store.zookeeper.connectString is not 
set\n" +
-        "4. LLAP daemon registry service"),
+        "4. LLAP daemon registry service\n" +
+        "5. Leader selection for privilege synchronizer"),
 
     HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181",
         "The port of ZooKeeper servers to talk to.\n" +
@@ -2961,6 +2962,12 @@ public class HiveConf extends Configuration {
     HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3",
         "SSL Versions to disable for all Hive Servers"),
 
+    HIVE_PRIVILEGE_SYNCHRONIZER("hive.privilege.synchronizer", false,
+        "Synchronize privileges from external authorizer such as ranger to Hive periodically in HS2"),
+    HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL("hive.privilege.synchronizer.interval",
+        "1800s", new TimeValidator(TimeUnit.SECONDS),
+        "Interval to synchronize privileges from external authorizer periodically in HS2"),
+
     // HiveServer2 specific configs
    HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR("hive.server2.clear.dangling.scratchdir", false,
         "Clear dangling scratch dir periodically in HS2"),
@@ -4265,6 +4272,8 @@ public class HiveConf extends Configuration {
             "hive.server2.authentication.ldap.userMembershipKey," +
             "hive.server2.authentication.ldap.groupClassKey," +
             "hive.server2.authentication.ldap.customLDAPQuery," +
+            "hive.privilege.synchronizer," +
+            "hive.privilege.synchronizer.interval," +
             "hive.spark.client.connect.timeout," +
             "hive.spark.client.server.connect.timeout," +
             "hive.spark.client.channel.log.level," +

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 2e96983..3d6fda6 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 import org.apache.hadoop.hive.metastore.api.ISchema;
 import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
@@ -543,6 +544,12 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
+  public boolean refreshPrivileges(HiveObjectRef objToRefresh, PrivilegeBag grantPrivileges)
+      throws InvalidObjectException, MetaException, NoSuchObjectException {
+    return objectStore.refreshPrivileges(objToRefresh, grantPrivileges);
+  }
+
+  @Override
   public Role getRole(String roleName) throws NoSuchObjectException {
     return objectStore.getRole(roleName);
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/itests/hive-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index b51ebf2..26e423c 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -46,6 +46,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
+      <artifactId>hive-jdbc-handler</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
       <artifactId>hive-service</artifactId>
       <version>${project.version}</version>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
index 79fdb68..6270e14 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java
@@ -107,6 +107,8 @@ public class TestRestrictedList {
     addToExpectedRestrictedMap("_hive.hdfs.session.path");
     addToExpectedRestrictedMap("hive.spark.client.rpc.server.address");
     addToExpectedRestrictedMap("spark.home");
+    addToExpectedRestrictedMap("hive.privilege.synchronizer");
+    addToExpectedRestrictedMap("hive.privilege.synchronizer.interval");
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java
new file mode 100644
index 0000000..08e2964
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestInformationSchemaWithPrivilege.java
@@ -0,0 +1,605 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.server;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator;
+import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessController;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizationValidator;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerImpl;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyChangeListener;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePolicyProvider;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveResourceACLs;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveResourceACLsImpl;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAccessControllerWrapper;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizationValidator;
+import org.apache.hive.beeline.BeeLine;
+import org.apache.hive.jdbc.miniHS2.MiniHS2;
+import org.apache.hive.service.cli.CLIServiceClient;
+import org.apache.hive.service.cli.OperationHandle;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.SessionHandle;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test restricted information schema with privilege synchronization
+ */
+public class TestInformationSchemaWithPrivilege {
+
+  // Group mapping:
+  // group_a: user1, user2
+  // group_b: user2
+  static class FakeGroupAuthenticator extends HadoopDefaultAuthenticator {
+    @Override
+    public List<String> getGroupNames() {
+      List<String> groups = new ArrayList<String>();
+      if (getUserName().equals("user1")) {
+        groups.add("group_a");
+      } else if (getUserName().equals("user2")) {
+        groups.add("group_a");
+        groups.add("group_b");
+      }
+      return groups;
+    }
+  }
+
+  // Privilege matrix:
+  //                    user1  user2  group_a  group_b  public
+  // testdb1:            S             S
+  //   testtable1.*:     SU     S
+  //   testtable2.*:                   S
+  //   testtable3.*:                                     S
+  //   testtable4.*:                            S
+  // testdb2:            S
+  //   testtable1.key    S
+  static class TestHivePolicyProvider implements HivePolicyProvider {
+    @Override
+    public HiveResourceACLs getResourceACLs(HivePrivilegeObject hiveObject) {
+      HiveResourceACLsImpl acls = new HiveResourceACLsImpl();
+      if (hiveObject.getType() == HivePrivilegeObjectType.DATABASE) {
+        if (hiveObject.getDbname().equals("testdb1")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+          acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb2")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        }
+      } else if (hiveObject.getType() == 
HivePrivilegeObjectType.TABLE_OR_VIEW) {
+        if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable1")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.UPDATE, 
HiveResourceACLs.AccessResult.ALLOWED);
+          acls.addUserEntry("user2", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable2")) {
+          acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable3")) {
+          acls.addGroupEntry("public", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable4")) {
+          acls.addGroupEntry("group_b", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb2") && 
hiveObject.getObjectName().equals("testtable1")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        }
+      } else if (hiveObject.getType() == HivePrivilegeObjectType.COLUMN) {
+        if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable1")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+          acls.addUserEntry("user2", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable2")) {
+          acls.addGroupEntry("group_a", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable3")) {
+          acls.addGroupEntry("public", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb1") && 
hiveObject.getObjectName().equals("testtable4")) {
+          acls.addGroupEntry("group_b", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        } else if (hiveObject.getDbname().equals("testdb2") && 
hiveObject.getObjectName().equals("testtable1")
+            && hiveObject.getColumns().get(0).equals("key")) {
+          acls.addUserEntry("user1", HiveResourceACLs.Privilege.SELECT, 
HiveResourceACLs.AccessResult.ALLOWED);
+        }
+      }
+      return acls;
+    }
+
+    @Override
+    public void registerHivePolicyChangeListener(HivePolicyChangeListener 
listener) {
+      // PolicyChangeListener will be implemented later
+    }
+  }
+
+  static class HiveAuthorizerImplWithPolicyProvider extends HiveAuthorizerImpl 
{
+
+    HiveAuthorizerImplWithPolicyProvider(HiveAccessController accessController,
+        HiveAuthorizationValidator authValidator) {
+      super(accessController, authValidator);
+    }
+
+    @Override
+    public HivePolicyProvider getHivePolicyProvider() throws 
HiveAuthzPluginException {
+      return new TestHivePolicyProvider();
+    }
+  }
+
+  static class HiveAuthorizerImplWithNullPolicyProvider extends 
HiveAuthorizerImpl {
+
+    HiveAuthorizerImplWithNullPolicyProvider(HiveAccessController 
accessController,
+        HiveAuthorizationValidator authValidator) {
+      super(accessController, authValidator);
+    }
+
+    @Override
+    public HivePolicyProvider getHivePolicyProvider() throws 
HiveAuthzPluginException {
+      return null;
+    }
+  }
+
+  static class TestHiveAuthorizerFactory implements HiveAuthorizerFactory {
+    @Override
+    public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory 
metastoreClientFactory, HiveConf conf,
+        HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) 
throws HiveAuthzPluginException {
+      SQLStdHiveAccessControllerWrapper privilegeManager = new 
SQLStdHiveAccessControllerWrapper(metastoreClientFactory,
+          conf, authenticator, ctx);
+      return new HiveAuthorizerImplWithPolicyProvider(privilegeManager,
+          new SQLStdHiveAuthorizationValidator(metastoreClientFactory, conf, 
authenticator, privilegeManager, ctx));
+    }
+  }
+
+  static class TestHiveAuthorizerNullPolicyProviderFactory implements 
HiveAuthorizerFactory {
+    @Override
+    public HiveAuthorizer createHiveAuthorizer(HiveMetastoreClientFactory 
metastoreClientFactory, HiveConf conf,
+        HiveAuthenticationProvider authenticator, HiveAuthzSessionContext ctx) 
throws HiveAuthzPluginException {
+      SQLStdHiveAccessControllerWrapper privilegeManager = new 
SQLStdHiveAccessControllerWrapper(metastoreClientFactory,
+          conf, authenticator, ctx);
+      return new HiveAuthorizerImplWithNullPolicyProvider(privilegeManager,
+          new SQLStdHiveAuthorizationValidator(metastoreClientFactory, conf, 
authenticator, privilegeManager, ctx));
+    }
+  }
+
+  private static MiniHS2 miniHS2 = null;
+  private static MiniZooKeeperCluster zkCluster = null;
+  private static Map<String, String> confOverlay;
+
+  @BeforeClass
+  public static void beforeTest() throws Exception {
+    File zkDataDir = new File(System.getProperty("test.tmp.dir"));
+    zkCluster = new MiniZooKeeperCluster();
+    int zkPort = zkCluster.startup(zkDataDir);
+
+    miniHS2 = new MiniHS2(new HiveConf());
+    confOverlay = new HashMap<String, String>();
+    confOverlay.put(ConfVars.HIVE_PRIVILEGE_SYNCHRONIZER.varname, "true");
+    confOverlay.put(ConfVars.HIVE_PRIVILEGE_SYNCHRONIZER_INTERVAL.varname, 
"1");
+    
confOverlay.put(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY.varname,
 "true");
+    confOverlay.put(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, 
TestHiveAuthorizerFactory.class.getName());
+    confOverlay.put(ConfVars.HIVE_ZOOKEEPER_QUORUM.varname, "localhost");
+    confOverlay.put(ConfVars.HIVE_ZOOKEEPER_CLIENT_PORT.varname, 
Integer.toString(zkPort));
+    confOverlay.put(MetastoreConf.ConfVars.AUTO_CREATE_ALL.getVarname(), 
"true");
+    confOverlay.put(ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, 
FakeGroupAuthenticator.class.getName());
+    miniHS2.start(confOverlay);
+  }
+
+  @Test
+  public void test() throws Exception {
+
+    String db1Name = "testdb1";
+    String db2Name = "testdb2";
+    String table1Name = "testtable1";
+    String table2Name = "testtable2";
+    String table3Name = "testtable3";
+    String table4Name = "testtable4";
+    CLIServiceClient serviceClient = miniHS2.getServiceClient();
+    SessionHandle sessHandle = serviceClient.openSession("hive_test_user", "");
+    serviceClient.executeStatement(sessHandle, "DROP DATABASE IF EXISTS " + 
db1Name + " CASCADE", confOverlay);
+    serviceClient.executeStatement(sessHandle, "CREATE DATABASE " + db1Name, 
confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + 
db1Name + "." + table1Name, confOverlay);
+    serviceClient.executeStatement(sessHandle,
+        "CREATE TABLE " + db1Name + "." + table1Name + "(key string, value 
double)", confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + 
db1Name + "." + table2Name, confOverlay);
+    serviceClient.executeStatement(sessHandle,
+        "CREATE TABLE " + db1Name + "." + table2Name + "(key string, value 
double)", confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP VIEW IF EXISTS " + 
db1Name + "." + table3Name, confOverlay);
+    serviceClient.executeStatement(sessHandle,
+        "CREATE VIEW " + db1Name + "." + table3Name + " AS SELECT * FROM " + 
db1Name + "." + table1Name, confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + 
db1Name + "." + table4Name, confOverlay);
+    serviceClient.executeStatement(sessHandle,
+        "CREATE TABLE " + db1Name + "." + table4Name + "(key string, value 
double) PARTITIONED BY (p string)",
+        confOverlay);
+
+    serviceClient.executeStatement(sessHandle, "DROP DATABASE IF EXISTS " + 
db2Name + " CASCADE", confOverlay);
+    serviceClient.executeStatement(sessHandle, "CREATE DATABASE " + db2Name, 
confOverlay);
+    serviceClient.executeStatement(sessHandle, "DROP TABLE IF EXISTS " + 
db2Name + "." + table1Name, confOverlay);
+    serviceClient.executeStatement(sessHandle,
+        "CREATE TABLE " + db2Name + "." + table1Name + "(key string, value 
double)", confOverlay);
+
+    // Just to trigger auto creation of needed metastore tables
+    serviceClient.executeStatement(sessHandle, "SHOW GRANT USER hive_test_user 
ON ALL", confOverlay);
+    serviceClient.closeSession(sessHandle);
+
+    List<String> baseArgs = new ArrayList<String>();
+    baseArgs.add("-d");
+    baseArgs.add(BeeLine.BEELINE_DEFAULT_JDBC_DRIVER);
+    baseArgs.add("-u");
+    baseArgs.add(miniHS2.getBaseJdbcURL());
+    baseArgs.add("-n");
+    baseArgs.add("hive_test_user");
+
+    List<String> args = new ArrayList<String>(baseArgs);
+    args.add("-f");
+    
args.add("../../metastore/scripts/upgrade/hive/hive-schema-3.0.0.hive.sql");
+    BeeLine beeLine = new BeeLine();
+    int result = beeLine.begin(args.toArray(new String[] {}), null);
+    beeLine.close();
+    Assert.assertEquals(result, 0);
+
+    boolean containsDb1 = false;
+    boolean containsDb2 = false;
+    boolean containsDb1Table1 = false;
+    boolean containsDb1Table2 = false;
+    boolean containsDb1Table3 = false;
+    boolean containsDb1Table4 = false;
+    boolean containsDb2Table1 = false;
+    boolean containsDb1Table1SelectPriv = false;
+    boolean containsDb1Table1UpdatePriv = false;
+    boolean containsDb1Table2SelectPriv = false;
+    boolean containsDb1Table3SelectPriv = false;
+    boolean containsDb1Table4SelectPriv = false;
+    boolean containsDb2Table1SelectPriv = false;
+    boolean containsDb1Table1Key = false;
+    boolean containsDb1Table1Value = false;
+    boolean containsDb1Table2Key = false;
+    boolean containsDb1Table2Value = false;
+    boolean containsDb1Table3Key = false;
+    boolean containsDb1Table3Value = false;
+    boolean containsDb1Table4Key = false;
+    boolean containsDb1Table4Value = false;
+    boolean containsDb1Table4P = false;
+    boolean containsDb2Table1Key = false;
+
+    // We shall have enough time to synchronize privileges during loading
+    // information schema
+
+    // User1 privileges:
+    // testdb1:            S
+    //   testtable1.*:     SU
+    //   testtable2.*:     S
+    //   testtable3.*:     S
+    //   testtable4.*:
+    // testdb2:            S
+    //   testtable1.*:     S
+    sessHandle = serviceClient.openSession("user1", "");
+    OperationHandle opHandle = serviceClient.executeStatement(sessHandle, 
"select * from INFORMATION_SCHEMA.SCHEMATA",
+        confOverlay);
+    RowSet rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 2);
+    Iterator<Object[]> iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name)) {
+        containsDb1 = true;
+      } else if (cols[1].equals(db2Name)) {
+        containsDb2 = true;
+      }
+    }
+    Assert.assertTrue(containsDb1 && containsDb2);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.TABLES", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 4);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table1Name)) {
+        containsDb1Table1 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name)) {
+        containsDb1Table2 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) {
+        containsDb1Table3 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name)) {
+        containsDb1Table4 = true;
+      } else if (cols[1].equals(db2Name) && cols[2].equals(table1Name)) {
+        containsDb2Table1 = true;
+      }
+    }
+    Assert.assertTrue(
+        containsDb1Table1 && containsDb1Table2 && containsDb1Table3 && 
!containsDb1Table4 && containsDb2Table1);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.VIEWS", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 1);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) {
+        containsDb1Table3 = true;
+      } else {
+        containsDb1Table3 = false;
+      }
+    }
+    Assert.assertTrue(containsDb1Table3);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.TABLE_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 5);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table1SelectPriv = true;
+      }
+      if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && 
cols[5].equals("UPDATE")) {
+        containsDb1Table1UpdatePriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table2SelectPriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table3SelectPriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table4SelectPriv = true;
+      } else if (cols[3].equals(db2Name) && cols[4].equals(table1Name) && 
cols[5].equals("SELECT")) {
+        containsDb2Table1SelectPriv = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1SelectPriv && 
containsDb1Table1UpdatePriv && containsDb1Table2SelectPriv
+        && containsDb1Table3SelectPriv && !containsDb1Table4SelectPriv && 
containsDb2Table1SelectPriv);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.COLUMNS", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 7);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && 
cols[3].equals("key")) {
+        containsDb1Table1Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && 
cols[3].equals("value")) {
+        containsDb1Table1Value = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && 
cols[3].equals("key")) {
+        containsDb1Table2Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && 
cols[3].equals("value")) {
+        containsDb1Table2Value = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && 
cols[3].equals("key")) {
+        containsDb1Table3Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && 
cols[3].equals("value")) {
+        containsDb1Table3Value = true;
+      } else if (cols[1].equals(db2Name) && cols[2].equals(table1Name) && 
cols[3].equals("key")) {
+        containsDb2Table1Key = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && 
containsDb1Table2Key && containsDb1Table2Value
+        && containsDb1Table3Key && containsDb1Table3Value && 
containsDb2Table1Key);
+
+    containsDb1Table1Key = false;
+    containsDb1Table1Value = false;
+    containsDb1Table2Key = false;
+    containsDb1Table2Value = false;
+    containsDb1Table3Key = false;
+    containsDb1Table3Value = false;
+    containsDb2Table1Key = false;
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.COLUMN_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 7);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && 
cols[5].equals("key")) {
+        containsDb1Table1Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && 
cols[5].equals("value")) {
+        containsDb1Table1Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && 
cols[5].equals("key")) {
+        containsDb1Table2Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && 
cols[5].equals("value")) {
+        containsDb1Table2Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && 
cols[5].equals("key")) {
+        containsDb1Table3Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && 
cols[5].equals("value")) {
+        containsDb1Table3Value = true;
+      } else if (cols[3].equals(db2Name) && cols[4].equals(table1Name) && 
cols[5].equals("key")) {
+        containsDb2Table1Key = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && 
containsDb1Table2Key && containsDb1Table2Value
+        && containsDb1Table3Key && containsDb1Table3Value && 
containsDb2Table1Key);
+    serviceClient.closeSession(sessHandle);
+
+    // User2 privileges:
+    // testdb1: S
+    // testtable1.*: S
+    // testtable2.*: S
+    // testtable3.*: S
+    // testtable4.*: S
+    // testdb2:
+    // testtable1.*:
+    sessHandle = serviceClient.openSession("user2", "");
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.SCHEMATA", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 1);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name)) {
+        containsDb1 = true;
+      }
+    }
+    Assert.assertTrue(containsDb1);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.TABLES", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 4);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table1Name)) {
+        containsDb1Table1 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name)) {
+        containsDb1Table2 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) {
+        containsDb1Table3 = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name)) {
+        containsDb1Table4 = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1 && containsDb1Table2 && 
containsDb1Table3 && containsDb1Table4);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.VIEWS", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 1);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table3Name)) {
+        containsDb1Table3 = true;
+      } else {
+        containsDb1Table3 = false;
+      }
+    }
+    Assert.assertTrue(containsDb1Table3);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.TABLE_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 4);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table1SelectPriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table2SelectPriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table3SelectPriv = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && 
cols[5].equals("SELECT")) {
+        containsDb1Table4SelectPriv = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1SelectPriv && 
containsDb1Table2SelectPriv && containsDb1Table3SelectPriv
+        && containsDb1Table4SelectPriv);
+
+    // db1.testtable3.p should also be in COLUMNS, will fix in separate ticket
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.COLUMNS", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 8);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && 
cols[3].equals("key")) {
+        containsDb1Table1Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table1Name) && 
cols[3].equals("value")) {
+        containsDb1Table1Value = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && 
cols[3].equals("key")) {
+        containsDb1Table2Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table2Name) && 
cols[3].equals("value")) {
+        containsDb1Table2Value = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && 
cols[3].equals("key")) {
+        containsDb1Table3Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table3Name) && 
cols[3].equals("value")) {
+        containsDb1Table3Value = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name) && 
cols[3].equals("key")) {
+        containsDb1Table4Key = true;
+      } else if (cols[1].equals(db1Name) && cols[2].equals(table4Name) && 
cols[3].equals("value")) {
+        containsDb1Table4Value = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && 
containsDb1Table2Key && containsDb1Table2Value
+        && containsDb1Table3Key && containsDb1Table3Value && 
containsDb1Table4Key && containsDb1Table4Value);
+
+    containsDb1Table1Key = false;
+    containsDb1Table1Value = false;
+    containsDb1Table2Key = false;
+    containsDb1Table2Value = false;
+    containsDb1Table3Key = false;
+    containsDb1Table3Value = false;
+    containsDb1Table4Key = false;
+    containsDb1Table4Value = false;
+    containsDb1Table4P = false;
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from 
INFORMATION_SCHEMA.COLUMN_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 9);
+    iter = rowSet.iterator();
+    while (iter.hasNext()) {
+      Object[] cols = iter.next();
+      if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("key")) {
+        containsDb1Table1Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table1Name) && cols[5].equals("value")) {
+        containsDb1Table1Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("key")) {
+        containsDb1Table2Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table2Name) && cols[5].equals("value")) {
+        containsDb1Table2Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("key")) {
+        containsDb1Table3Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table3Name) && cols[5].equals("value")) {
+        containsDb1Table3Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("key")) {
+        containsDb1Table4Key = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("value")) {
+        containsDb1Table4Value = true;
+      } else if (cols[3].equals(db1Name) && cols[4].equals(table4Name) && cols[5].equals("p")) {
+        containsDb1Table4P = true;
+      }
+    }
+    Assert.assertTrue(containsDb1Table1Key && containsDb1Table1Value && containsDb1Table2Key && containsDb1Table2Value
+        && containsDb1Table3Key && containsDb1Table3Value && containsDb1Table4Key && containsDb1Table4Value
+        && containsDb1Table4P);
+    serviceClient.closeSession(sessHandle);
+
+    // Switch to a null policy provider authorizer, which effectively disables the information schema restriction
+    miniHS2.getHiveConf().set(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname,
+        TestHiveAuthorizerNullPolicyProviderFactory.class.getName());
+    miniHS2.getHiveConf().unset(MetastoreConf.ConfVars.PRE_EVENT_LISTENERS.getVarname());
+
+    sessHandle = serviceClient.openSession("user1", "");
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.SCHEMATA", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertTrue(rowSet.numRows() > 2);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLES", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertTrue(rowSet.numRows() > 10);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.TABLE_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 7);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMNS", confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertTrue(rowSet.numRows() > 10);
+
+    opHandle = serviceClient.executeStatement(sessHandle, "select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES",
+        confOverlay);
+    rowSet = serviceClient.fetchResults(opHandle);
+    Assert.assertEquals(rowSet.numRows(), 12);
+  }
+}
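
[Editorial note, not part of this patch] For illustration only, a minimal JDBC client sketch of what the test above exercises; host, port, and user below are made up. With hive.server2.restrict_information_schema=true, the information schema views only return rows for objects the connecting user is authorized to see.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    // Hypothetical client, assumes the hive-jdbc driver is on the classpath.
    public class InformationSchemaClientDemo {
      public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000", "user1", "");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT table_schema, table_name FROM information_schema.tables")) {
          while (rs.next()) {
            // Only tables user1 holds privileges on show up when the restriction is enabled
            System.out.println(rs.getString(1) + "." + rs.getString(2));
          }
        }
      }
    }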

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
index eac03d2..f5472a0 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcSerDe.java
@@ -15,7 +15,6 @@
 package org.apache.hive.storage.jdbc;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
@@ -67,7 +66,6 @@ public class JdbcSerDe extends AbstractSerDe {
       LOGGER.trace("Initializing the SerDe");
 
       if (tbl.containsKey(JdbcStorageConfig.DATABASE_TYPE.getPropertyName())) {
-        final boolean hiveQueryExecution = tbl.containsKey(Constants.HIVE_JDBC_QUERY);
 
         Configuration tableConfig = JdbcStorageConfigManager.convertPropertiesToConfiguration(tbl);
 
@@ -75,24 +73,17 @@ public class JdbcSerDe extends AbstractSerDe {
         columnNames = dbAccessor.getColumnNames(tableConfig);
         numColumns = columnNames.size();
         List<String> hiveColumnNames;
-        if (hiveQueryExecution) {
-          hiveColumnNames = columnNames;
-          final List<String> columnTypes = dbAccessor.getColumnTypes(tableConfig);
-          hiveColumnTypeArray = new String[columnTypes.size()];
-          hiveColumnTypeArray = columnTypes.toArray(hiveColumnTypeArray);
-        } else {
-
-          String[] hiveColumnNameArray = parseProperty(tbl.getProperty(serdeConstants.LIST_COLUMNS), ",");
-          if (numColumns != hiveColumnNameArray.length) {
-            throw new SerDeException("Expected " + numColumns + " columns. Table definition has "
-                + hiveColumnNameArray.length + " columns");
-          }
-          hiveColumnNames = Arrays.asList(hiveColumnNameArray);
-
-          hiveColumnTypeArray = parseProperty(tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES), ":");
-          if (hiveColumnTypeArray.length == 0) {
-            throw new SerDeException("Received an empty Hive column type definition");
-          }
+
+        String[] hiveColumnNameArray = parseProperty(tbl.getProperty(serdeConstants.LIST_COLUMNS), ",");
+        if (numColumns != hiveColumnNameArray.length) {
+          throw new SerDeException("Expected " + numColumns + " columns. Table definition has "
+              + hiveColumnNameArray.length + " columns");
+        }
+        hiveColumnNames = Arrays.asList(hiveColumnNameArray);
+
+        hiveColumnTypeArray = parseProperty(tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES), ":");
+        if (hiveColumnTypeArray.length == 0) {
+          throw new SerDeException("Received an empty Hive column type definition");
         }
 
         List<ObjectInspector> fieldInspectors = new ArrayList<ObjectInspector>(numColumns);

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
index df55272..d8c5433 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/JdbcStorageHandler.java
@@ -19,6 +19,7 @@ import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.metastore.HiveMetaHook;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
+import org.apache.hadoop.hive.ql.metadata.JarUtils;
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 import org.apache.hadoop.hive.serde2.AbstractSerDe;
@@ -30,7 +31,10 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hive.storage.jdbc.conf.JdbcStorageConfigManager;
 
+import java.io.IOException;
 import java.lang.IllegalArgumentException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 
@@ -123,6 +127,33 @@ public class JdbcStorageHandler implements HiveStorageHandler {
   @Override
   public void configureJobConf(TableDesc tableDesc, JobConf jobConf) {
 
+    List<Class<?>> classesToLoad = new ArrayList<>();
+    classesToLoad.add(org.apache.hive.storage.jdbc.JdbcInputSplit.class);
+    classesToLoad.add(org.apache.commons.dbcp.BasicDataSourceFactory.class);
+    classesToLoad.add(org.apache.commons.pool.impl.GenericObjectPool.class);
+    // Add the MySQL JDBC driver if it is on the classpath
+    try {
+      classesToLoad.add(Class.forName("com.mysql.jdbc.Driver"));
+    } catch (Exception e) {
+      // driver not available; skip
+    }
+    // Add the Postgres JDBC driver if it is on the classpath
+    try {
+      classesToLoad.add(Class.forName("org.postgresql.Driver"));
+    } catch (Exception e) {
+      // driver not available; skip
+    }
+    // Add the Oracle JDBC driver if it is on the classpath
+    try {
+      classesToLoad.add(Class.forName("oracle.jdbc.OracleDriver"));
+    } catch (Exception e) {
+      // driver not available; skip
+    }
+    // Add the MSSQL JDBC driver if it is on the classpath
+    try {
+      classesToLoad.add(Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver"));
+    } catch (Exception e) {
+      // driver not available; skip
+    }
+    try {
+      JarUtils.addDependencyJars(conf, classesToLoad);
+    } catch (IOException e) {
+      LOGGER.error("Could not add necessary JDBC storage handler dependencies to classpath", e);
+    }
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessor.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessor.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessor.java
index fdaa794..f2712b8 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessor.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessor.java
@@ -24,8 +24,6 @@ public interface DatabaseAccessor {
 
   List<String> getColumnNames(Configuration conf) throws HiveJdbcDatabaseAccessException;
 
-  List<String> getColumnTypes(Configuration conf) throws HiveJdbcDatabaseAccessException;
-
   int getTotalNumberOfRecords(Configuration conf) throws HiveJdbcDatabaseAccessException;
 
   JdbcRecordIterator

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
index 6d3c8d9..fffe0df 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/DatabaseAccessorFactory.java
@@ -39,6 +39,18 @@ public class DatabaseAccessorFactory {
       accessor = new JethroDatabaseAccessor();
       break;
 
+    case POSTGRES:
+      accessor = new PostgresDatabaseAccessor();
+      break;
+
+    case ORACLE:
+      accessor = new OracleDatabaseAccessor();
+      break;
+
+    case MSSQL:
+      accessor = new MsSqlDatabaseAccessor();
+      break;
+
     default:
       accessor = new GenericJdbcDatabaseAccessor();
       break;

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
index 772bc5d..af27c48 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/GenericJdbcDatabaseAccessor.java
@@ -35,7 +35,6 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
-import java.sql.Types;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -100,69 +99,6 @@ public class GenericJdbcDatabaseAccessor implements DatabaseAccessor {
   }
 
   @Override
-  public List<String> getColumnTypes(Configuration conf) throws HiveJdbcDatabaseAccessException {
-    Connection conn = null;
-    PreparedStatement ps = null;
-    ResultSet rs = null;
-
-    try {
-      initializeDatabaseConnection(conf);
-      String metadataQuery = getMetaDataQuery(conf);
-      LOGGER.debug("Query to execute is [{}]", metadataQuery);
-
-      conn = dbcpDataSource.getConnection();
-      ps = conn.prepareStatement(metadataQuery);
-      rs = ps.executeQuery();
-
-      ResultSetMetaData metadata = rs.getMetaData();
-      int numColumns = metadata.getColumnCount();
-      List<String> columnTypes = new ArrayList<String>(numColumns);
-      for (int i = 0; i < numColumns; i++) {
-        switch (metadata.getColumnType(i + 1)) {
-        case Types.CHAR:
-          columnTypes.add(serdeConstants.STRING_TYPE_NAME);
-          break;
-        case Types.INTEGER:
-          columnTypes.add(serdeConstants.INT_TYPE_NAME);
-          break;
-        case Types.BIGINT:
-          columnTypes.add(serdeConstants.BIGINT_TYPE_NAME);
-          break;
-        case Types.DECIMAL:
-          columnTypes.add(serdeConstants.DECIMAL_TYPE_NAME);
-          break;
-        case Types.FLOAT:
-        case Types.REAL:
-          columnTypes.add(serdeConstants.FLOAT_TYPE_NAME);
-          break;
-        case Types.DOUBLE:
-          columnTypes.add(serdeConstants.DOUBLE_TYPE_NAME);
-          break;
-        case Types.DATE:
-          columnTypes.add(serdeConstants.DATE_TYPE_NAME);
-          break;
-        case Types.TIMESTAMP:
-          columnTypes.add(serdeConstants.TIMESTAMP_TYPE_NAME);
-          break;
-
-        default:
-          columnTypes.add(metadata.getColumnTypeName(i+1));
-          break;
-        }
-      }
-
-      return columnTypes;
-    } catch (Exception e) {
-      LOGGER.error("Error while trying to get column names.", e);
-      throw new HiveJdbcDatabaseAccessException("Error while trying to get column names: " + e.getMessage(), e);
-    } finally {
-      cleanupResources(conn, ps, rs);
-    }
-
-  }
-
-
-  @Override
   public int getTotalNumberOfRecords(Configuration conf) throws HiveJdbcDatabaseAccessException {
     Connection conn = null;
     PreparedStatement ps = null;
@@ -217,7 +153,7 @@ public class GenericJdbcDatabaseAccessor implements DatabaseAccessor {
       ps.setFetchSize(getFetchSize(conf));
       rs = ps.executeQuery();
 
-      return new JdbcRecordIterator(conn, ps, rs);
+      return new JdbcRecordIterator(conn, ps, rs, conf.get(serdeConstants.LIST_COLUMN_TYPES));
     }
     catch (Exception e) {
       LOGGER.error("Caught exception while trying to execute query", e);

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
index 638e2b0..d6c2736 100644
--- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/JdbcRecordIterator.java
@@ -14,6 +14,10 @@
  */
 package org.apache.hive.storage.jdbc.dao;
 
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
 import org.apache.hadoop.io.NullWritable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -22,6 +26,7 @@ import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -36,12 +41,15 @@ public class JdbcRecordIterator implements Iterator<Map<String, Object>> {
   private Connection conn;
   private PreparedStatement ps;
   private ResultSet rs;
+  private ArrayList<TypeInfo> columnTypes = null;
 
-
-  public JdbcRecordIterator(Connection conn, PreparedStatement ps, ResultSet rs) {
+  public JdbcRecordIterator(Connection conn, PreparedStatement ps, ResultSet rs, String typeString) {
     this.conn = conn;
     this.ps = ps;
     this.rs = rs;
+    if (typeString != null) {
+      this.columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(typeString);
+    }
   }
 
 
@@ -65,7 +73,48 @@ public class JdbcRecordIterator implements Iterator<Map<String, Object>> {
       Map<String, Object> record = new HashMap<String, Object>(numColumns);
       for (int i = 0; i < numColumns; i++) {
         String key = metadata.getColumnName(i + 1);
-        Object value = rs.getObject(i + 1);
+        Object value;
+        if (columnTypes != null && columnTypes.get(i) instanceof PrimitiveTypeInfo) {
+          // This is not a complete type mapping; it is just enough to make the information schema work
+          switch (((PrimitiveTypeInfo) columnTypes.get(i)).getTypeName()) {
+          case "int":
+          case "smallint":
+          case "tinyint":
+            value = rs.getInt(i + 1);
+            break;
+          case "bigint":
+            value = rs.getLong(i + 1);
+            break;
+          case "float":
+            value = rs.getFloat(i + 1);
+            break;
+          case "double":
+            value = rs.getDouble(i + 1);
+            break;
+          case "bigdecimal":
+            value = HiveDecimal.create(rs.getBigDecimal(i + 1));
+            break;
+          case "boolean":
+            value = rs.getBoolean(i + 1);
+            break;
+          case "string":
+          case "char":
+          case "varchar":
+            value = rs.getString(i + 1);
+            break;
+          case "datetime":
+            value = rs.getDate(i + 1);
+            break;
+          case "timestamp":
+            value = rs.getTimestamp(i + 1);
+            break;
+          default:
+            value = rs.getObject(i + 1);
+            break;
+          }
+        } else {
+          value = rs.getObject(i + 1);
+        }
         record.put(key, value);
       }
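
[Editorial note, not part of this patch] The type string passed into the iterator above is the standard colon-separated Hive type list carried in LIST_COLUMN_TYPES; the switch dispatches on the parsed type names. A minimal sketch of that parsing, assuming hive-serde is on the classpath (the class and sample types below are made up):

    import java.util.List;

    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    // Hypothetical demo class, not part of this patch.
    public class TypeStringDemo {
      public static void main(String[] args) {
        // LIST_COLUMN_TYPES for a table declared as (key int, value string, cnt bigint)
        List<TypeInfo> types = TypeInfoUtils.getTypeInfosFromTypeString("int:string:bigint");
        for (TypeInfo t : types) {
          // Prints: int, string, bigint -- the names the switch above dispatches on
          System.out.println(t.getTypeName());
        }
      }
    }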
 

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
new file mode 100644
index 0000000..5c5455f
--- /dev/null
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/MsSqlDatabaseAccessor.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.storage.jdbc.dao;
+
+/**
+ * MSSQL specific data accessor. This is needed because MSSQL JDBC drivers do not support the generic LIMIT and
+ * OFFSET escape functions.
+ */
+public class MsSqlDatabaseAccessor extends GenericJdbcDatabaseAccessor {
+
+  @Override
+  protected String addLimitAndOffsetToQuery(String sql, int limit, int offset) {
+    if (offset == 0) {
+      return addLimitToQuery(sql, limit);
+    } else {
+      // ORDER BY is not strictly necessary, but MS SQL requires one in order to use OFFSET/FETCH
+      return sql + " ORDER BY 1 OFFSET " + offset + " ROWS FETCH NEXT " + limit + " ROWS ONLY";
+    }
+  }
+
+  @Override
+  protected String addLimitToQuery(String sql, int limit) {
+    return sql + " {LIMIT " + limit + "}";
+  }
+
+}
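
[Editorial note, not part of this patch] A rough sketch of the SQL this accessor generates for a split, assuming the jdbc-handler jar is on the classpath; the demo class and sample query are made up, and it subclasses the accessor only so the protected methods can be called.

    import org.apache.hive.storage.jdbc.dao.MsSqlDatabaseAccessor;

    // Hypothetical demo class, not part of this patch.
    public class MsSqlPaginationDemo extends MsSqlDatabaseAccessor {
      public static void main(String[] args) {
        MsSqlPaginationDemo demo = new MsSqlPaginationDemo();
        // First split (offset 0) uses the JDBC {LIMIT n} escape:
        //   SELECT * FROM t {LIMIT 10}
        System.out.println(demo.addLimitToQuery("SELECT * FROM t", 10));
        // Later splits use ORDER BY 1 ... OFFSET/FETCH, since SQL Server only allows OFFSET/FETCH with ORDER BY:
        //   SELECT * FROM t ORDER BY 1 OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY
        System.out.println(demo.addLimitAndOffsetToQuery("SELECT * FROM t", 10, 10));
      }
    }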

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
new file mode 100644
index 0000000..39c4cda
--- /dev/null
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/OracleDatabaseAccessor.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.storage.jdbc.dao;
+
+/**
+ * Oracle specific data accessor. This is needed because Oracle JDBC drivers do not support the generic LIMIT and
+ * OFFSET escape functions.
+ */
+public class OracleDatabaseAccessor extends GenericJdbcDatabaseAccessor {
+
+  // Random column name to reduce the chance of conflict
+  static final String ROW_NUM_COLUMN_NAME = "dummy_rownum_col_rn1938392";
+
+  @Override
+  protected String addLimitAndOffsetToQuery(String sql, int limit, int offset) {
+    if (offset == 0) {
+      return addLimitToQuery(sql, limit);
+    } else {
+      // A simple "ROWNUM > offset AND ROWNUM <= (offset + limit)" won't work: ROWNUM is assigned as rows pass
+      // the filter, so that predicate returns nothing. Wrap the query and filter on the materialized value instead.
+      return "SELECT * FROM (SELECT t.*, ROWNUM AS " + ROW_NUM_COLUMN_NAME + " FROM (" + sql + ") t) WHERE "
+          + ROW_NUM_COLUMN_NAME + " >" + offset + " AND " + ROW_NUM_COLUMN_NAME + " <=" + (offset + limit);
+    }
+  }
+
+
+  @Override
+  protected String addLimitToQuery(String sql, int limit) {
+    return "SELECT * FROM (" + sql + ") WHERE ROWNUM <= " + limit;
+  }
+
+}
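
[Editorial note, not part of this patch] The same kind of sketch for the Oracle rewriting, under the same assumptions (made-up demo class and query, subclassing only to reach the protected methods):

    import org.apache.hive.storage.jdbc.dao.OracleDatabaseAccessor;

    // Hypothetical demo class, not part of this patch.
    public class OraclePaginationDemo extends OracleDatabaseAccessor {
      public static void main(String[] args) {
        OraclePaginationDemo demo = new OraclePaginationDemo();
        // Prints: SELECT * FROM (SELECT * FROM t) WHERE ROWNUM <= 10
        System.out.println(demo.addLimitToQuery("SELECT * FROM t", 10));
        // Prints the nested form that materializes ROWNUM before filtering:
        //   SELECT * FROM (SELECT t.*, ROWNUM AS dummy_rownum_col_rn1938392 FROM (SELECT * FROM t) t)
        //     WHERE dummy_rownum_col_rn1938392 >10 AND dummy_rownum_col_rn1938392 <=20
        System.out.println(demo.addLimitAndOffsetToQuery("SELECT * FROM t", 10, 10));
      }
    }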

http://git-wip-us.apache.org/repos/asf/hive/blob/7584b327/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
----------------------------------------------------------------------
diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
new file mode 100644
index 0000000..c0280fd
--- /dev/null
+++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/dao/PostgresDatabaseAccessor.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.storage.jdbc.dao;
+
+/**
+ * Postgres specific data accessor. Postgres JDBC drivers do not support the generic LIMIT and OFFSET
+ * escape functions.
+ */
+public class PostgresDatabaseAccessor extends GenericJdbcDatabaseAccessor {
+
+  @Override
+  protected String addLimitAndOffsetToQuery(String sql, int limit, int offset) {
+    if (offset == 0) {
+      return addLimitToQuery(sql, limit);
+    } else {
+      return sql + " LIMIT " + limit + " OFFSET " + offset;
+    }
+  }
+
+  @Override
+  protected String addLimitToQuery(String sql, int limit) {
+    return sql + " LIMIT " + limit;
+  }
+}
