http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Type.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Type.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Type.java
new file mode 100644
index 0000000..a96d2ac
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Type.java
@@ -0,0 +1,348 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli;
+
+import java.sql.DatabaseMetaData;
+
+import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hive.service.cli.thrift.TTypeId;
+
+/**
+ * Type.
+ *
+ */
+public enum Type {
+  NULL_TYPE("VOID",
+      java.sql.Types.NULL,
+      TTypeId.NULL_TYPE),
+  BOOLEAN_TYPE("BOOLEAN",
+      java.sql.Types.BOOLEAN,
+      TTypeId.BOOLEAN_TYPE),
+  TINYINT_TYPE("TINYINT",
+      java.sql.Types.TINYINT,
+      TTypeId.TINYINT_TYPE),
+  SMALLINT_TYPE("SMALLINT",
+      java.sql.Types.SMALLINT,
+      TTypeId.SMALLINT_TYPE),
+  INT_TYPE("INT",
+      java.sql.Types.INTEGER,
+      TTypeId.INT_TYPE),
+  BIGINT_TYPE("BIGINT",
+      java.sql.Types.BIGINT,
+      TTypeId.BIGINT_TYPE),
+  FLOAT_TYPE("FLOAT",
+      java.sql.Types.FLOAT,
+      TTypeId.FLOAT_TYPE),
+  DOUBLE_TYPE("DOUBLE",
+      java.sql.Types.DOUBLE,
+      TTypeId.DOUBLE_TYPE),
+  STRING_TYPE("STRING",
+      java.sql.Types.VARCHAR,
+      TTypeId.STRING_TYPE),
+  CHAR_TYPE("CHAR",
+      java.sql.Types.CHAR,
+      TTypeId.CHAR_TYPE,
+      true, false, false),
+  VARCHAR_TYPE("VARCHAR",
+      java.sql.Types.VARCHAR,
+      TTypeId.VARCHAR_TYPE,
+      true, false, false),
+  DATE_TYPE("DATE",
+      java.sql.Types.DATE,
+      TTypeId.DATE_TYPE),
+  TIMESTAMP_TYPE("TIMESTAMP",
+      java.sql.Types.TIMESTAMP,
+      TTypeId.TIMESTAMP_TYPE),
+  INTERVAL_YEAR_MONTH_TYPE("INTERVAL_YEAR_MONTH",
+      java.sql.Types.OTHER,
+      TTypeId.INTERVAL_YEAR_MONTH_TYPE),
+  INTERVAL_DAY_TIME_TYPE("INTERVAL_DAY_TIME",
+      java.sql.Types.OTHER,
+      TTypeId.INTERVAL_DAY_TIME_TYPE),
+  BINARY_TYPE("BINARY",
+      java.sql.Types.BINARY,
+      TTypeId.BINARY_TYPE),
+  DECIMAL_TYPE("DECIMAL",
+      java.sql.Types.DECIMAL,
+      TTypeId.DECIMAL_TYPE,
+      true, false, false),
+  ARRAY_TYPE("ARRAY",
+      java.sql.Types.ARRAY,
+      TTypeId.ARRAY_TYPE,
+      true, true),
+  MAP_TYPE("MAP",
+      java.sql.Types.JAVA_OBJECT,
+      TTypeId.MAP_TYPE,
+      true, true),
+  STRUCT_TYPE("STRUCT",
+      java.sql.Types.STRUCT,
+      TTypeId.STRUCT_TYPE,
+      true, false),
+  UNION_TYPE("UNIONTYPE",
+      java.sql.Types.OTHER,
+      TTypeId.UNION_TYPE,
+      true, false),
+  USER_DEFINED_TYPE("USER_DEFINED",
+      java.sql.Types.OTHER,
+      TTypeId.USER_DEFINED_TYPE,
+      true, false);
+
+  private final String name;
+  private final TTypeId tType;
+  private final int javaSQLType;
+  private final boolean isQualified;
+  private final boolean isComplex;
+  private final boolean isCollection;
+
+  Type(String name, int javaSQLType, TTypeId tType, boolean isQualified, boolean isComplex, boolean isCollection) {
+    this.name = name;
+    this.javaSQLType = javaSQLType;
+    this.tType = tType;
+    this.isQualified = isQualified;
+    this.isComplex = isComplex;
+    this.isCollection = isCollection;
+  }
+
+  Type(String name, int javaSQLType, TTypeId tType, boolean isComplex, boolean isCollection) {
+    this(name, javaSQLType, tType, false, isComplex, isCollection);
+  }
+
+  Type(String name, int javaSqlType, TTypeId tType) {
+    this(name, javaSqlType, tType, false, false, false);
+  }
+
+  public boolean isPrimitiveType() {
+    return !isComplex;
+  }
+
+  public boolean isQualifiedType() {
+    return isQualified;
+  }
+
+  public boolean isComplexType() {
+    return isComplex;
+  }
+
+  public boolean isCollectionType() {
+    return isCollection;
+  }
+
+  public static Type getType(TTypeId tType) {
+    for (Type type : values()) {
+      if (tType.equals(type.tType)) {
+        return type;
+      }
+    }
+    throw new IllegalArgumentException("Unrecognized Thrift TTypeId value: " + tType);
+  }
+
+  public static Type getType(String name) {
+    if (name == null) {
+      throw new IllegalArgumentException("Invalid type name: null");
+    }
+    for (Type type : values()) {
+      if (name.equalsIgnoreCase(type.name)) {
+        return type;
+      } else if (type.isQualifiedType() || type.isComplexType()) {
+        if (name.toUpperCase().startsWith(type.name)) {
+          return type;
+        }
+      }
+    }
+    throw new IllegalArgumentException("Unrecognized type name: " + name);
+  }
+
+  /**
+   * Radix for this type (typically either 2 or 10)
+   * Null is returned for data types where this is not applicable.
+   */
+  public Integer getNumPrecRadix() {
+    if (this.isNumericType()) {
+      return 10;
+    }
+    return null;
+  }
+
+  /**
+   * Maximum precision for numeric types.
+   * Returns null for non-numeric types.
+   * @return
+   */
+  public Integer getMaxPrecision() {
+    switch (this) {
+    case TINYINT_TYPE:
+      return 3;
+    case SMALLINT_TYPE:
+      return 5;
+    case INT_TYPE:
+      return 10;
+    case BIGINT_TYPE:
+      return 19;
+    case FLOAT_TYPE:
+      return 7;
+    case DOUBLE_TYPE:
+      return 15;
+    case DECIMAL_TYPE:
+      return HiveDecimal.MAX_PRECISION;
+    default:
+      return null;
+    }
+  }
+
+  public boolean isNumericType() {
+    switch (this) {
+    case TINYINT_TYPE:
+    case SMALLINT_TYPE:
+    case INT_TYPE:
+    case BIGINT_TYPE:
+    case FLOAT_TYPE:
+    case DOUBLE_TYPE:
+    case DECIMAL_TYPE:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /**
+   * Prefix used to quote a literal of this type (may be null)
+   */
+  public String getLiteralPrefix() {
+    return null;
+  }
+
+  /**
+   * Suffix used to quote a literal of this type (may be null)
+   * @return
+   */
+  public String getLiteralSuffix() {
+    return null;
+  }
+
+  /**
+   * Can you use NULL for this type?
+   * @return
+   * DatabaseMetaData.typeNoNulls - does not allow NULL values
+   * DatabaseMetaData.typeNullable - allows NULL values
+   * DatabaseMetaData.typeNullableUnknown - nullability unknown
+   */
+  public Short getNullable() {
+    // All Hive types are nullable
+    return DatabaseMetaData.typeNullable;
+  }
+
+  /**
+   * Is the type case sensitive?
+   * @return
+   */
+  public Boolean isCaseSensitive() {
+    switch (this) {
+    case STRING_TYPE:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  /**
+   * Parameters used in creating the type (may be null)
+   * @return
+   */
+  public String getCreateParams() {
+    return null;
+  }
+
+  /**
+   * Can you use WHERE based on this type?
+   * @return
+   * DatabaseMetaData.typePredNone - No support
+   * DatabaseMetaData.typePredChar - Only support with WHERE .. LIKE
+   * DatabaseMetaData.typePredBasic - Supported except for WHERE .. LIKE
+   * DatabaseMetaData.typeSearchable - Supported for all WHERE ..
+   */
+  public Short getSearchable() {
+    if (isPrimitiveType()) {
+      return DatabaseMetaData.typeSearchable;
+    }
+    return DatabaseMetaData.typePredNone;
+  }
+
+  /**
+   * Is this type unsigned?
+   * @return
+   */
+  public Boolean isUnsignedAttribute() {
+    if (isNumericType()) {
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Can this type represent money?
+   * @return
+   */
+  public Boolean isFixedPrecScale() {
+    return false;
+  }
+
+  /**
+   * Can this type be used for an auto-increment value?
+   * @return
+   */
+  public Boolean isAutoIncrement() {
+    return false;
+  }
+
+  /**
+   * Localized version of type name (may be null).
+   * @return
+   */
+  public String getLocalizedName() {
+    return null;
+  }
+
+  /**
+   * Minimum scale supported for this type
+   * @return
+   */
+  public Short getMinimumScale() {
+    return 0;
+  }
+
+  /**
+   * Maximum scale supported for this type
+   * @return
+   */
+  public Short getMaximumScale() {
+    return 0;
+  }
+
+  public TTypeId toTType() {
+    return tType;
+  }
+
+  public int toJavaSQLType() {
+    return javaSQLType;
+  }
+
+  public String getName() {
+    return name;
+  }
+}
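
A quick illustration of how this enum bridges the three type systems (Hive
type name, JDBC java.sql.Types constant, Thrift TTypeId). This is a
hypothetical snippet, not part of the patch; it assumes the Thrift-generated
classes are on the classpath:

    import java.sql.Types;
    import org.apache.hive.service.cli.Type;

    public class TypeDemo {
      public static void main(String[] args) {
        // Qualified names such as "decimal(10,2)" match by prefix in getType(String).
        Type t = Type.getType("decimal(10,2)");
        System.out.println(t);                                    // DECIMAL_TYPE
        System.out.println(t.toJavaSQLType() == Types.DECIMAL);   // true
        System.out.println(t.toTType());                          // Thrift id DECIMAL_TYPE
        System.out.println(t.getMaxPrecision());                  // HiveDecimal.MAX_PRECISION
      }
    }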

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java
new file mode 100644
index 0000000..562b3f5
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hive.service.cli.thrift.TPrimitiveTypeEntry;
+import org.apache.hive.service.cli.thrift.TTypeDesc;
+import org.apache.hive.service.cli.thrift.TTypeEntry;
+
+/**
+ * TypeDescriptor.
+ *
+ */
+public class TypeDescriptor {
+
+  private final Type type;
+  private String typeName = null;
+  private TypeQualifiers typeQualifiers = null;
+
+  public TypeDescriptor(Type type) {
+    this.type = type;
+  }
+
+  public TypeDescriptor(TTypeDesc tTypeDesc) {
+    List<TTypeEntry> tTypeEntries = tTypeDesc.getTypes();
+    TPrimitiveTypeEntry top = tTypeEntries.get(0).getPrimitiveEntry();
+    this.type = Type.getType(top.getType());
+    if (top.isSetTypeQualifiers()) {
+      setTypeQualifiers(TypeQualifiers.fromTTypeQualifiers(top.getTypeQualifiers()));
+    }
+  }
+
+  public TypeDescriptor(String typeName) {
+    this.type = Type.getType(typeName);
+    if (this.type.isComplexType()) {
+      this.typeName = typeName;
+    } else if (this.type.isQualifiedType()) {
+      PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(typeName);
+      setTypeQualifiers(TypeQualifiers.fromTypeInfo(pti));
+    }
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  public TTypeDesc toTTypeDesc() {
+    TPrimitiveTypeEntry primitiveEntry = new TPrimitiveTypeEntry(type.toTType());
+    if (getTypeQualifiers() != null) {
+      primitiveEntry.setTypeQualifiers(getTypeQualifiers().toTTypeQualifiers());
+    }
+    TTypeEntry entry = TTypeEntry.primitiveEntry(primitiveEntry);
+
+    TTypeDesc desc = new TTypeDesc();
+    desc.addToTypes(entry);
+    return desc;
+  }
+
+  public String getTypeName() {
+    if (typeName != null) {
+      return typeName;
+    } else {
+      return type.getName();
+    }
+  }
+
+  public TypeQualifiers getTypeQualifiers() {
+    return typeQualifiers;
+  }
+
+  public void setTypeQualifiers(TypeQualifiers typeQualifiers) {
+    this.typeQualifiers = typeQualifiers;
+  }
+
+  /**
+   * The column size for this type.
+   * For numeric data this is the maximum precision.
+   * For character data this is the length in characters.
+   * For datetime types this is the length in characters of the String representation
+   * (assuming the maximum allowed precision of the fractional seconds component).
+   * For binary data this is the length in bytes.
+   * Null is returned for data types where the column size is not applicable.
+   */
+  public Integer getColumnSize() {
+    if (type.isNumericType()) {
+      return getPrecision();
+    }
+    switch (type) {
+    case STRING_TYPE:
+    case BINARY_TYPE:
+      return Integer.MAX_VALUE;
+    case CHAR_TYPE:
+    case VARCHAR_TYPE:
+      return typeQualifiers.getCharacterMaximumLength();
+    case DATE_TYPE:
+      return 10;
+    case TIMESTAMP_TYPE:
+      return 29;
+    default:
+      return null;
+    }
+  }
+
+  /**
+   * Maximum precision for numeric types.
+   * Returns null for non-numeric types.
+   * @return
+   */
+  public Integer getPrecision() {
+    if (this.type == Type.DECIMAL_TYPE) {
+      return typeQualifiers.getPrecision();
+    }
+    return this.type.getMaxPrecision();
+  }
+
+  /**
+   * The number of fractional digits for this type.
+   * Null is returned for data types where this is not applicable.
+   */
+  public Integer getDecimalDigits() {
+    switch (this.type) {
+    case BOOLEAN_TYPE:
+    case TINYINT_TYPE:
+    case SMALLINT_TYPE:
+    case INT_TYPE:
+    case BIGINT_TYPE:
+      return 0;
+    case FLOAT_TYPE:
+      return 7;
+    case DOUBLE_TYPE:
+      return 15;
+    case DECIMAL_TYPE:
+      return typeQualifiers.getScale();
+    case TIMESTAMP_TYPE:
+      return 9;
+    default:
+      return null;
+    }
+  }
+}
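
A minimal usage sketch (hypothetical, assuming TypeInfoFactory resolves the
lowercase type names shown, as Hive's serde layer does):

    import org.apache.hive.service.cli.TypeDescriptor;

    public class TypeDescriptorDemo {
      public static void main(String[] args) {
        // Qualifiers are parsed out of the type name via PrimitiveTypeInfo.
        TypeDescriptor varchar = new TypeDescriptor("varchar(50)");
        System.out.println(varchar.getColumnSize());   // 50 (CHARACTER_MAXIMUM_LENGTH)

        TypeDescriptor dec = new TypeDescriptor("decimal(10,2)");
        System.out.println(dec.getPrecision());        // 10
        System.out.println(dec.getDecimalDigits());    // 2
      }
    }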

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java
new file mode 100644
index 0000000..c6da52c
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
+import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
+import org.apache.hive.service.cli.thrift.TCLIServiceConstants;
+import org.apache.hive.service.cli.thrift.TTypeQualifierValue;
+import org.apache.hive.service.cli.thrift.TTypeQualifiers;
+
+/**
+ * This class holds type qualifier information for a primitive type,
+ * such as char/varchar length or decimal precision/scale.
+ */
+public class TypeQualifiers {
+  private Integer characterMaximumLength;
+  private Integer precision;
+  private Integer scale;
+
+  public TypeQualifiers() {}
+
+  public Integer getCharacterMaximumLength() {
+    return characterMaximumLength;
+  }
+  public void setCharacterMaximumLength(int characterMaximumLength) {
+    this.characterMaximumLength = characterMaximumLength;
+  }
+
+  public TTypeQualifiers toTTypeQualifiers() {
+    TTypeQualifiers ret = null;
+
+    Map<String, TTypeQualifierValue> qMap = new HashMap<String, TTypeQualifierValue>();
+    if (getCharacterMaximumLength() != null) {
+      TTypeQualifierValue val = new TTypeQualifierValue();
+      val.setI32Value(getCharacterMaximumLength().intValue());
+      qMap.put(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH, val);
+    }
+
+    if (precision != null) {
+      TTypeQualifierValue val = new TTypeQualifierValue();
+      val.setI32Value(precision.intValue());
+      qMap.put(TCLIServiceConstants.PRECISION, val);
+    }
+
+    if (scale != null) {
+      TTypeQualifierValue val = new TTypeQualifierValue();
+      val.setI32Value(scale.intValue());
+      qMap.put(TCLIServiceConstants.SCALE, val);
+    }
+
+    if (qMap.size() > 0) {
+      ret = new TTypeQualifiers(qMap);
+    }
+
+    return ret;
+  }
+
+  public static TypeQualifiers fromTTypeQualifiers(TTypeQualifiers ttq) {
+    TypeQualifiers ret = null;
+    if (ttq != null) {
+      ret = new TypeQualifiers();
+      Map<String, TTypeQualifierValue> tqMap = ttq.getQualifiers();
+
+      if (tqMap.containsKey(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH)) {
+        ret.setCharacterMaximumLength(
+            tqMap.get(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH).getI32Value());
+      }
+
+      if (tqMap.containsKey(TCLIServiceConstants.PRECISION)) {
+        ret.setPrecision(tqMap.get(TCLIServiceConstants.PRECISION).getI32Value());
+      }
+
+      if (tqMap.containsKey(TCLIServiceConstants.SCALE)) {
+        ret.setScale(tqMap.get(TCLIServiceConstants.SCALE).getI32Value());
+      }
+    }
+    return ret;
+  }
+
+  public static TypeQualifiers fromTypeInfo(PrimitiveTypeInfo pti) {
+    TypeQualifiers result = null;
+    if (pti instanceof VarcharTypeInfo) {
+      result = new TypeQualifiers();
+      result.setCharacterMaximumLength(((VarcharTypeInfo)pti).getLength());
+    } else if (pti instanceof CharTypeInfo) {
+      result = new TypeQualifiers();
+      result.setCharacterMaximumLength(((CharTypeInfo)pti).getLength());
+    } else if (pti instanceof DecimalTypeInfo) {
+      result = new TypeQualifiers();
+      result.setPrecision(((DecimalTypeInfo)pti).precision());
+      result.setScale(((DecimalTypeInfo)pti).scale());
+    }
+    return result;
+  }
+
+  public Integer getPrecision() {
+    return precision;
+  }
+
+  public void setPrecision(Integer precision) {
+    this.precision = precision;
+  }
+
+  public Integer getScale() {
+    return scale;
+  }
+
+  public void setScale(Integer scale) {
+    this.scale = scale;
+  }
+
+}
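
A minimal round-trip sketch through the Thrift representation (hypothetical,
not part of the patch):

    import org.apache.hive.service.cli.TypeQualifiers;
    import org.apache.hive.service.cli.thrift.TTypeQualifiers;

    public class TypeQualifiersDemo {
      public static void main(String[] args) {
        TypeQualifiers q = new TypeQualifiers();
        q.setPrecision(10);
        q.setScale(2);

        // Serialize to the Thrift map keyed by TCLIServiceConstants, then back.
        TTypeQualifiers wire = q.toTTypeQualifiers();
        TypeQualifiers back = TypeQualifiers.fromTTypeQualifiers(wire);
        System.out.println(back.getPrecision() + "," + back.getScale());  // 10,2
      }
    }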

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
new file mode 100644
index 0000000..87ac39b
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.TableType;
+
+/**
+ * ClassicTableTypeMapping.
+ * Classic table type mapping :
+ *  Managed Table ==> Table
+ *  External Table ==> Table
+ *  Virtual View ==> View
+ */
+public class ClassicTableTypeMapping implements TableTypeMapping {
+
+  public enum ClassicTableTypes {
+    TABLE,
+    VIEW,
+  }
+
+  private final Map<String, String> hiveToClientMap = new HashMap<String, String>();
+  private final Map<String, String> clientToHiveMap = new HashMap<String, String>();
+
+  public ClassicTableTypeMapping() {
+    hiveToClientMap.put(TableType.MANAGED_TABLE.toString(),
+        ClassicTableTypes.TABLE.toString());
+    hiveToClientMap.put(TableType.EXTERNAL_TABLE.toString(),
+        ClassicTableTypes.TABLE.toString());
+    hiveToClientMap.put(TableType.VIRTUAL_VIEW.toString(),
+        ClassicTableTypes.VIEW.toString());
+
+    clientToHiveMap.put(ClassicTableTypes.TABLE.toString(),
+        TableType.MANAGED_TABLE.toString());
+    clientToHiveMap.put(ClassicTableTypes.VIEW.toString(),
+        TableType.VIRTUAL_VIEW.toString());
+  }
+
+  @Override
+  public String mapToHiveType(String clientTypeName) {
+    if (clientToHiveMap.containsKey(clientTypeName)) {
+      return clientToHiveMap.get(clientTypeName);
+    } else {
+      return clientTypeName;
+    }
+  }
+
+  @Override
+  public String mapToClientType(String hiveTypeName) {
+    if (hiveToClientMap.containsKey(hiveTypeName)) {
+      return hiveToClientMap.get(hiveTypeName);
+    } else {
+      return hiveTypeName;
+    }
+  }
+
+  @Override
+  public Set<String> getTableTypeNames() {
+    Set<String> typeNameSet = new HashSet<String>();
+    for (ClassicTableTypes typeNames : ClassicTableTypes.values()) {
+      typeNameSet.add(typeNames.toString());
+    }
+    return typeNameSet;
+  }
+
+}
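
An illustrative sketch of the mapping; note that the collapse is lossy in the
client-to-Hive direction (hypothetical, not part of the patch):

    import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;

    public class MappingDemo {
      public static void main(String[] args) {
        ClassicTableTypeMapping m = new ClassicTableTypeMapping();
        System.out.println(m.mapToClientType("MANAGED_TABLE"));   // TABLE
        System.out.println(m.mapToClientType("EXTERNAL_TABLE"));  // TABLE
        System.out.println(m.mapToHiveType("VIEW"));              // VIRTUAL_VIEW
        // Both managed and external tables map to "TABLE", but "TABLE" can
        // only map back to MANAGED_TABLE.
        System.out.println(m.mapToHiveType("TABLE"));             // MANAGED_TABLE
        System.out.println(m.getTableTypeNames());                // TABLE, VIEW (unordered)
      }
    }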

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
new file mode 100644
index 0000000..3f2de10
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hive.service.cli.operation;
+
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.session.HiveSession;
+
+public abstract class ExecuteStatementOperation extends Operation {
+  protected String statement = null;
+  protected Map<String, String> confOverlay = new HashMap<String, String>();
+
+  public ExecuteStatementOperation(HiveSession parentSession, String statement,
+      Map<String, String> confOverlay, boolean runInBackground) {
+    super(parentSession, OperationType.EXECUTE_STATEMENT, runInBackground);
+    this.statement = statement;
+    setConfOverlay(confOverlay);
+  }
+
+  public String getStatement() {
+    return statement;
+  }
+
+  public static ExecuteStatementOperation newExecuteStatementOperation(
+      HiveSession parentSession, String statement, Map<String, String> confOverlay, boolean runAsync)
+          throws HiveSQLException {
+    String[] tokens = statement.trim().split("\\s+");
+    CommandProcessor processor = null;
+    try {
+      processor = CommandProcessorFactory.getForHiveCommand(tokens, parentSession.getHiveConf());
+    } catch (SQLException e) {
+      throw new HiveSQLException(e.getMessage(), e.getSQLState(), e);
+    }
+    if (processor == null) {
+      return new SQLOperation(parentSession, statement, confOverlay, runAsync);
+    }
+    return new HiveCommandOperation(parentSession, statement, processor, confOverlay);
+  }
+
+  protected Map<String, String> getConfOverlay() {
+    return confOverlay;
+  }
+
+  protected void setConfOverlay(Map<String, String> confOverlay) {
+    if (confOverlay != null) {
+      this.confOverlay = confOverlay;
+    }
+  }
+}
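
A hedged sketch of the dispatch performed by newExecuteStatementOperation,
assuming an open HiveSession and a config overlay map (both hypothetical):

    import java.util.Map;

    import org.apache.hive.service.cli.HiveSQLException;
    import org.apache.hive.service.cli.operation.ExecuteStatementOperation;
    import org.apache.hive.service.cli.session.HiveSession;

    public class DispatchDemo {
      static void demo(HiveSession session, Map<String, String> conf) throws HiveSQLException {
        // No CommandProcessor claims "SELECT", so this yields a SQLOperation.
        ExecuteStatementOperation sql = ExecuteStatementOperation
            .newExecuteStatementOperation(session, "SELECT 1", conf, true);

        // "set" has a dedicated processor, so this yields a HiveCommandOperation.
        ExecuteStatementOperation cmd = ExecuteStatementOperation
            .newExecuteStatementOperation(session, "set -v", conf, false);
      }
    }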

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java
new file mode 100644
index 0000000..8868ec1
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetCatalogsOperation.
+ *
+ */
+public class GetCatalogsOperation extends MetadataOperation {
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.");
+
+  private final RowSet rowSet;
+
+  protected GetCatalogsOperation(HiveSession parentSession) {
+    super(parentSession, OperationType.GET_CATALOGS);
+    rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    try {
+      if (isAuthV2Enabled()) {
+        authorizeMetaGets(HiveOperationType.GET_CATALOGS, null);
+      }
+      setState(OperationState.FINISHED);
+    } catch (HiveSQLException e) {
+      setState(OperationState.ERROR);
+      throw e;
+    }
+
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+}
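
All of these metadata operations share the same lifecycle: runInternal() fills
a RowSet and moves the state to FINISHED, after which clients page through the
rows. An illustrative paging loop (hypothetical, assuming a finished
operation):

    import org.apache.hive.service.cli.FetchOrientation;
    import org.apache.hive.service.cli.HiveSQLException;
    import org.apache.hive.service.cli.RowSet;
    import org.apache.hive.service.cli.operation.GetCatalogsOperation;

    public class FetchLoopDemo {
      static void drain(GetCatalogsOperation op) throws HiveSQLException {
        // FETCH_FIRST rewinds the row set to offset 0 before the first batch.
        RowSet batch = op.getNextRowSet(FetchOrientation.FETCH_FIRST, 100);
        while (batch.numRows() > 0) {
          // consume the batch here ...
          batch = op.getNextRowSet(FetchOrientation.FETCH_NEXT, 100);
        }
      }
    }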

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
new file mode 100644
index 0000000..309f10f
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.sql.DatabaseMetaData;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.plan.HiveOperation;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType;
+import org.apache.hive.service.cli.ColumnDescriptor;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetColumnsOperation.
+ *
+ */
+public class GetColumnsOperation extends MetadataOperation {
+
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addPrimitiveColumn("TABLE_CAT", Type.STRING_TYPE,
+      "Catalog name. NULL if not applicable")
+  .addPrimitiveColumn("TABLE_SCHEM", Type.STRING_TYPE,
+      "Schema name")
+  .addPrimitiveColumn("TABLE_NAME", Type.STRING_TYPE,
+      "Table name")
+  .addPrimitiveColumn("COLUMN_NAME", Type.STRING_TYPE,
+      "Column name")
+  .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE,
+      "SQL type from java.sql.Types")
+  .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE,
+      "Data source dependent type name, for a UDT the type name is fully qualified")
+  .addPrimitiveColumn("COLUMN_SIZE", Type.INT_TYPE,
+      "Column size. For char or date types this is the maximum number of characters,"
+      + " for numeric or decimal types this is precision.")
+  .addPrimitiveColumn("BUFFER_LENGTH", Type.TINYINT_TYPE,
+      "Unused")
+  .addPrimitiveColumn("DECIMAL_DIGITS", Type.INT_TYPE,
+      "The number of fractional digits")
+  .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE,
+      "Radix (typically either 10 or 2)")
+  .addPrimitiveColumn("NULLABLE", Type.INT_TYPE,
+      "Is NULL allowed")
+  .addPrimitiveColumn("REMARKS", Type.STRING_TYPE,
+      "Comment describing column (may be null)")
+  .addPrimitiveColumn("COLUMN_DEF", Type.STRING_TYPE,
+      "Default value (may be null)")
+  .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE,
+      "Unused")
+  .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE,
+      "Unused")
+  .addPrimitiveColumn("CHAR_OCTET_LENGTH", Type.INT_TYPE,
+      "For char types the maximum number of bytes in the column")
+  .addPrimitiveColumn("ORDINAL_POSITION", Type.INT_TYPE,
+      "Index of column in table (starting at 1)")
+  .addPrimitiveColumn("IS_NULLABLE", Type.STRING_TYPE,
+      "\"NO\" means column definitely does not allow NULL values; "
+      + "\"YES\" means the column might allow NULL values. An empty "
+      + "string means nobody knows.")
+  .addPrimitiveColumn("SCOPE_CATALOG", Type.STRING_TYPE,
+      "Catalog of table that is the scope of a reference attribute "
+      + "(null if DATA_TYPE isn't REF)")
+  .addPrimitiveColumn("SCOPE_SCHEMA", Type.STRING_TYPE,
+      "Schema of table that is the scope of a reference attribute "
+      + "(null if the DATA_TYPE isn't REF)")
+  .addPrimitiveColumn("SCOPE_TABLE", Type.STRING_TYPE,
+      "Table name that is the scope of a reference attribute "
+      + "(null if the DATA_TYPE isn't REF)")
+  .addPrimitiveColumn("SOURCE_DATA_TYPE", Type.SMALLINT_TYPE,
+      "Source type of a distinct type or user-generated Ref type, "
+      + "SQL type from java.sql.Types (null if DATA_TYPE isn't DISTINCT or user-generated REF)")
+  .addPrimitiveColumn("IS_AUTO_INCREMENT", Type.STRING_TYPE,
+      "Indicates whether this column is auto incremented.");
+
+  private final String catalogName;
+  private final String schemaName;
+  private final String tableName;
+  private final String columnName;
+
+  private final RowSet rowSet;
+
+  protected GetColumnsOperation(HiveSession parentSession, String catalogName, String schemaName,
+      String tableName, String columnName) {
+    super(parentSession, OperationType.GET_COLUMNS);
+    this.catalogName = catalogName;
+    this.schemaName = schemaName;
+    this.tableName = tableName;
+    this.columnName = columnName;
+    this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    try {
+      IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+      String schemaPattern = convertSchemaPattern(schemaName);
+      String tablePattern = convertIdentifierPattern(tableName, true);
+
+      Pattern columnPattern = null;
+      if (columnName != null) {
+        columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
+      }
+
+      List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
+      Collections.sort(dbNames);
+      Map<String, List<String>> db2Tabs = new HashMap<>();
+
+      for (String dbName : dbNames) {
+        List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
+        Collections.sort(tableNames);
+        db2Tabs.put(dbName, tableNames);
+      }
+
+      if (isAuthV2Enabled()) {
+        List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
+        String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName
+            + ", tablePattern : " + tableName;
+        authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
+      }
+
+      for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
+        String dbName = dbTabs.getKey();
+        List<String> tableNames = dbTabs.getValue();
+        for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) {
+          TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
+          for (ColumnDescriptor column : schema.getColumnDescriptors()) {
+            if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
+              continue;
+            }
+            Object[] rowData = new Object[] {
+                null,  // TABLE_CAT
+                table.getDbName(), // TABLE_SCHEM
+                table.getTableName(), // TABLE_NAME
+                column.getName(), // COLUMN_NAME
+                column.getType().toJavaSQLType(), // DATA_TYPE
+                column.getTypeName(), // TYPE_NAME
+                column.getTypeDescriptor().getColumnSize(), // COLUMN_SIZE
+                null, // BUFFER_LENGTH, unused
+                column.getTypeDescriptor().getDecimalDigits(), // DECIMAL_DIGITS
+                column.getType().getNumPrecRadix(), // NUM_PREC_RADIX
+                DatabaseMetaData.columnNullable, // NULLABLE
+                column.getComment(), // REMARKS
+                null, // COLUMN_DEF
+                null, // SQL_DATA_TYPE
+                null, // SQL_DATETIME_SUB
+                null, // CHAR_OCTET_LENGTH
+                column.getOrdinalPosition(), // ORDINAL_POSITION
+                "YES", // IS_NULLABLE
+                null, // SCOPE_CATALOG
+                null, // SCOPE_SCHEMA
+                null, // SCOPE_TABLE
+                null, // SOURCE_DATA_TYPE
+                "NO", // IS_AUTO_INCREMENT
+            };
+            rowSet.addRow(rowData);
+          }
+        }
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+
+  }
+
+
+  private List<HivePrivilegeObject> getPrivObjs(Map<String, List<String>> db2Tabs) {
+    List<HivePrivilegeObject> privObjs = new ArrayList<>();
+    for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
+      for (String tabName : dbTabs.getValue()) {
+        privObjs.add(new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, dbTabs.getKey(),
+            tabName));
+      }
+    }
+    return privObjs;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+
+}
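
Seen from a JDBC client this operation backs DatabaseMetaData.getColumns; the
schema, table, and column arguments are JDBC search patterns ('%' matches any
string, '_' any single character). A hypothetical client-side sketch:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;

    public class GetColumnsDemo {
      // `connection` is assumed to be an open HiveServer2 JDBC connection.
      static void listColumns(Connection connection) throws Exception {
        DatabaseMetaData md = connection.getMetaData();
        try (ResultSet rs = md.getColumns(null, "default", "emp%", "%")) {
          while (rs.next()) {
            System.out.println(rs.getString("TABLE_NAME") + "."
                + rs.getString("COLUMN_NAME") + " : " + rs.getString("TYPE_NAME"));
          }
        }
      }
    }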

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java
new file mode 100644
index 0000000..6df1e8a
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java
@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.sql.DatabaseMetaData;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.ql.exec.FunctionInfo;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils;
+import org.apache.hive.service.cli.CLIServiceUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+import org.apache.thrift.TException;
+
+/**
+ * GetFunctionsOperation.
+ *
+ */
+public class GetFunctionsOperation extends MetadataOperation {
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addPrimitiveColumn("FUNCTION_CAT", Type.STRING_TYPE,
+      "Function catalog (may be null)")
+  .addPrimitiveColumn("FUNCTION_SCHEM", Type.STRING_TYPE,
+      "Function schema (may be null)")
+  .addPrimitiveColumn("FUNCTION_NAME", Type.STRING_TYPE,
+      "Function name. This is the name used to invoke the function")
+  .addPrimitiveColumn("REMARKS", Type.STRING_TYPE,
+      "Explanatory comment on the function")
+  .addPrimitiveColumn("FUNCTION_TYPE", Type.INT_TYPE,
+      "Kind of function.")
+  .addPrimitiveColumn("SPECIFIC_NAME", Type.STRING_TYPE,
+      "The name which uniquely identifies this function within its schema");
+
+  private final String catalogName;
+  private final String schemaName;
+  private final String functionName;
+
+  private final RowSet rowSet;
+
+  public GetFunctionsOperation(HiveSession parentSession,
+      String catalogName, String schemaName, String functionName) {
+    super(parentSession, OperationType.GET_FUNCTIONS);
+    this.catalogName = catalogName;
+    this.schemaName = schemaName;
+    this.functionName = functionName;
+    this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    if (isAuthV2Enabled()) {
+      // get databases for schema pattern
+      IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+      String schemaPattern = convertSchemaPattern(schemaName);
+      List<String> matchingDbs;
+      try {
+        matchingDbs = metastoreClient.getDatabases(schemaPattern);
+      } catch (TException e) {
+        setState(OperationState.ERROR);
+        throw new HiveSQLException(e);
+      }
+      // authorize this call on the schema objects
+      List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils
+          .getHivePrivDbObjects(matchingDbs);
+      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+      authorizeMetaGets(HiveOperationType.GET_FUNCTIONS, privObjs, cmdStr);
+    }
+
+    try {
+      if ((null == catalogName || "".equals(catalogName))
+          && (null == schemaName || "".equals(schemaName))) {
+        Set<String> functionNames = FunctionRegistry
+            .getFunctionNames(CLIServiceUtils.patternToRegex(functionName));
+        for (String functionName : functionNames) {
+          FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(functionName);
+          Object[] rowData = new Object[] {
+              null, // FUNCTION_CAT
+              null, // FUNCTION_SCHEM
+              functionInfo.getDisplayName(), // FUNCTION_NAME
+              "", // REMARKS
+              (functionInfo.isGenericUDTF() ?
+                  DatabaseMetaData.functionReturnsTable
+                  : DatabaseMetaData.functionNoTable), // FUNCTION_TYPE
+              functionInfo.getClass().getCanonicalName() // SPECIFIC_NAME
+          };
+          rowSet.addRow(rowData);
+        }
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+  }
+
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+}
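
The FUNCTION_TYPE column reuses the java.sql.DatabaseMetaData function
constants; a small sketch of the two values the loop above can emit:

    import java.sql.DatabaseMetaData;

    public class FunctionTypeDemo {
      public static void main(String[] args) {
        // Table-generating functions (UDTFs) report functionReturnsTable,
        // everything else functionNoTable.
        System.out.println(DatabaseMetaData.functionNoTable);       // 1
        System.out.println(DatabaseMetaData.functionReturnsTable);  // 2
      }
    }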

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java
new file mode 100644
index 0000000..e56686a
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetSchemasOperation.
+ *
+ */
+public class GetSchemasOperation extends MetadataOperation {
+  private final String catalogName;
+  private final String schemaName;
+
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addStringColumn("TABLE_SCHEM", "Schema name.")
+  .addStringColumn("TABLE_CATALOG", "Catalog name.");
+
+  private RowSet rowSet;
+
+  protected GetSchemasOperation(HiveSession parentSession,
+      String catalogName, String schemaName) {
+    super(parentSession, OperationType.GET_SCHEMAS);
+    this.catalogName = catalogName;
+    this.schemaName = schemaName;
+    this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    if (isAuthV2Enabled()) {
+      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+      authorizeMetaGets(HiveOperationType.GET_SCHEMAS, null, cmdStr);
+    }
+    try {
+      IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+      String schemaPattern = convertSchemaPattern(schemaName);
+      for (String dbName : metastoreClient.getDatabases(schemaPattern)) {
+        rowSet.addRow(new Object[] {dbName, DEFAULT_HIVE_CATALOG});
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+  }
+
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+}
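
From the client side this backs DatabaseMetaData.getSchemas. A hypothetical
sketch, assuming an open HiveServer2 JDBC connection:

    import java.sql.Connection;
    import java.sql.DatabaseMetaData;
    import java.sql.ResultSet;

    public class GetSchemasDemo {
      static void listSchemas(Connection connection) throws Exception {
        DatabaseMetaData md = connection.getMetaData();
        try (ResultSet rs = md.getSchemas()) {
          while (rs.next()) {
            // Each row pairs a database name with DEFAULT_HIVE_CATALOG.
            System.out.println(rs.getString("TABLE_SCHEM"));
          }
        }
      }
    }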

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
new file mode 100644
index 0000000..a09b39a
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTableTypesOperation.
+ *
+ */
+public class GetTableTypesOperation extends MetadataOperation {
+
+  protected static TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addStringColumn("TABLE_TYPE", "Table type name.");
+
+  private final RowSet rowSet;
+  private final TableTypeMapping tableTypeMapping;
+
+  protected GetTableTypesOperation(HiveSession parentSession) {
+    super(parentSession, OperationType.GET_TABLE_TYPES);
+    String tableMappingStr = getParentSession().getHiveConf().
+        getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+    tableTypeMapping =
+      TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
+    rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    if (isAuthV2Enabled()) {
+      authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null);
+    }
+    try {
+      for (TableType type : TableType.values()) {
+        rowSet.addRow(new String[] {tableTypeMapping.mapToClientType(type.toString())});
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+
+}
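
The rows returned here depend on the hive.server2.table.type.mapping setting
read in the constructor: with the CLASSIC mapping the metastore TableType
values collapse to TABLE/VIEW, otherwise the raw names come back. A
hypothetical client-side sketch:

    import java.sql.Connection;
    import java.sql.ResultSet;

    public class GetTableTypesDemo {
      // `connection` is assumed to be an open HiveServer2 JDBC connection.
      static void listTableTypes(Connection connection) throws Exception {
        try (ResultSet rs = connection.getMetaData().getTableTypes()) {
          while (rs.next()) {
            // CLASSIC prints TABLE/VIEW; HIVE prints MANAGED_TABLE,
            // EXTERNAL_TABLE, VIRTUAL_VIEW, ...
            System.out.println(rs.getString("TABLE_TYPE"));
          }
        }
      }
    }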

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
new file mode 100644
index 0000000..0e2fdc6
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTablesOperation.
+ *
+ */
+public class GetTablesOperation extends MetadataOperation {
+
+  private final String catalogName;
+  private final String schemaName;
+  private final String tableName;
+  private final List<String> tableTypes = new ArrayList<String>();
+  private final RowSet rowSet;
+  private final TableTypeMapping tableTypeMapping;
+
+
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.")
+  .addStringColumn("TABLE_SCHEM", "Schema name.")
+  .addStringColumn("TABLE_NAME", "Table name.")
+  .addStringColumn("TABLE_TYPE", "The table type, e.g. \"TABLE\", \"VIEW\", 
etc.")
+  .addStringColumn("REMARKS", "Comments about the table.");
+
+  protected GetTablesOperation(HiveSession parentSession,
+      String catalogName, String schemaName, String tableName,
+      List<String> tableTypes) {
+    super(parentSession, OperationType.GET_TABLES);
+    this.catalogName = catalogName;
+    this.schemaName = schemaName;
+    this.tableName = tableName;
+    String tableMappingStr = getParentSession().getHiveConf().
+        getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING);
+    tableTypeMapping =
+        TableTypeMappingFactory.getTableTypeMapping(tableMappingStr);
+    if (tableTypes != null) {
+      this.tableTypes.addAll(tableTypes);
+    }
+    this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    try {
+      IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
+      String schemaPattern = convertSchemaPattern(schemaName);
+      List<String> matchingDbs = metastoreClient.getDatabases(schemaPattern);
+      if (isAuthV2Enabled()) {
+        List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs);
+        String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
+        authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr);
+      }
+
+      String tablePattern = convertIdentifierPattern(tableName, true);
+      // matchingDbs already holds the getDatabases(schemaPattern) result;
+      // reuse it rather than issuing a second metastore call.
+      for (String dbName : matchingDbs) {
+        List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
+        for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) {
+          Object[] rowData = new Object[] {
+              DEFAULT_HIVE_CATALOG,
+              table.getDbName(),
+              table.getTableName(),
+              tableTypeMapping.mapToClientType(table.getTableType()),
+              table.getParameters().get("comment")
+              };
+          if (tableTypes.isEmpty() || tableTypes.contains(
+                tableTypeMapping.mapToClientType(table.getTableType()))) {
+            rowSet.addRow(rowData);
+          }
+        }
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+}
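
Continuing the client-side sketch from above (reusing the assumed conn/meta
handles), the corresponding JDBC entry point is DatabaseMetaData.getTables();
the schema and table patterns use SQL wildcards, which the server converts via
convertSchemaPattern/convertIdentifierPattern before hitting the metastore:

    // null catalog = no catalog filter; the patterns and the type filter
    // below are illustrative values, not mandated by this patch.
    try (ResultSet rs = meta.getTables(null, "default", "%",
        new String[] {"TABLE", "VIEW"})) {
      while (rs.next()) {
        System.out.println(rs.getString("TABLE_SCHEM") + "."
            + rs.getString("TABLE_NAME")
            + " (" + rs.getString("TABLE_TYPE") + ")");
      }
    }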

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java
new file mode 100644
index 0000000..2a0fec2
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.OperationType;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.Type;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * GetTypeInfoOperation.
+ *
+ */
+public class GetTypeInfoOperation extends MetadataOperation {
+
+  private static final TableSchema RESULT_SET_SCHEMA = new TableSchema()
+  .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE,
+      "Type name")
+  .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE,
+      "SQL data type from java.sql.Types")
+  .addPrimitiveColumn("PRECISION", Type.INT_TYPE,
+      "Maximum precision")
+  .addPrimitiveColumn("LITERAL_PREFIX", Type.STRING_TYPE,
+      "Prefix used to quote a literal (may be null)")
+  .addPrimitiveColumn("LITERAL_SUFFIX", Type.STRING_TYPE,
+      "Suffix used to quote a literal (may be null)")
+  .addPrimitiveColumn("CREATE_PARAMS", Type.STRING_TYPE,
+      "Parameters used in creating the type (may be null)")
+  .addPrimitiveColumn("NULLABLE", Type.SMALLINT_TYPE,
+      "Can you use NULL for this type")
+  .addPrimitiveColumn("CASE_SENSITIVE", Type.BOOLEAN_TYPE,
+      "Is it case sensitive")
+  .addPrimitiveColumn("SEARCHABLE", Type.SMALLINT_TYPE,
+      "Can you use \"WHERE\" based on this type")
+  .addPrimitiveColumn("UNSIGNED_ATTRIBUTE", Type.BOOLEAN_TYPE,
+      "Is it unsigned")
+  .addPrimitiveColumn("FIXED_PREC_SCALE", Type.BOOLEAN_TYPE,
+      "Can it be a money value")
+  .addPrimitiveColumn("AUTO_INCREMENT", Type.BOOLEAN_TYPE,
+      "Can it be used for an auto-increment value")
+  .addPrimitiveColumn("LOCAL_TYPE_NAME", Type.STRING_TYPE,
+      "Localized version of type name (may be null)")
+  .addPrimitiveColumn("MINIMUM_SCALE", Type.SMALLINT_TYPE,
+      "Minimum scale supported")
+  .addPrimitiveColumn("MAXIMUM_SCALE", Type.SMALLINT_TYPE,
+      "Maximum scale supported")
+  .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE,
+      "Unused")
+  .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE,
+      "Unused")
+  .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE,
+      "Usually 2 or 10");
+
+  private final RowSet rowSet;
+
+  protected GetTypeInfoOperation(HiveSession parentSession) {
+    super(parentSession, OperationType.GET_TYPE_INFO);
+    rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion());
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    if (isAuthV2Enabled()) {
+      authorizeMetaGets(HiveOperationType.GET_TYPEINFO, null);
+    }
+    try {
+      for (Type type : Type.values()) {
+        Object[] rowData = new Object[] {
+            type.getName(), // TYPE_NAME
+            type.toJavaSQLType(), // DATA_TYPE
+            type.getMaxPrecision(), // PRECISION
+            type.getLiteralPrefix(), // LITERAL_PREFIX
+            type.getLiteralSuffix(), // LITERAL_SUFFIX
+            type.getCreateParams(), // CREATE_PARAMS
+            type.getNullable(), // NULLABLE
+            type.isCaseSensitive(), // CASE_SENSITIVE
+            type.getSearchable(), // SEARCHABLE
+            type.isUnsignedAttribute(), // UNSIGNED_ATTRIBUTE
+            type.isFixedPrecScale(), // FIXED_PREC_SCALE
+            type.isAutoIncrement(), // AUTO_INCREMENT
+            type.getLocalizedName(), // LOCAL_TYPE_NAME
+            type.getMinimumScale(), // MINIMUM_SCALE
+            type.getMaximumScale(), // MAXIMUM_SCALE
+            null, // SQL_DATA_TYPE, unused
+            null, // SQL_DATETIME_SUB, unused
+            type.getNumPrecRadix() //NUM_PREC_RADIX
+        };
+        rowSet.addRow(rowData);
+      }
+      setState(OperationState.FINISHED);
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException(e);
+    }
+  }
+
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    return RESULT_SET_SCHEMA;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    assertState(OperationState.FINISHED);
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      rowSet.setStartOffset(0);
+    }
+    return rowSet.extractSubset((int)maxRows);
+  }
+}
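
The matching JDBC call here is DatabaseMetaData.getTypeInfo(), which returns
one row per entry of the Type enum, with the columns declared in
RESULT_SET_SCHEMA above. A short continuation of the same assumed client:

    try (ResultSet rs = meta.getTypeInfo()) {
      while (rs.next()) {
        // DATA_TYPE carries the java.sql.Types constant backing each Hive type.
        System.out.println(rs.getString("TYPE_NAME")
            + " -> java.sql.Types " + rs.getInt("DATA_TYPE"));
      }
    }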

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
new file mode 100644
index 0000000..bcc66cf
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java
@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.Schema;
+import org.apache.hadoop.hive.ql.processors.CommandProcessor;
+import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hive.service.cli.FetchOrientation;
+import org.apache.hive.service.cli.HiveSQLException;
+import org.apache.hive.service.cli.OperationState;
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.RowSetFactory;
+import org.apache.hive.service.cli.TableSchema;
+import org.apache.hive.service.cli.session.HiveSession;
+
+/**
+ * Executes a HiveCommand
+ */
+public class HiveCommandOperation extends ExecuteStatementOperation {
+  private CommandProcessor commandProcessor;
+  private TableSchema resultSchema = null;
+
+  /**
+   * Processors other than Hive queries (i.e., commands that do not go through
+   * the Driver) write their output to session.out (a temp file) first; the
+   * fetchOne/fetchN/fetchAll paths then read that output back via resultReader.
+   */
+  private BufferedReader resultReader;
+
+
+  protected HiveCommandOperation(HiveSession parentSession, String statement,
+      CommandProcessor commandProcessor, Map<String, String> confOverlay) {
+    super(parentSession, statement, confOverlay, false);
+    this.commandProcessor = commandProcessor;
+    setupSessionIO(parentSession.getSessionState());
+  }
+
+  private void setupSessionIO(SessionState sessionState) {
+    try {
+      LOG.info("Putting temp output to file " + 
sessionState.getTmpOutputFile().toString());
+      sessionState.in = null; // hive server's session input stream is not used
+      // open a per-session file in auto-flush mode for writing temp results
+      sessionState.out = new PrintStream(new FileOutputStream(sessionState.getTmpOutputFile()), true, "UTF-8");
+      // TODO: for hadoop jobs, progress is printed out to session.err,
+      // we should find a way to feed back job progress to client
+      sessionState.err = new PrintStream(System.err, true, "UTF-8");
+    } catch (IOException e) {
+      LOG.error("Error in creating temp output file ", e);
+      try {
+        sessionState.in = null;
+        sessionState.out = new PrintStream(System.out, true, "UTF-8");
+        sessionState.err = new PrintStream(System.err, true, "UTF-8");
+      } catch (UnsupportedEncodingException ee) {
+        LOG.error("Error creating PrintStream", e);
+        ee.printStackTrace();
+        sessionState.out = null;
+        sessionState.err = null;
+      }
+    }
+  }
+
+
+  private void tearDownSessionIO() {
+    IOUtils.cleanup(LOG, parentSession.getSessionState().out);
+    IOUtils.cleanup(LOG, parentSession.getSessionState().err);
+  }
+
+  @Override
+  public void runInternal() throws HiveSQLException {
+    setState(OperationState.RUNNING);
+    try {
+      String command = getStatement().trim();
+      String[] tokens = command.split("\\s");
+      String commandArgs = command.substring(tokens[0].length()).trim();
+
+      CommandProcessorResponse response = commandProcessor.run(commandArgs);
+      int returnCode = response.getResponseCode();
+      if (returnCode != 0) {
+        throw toSQLException("Error while processing statement", response);
+      }
+      Schema schema = response.getSchema();
+      if (schema != null) {
+        setHasResultSet(true);
+        resultSchema = new TableSchema(schema);
+      } else {
+        setHasResultSet(false);
+        resultSchema = new TableSchema();
+      }
+    } catch (HiveSQLException e) {
+      setState(OperationState.ERROR);
+      throw e;
+    } catch (Exception e) {
+      setState(OperationState.ERROR);
+      throw new HiveSQLException("Error running query: " + e.toString(), e);
+    }
+    setState(OperationState.FINISHED);
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.operation.Operation#close()
+   */
+  @Override
+  public void close() throws HiveSQLException {
+    setState(OperationState.CLOSED);
+    tearDownSessionIO();
+    cleanTmpFile();
+    cleanupOperationLog();
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.operation.Operation#getResultSetSchema()
+   */
+  @Override
+  public TableSchema getResultSetSchema() throws HiveSQLException {
+    return resultSchema;
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.operation.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long)
+   */
+  @Override
+  public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException {
+    validateDefaultFetchOrientation(orientation);
+    if (orientation.equals(FetchOrientation.FETCH_FIRST)) {
+      resetResultReader();
+    }
+    List<String> rows = readResults((int) maxRows);
+    RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion());
+
+    for (String row : rows) {
+      rowSet.addRow(new String[] {row});
+    }
+    return rowSet;
+  }
+
+  /**
+   * Reads the temporary results of non-Hive (non-Driver) commands into a
+   * List of strings.
+   * @param nLines number of lines to read at once. If it is <= 0, all lines are read.
+   */
+  private List<String> readResults(int nLines) throws HiveSQLException {
+    if (resultReader == null) {
+      SessionState sessionState = getParentSession().getSessionState();
+      File tmp = sessionState.getTmpOutputFile();
+      try {
+        resultReader = new BufferedReader(new FileReader(tmp));
+      } catch (FileNotFoundException e) {
+        LOG.error("File " + tmp + " not found. ", e);
+        throw new HiveSQLException(e);
+      }
+    }
+    List<String> results = new ArrayList<String>();
+
+    for (int i = 0; i < nLines || nLines <= 0; ++i) {
+      try {
+        String line = resultReader.readLine();
+        if (line == null) {
+          // reached the end of the result file
+          break;
+        } else {
+          results.add(line);
+        }
+      } catch (IOException e) {
+        LOG.error("Reading temp results encountered an exception: ", e);
+        throw new HiveSQLException(e);
+      }
+    }
+    return results;
+  }
+
+  private void cleanTmpFile() {
+    resetResultReader();
+    SessionState sessionState = getParentSession().getSessionState();
+    File tmp = sessionState.getTmpOutputFile();
+    tmp.delete();
+  }
+
+  private void resetResultReader() {
+    if (resultReader != null) {
+      IOUtils.cleanup(LOG, resultReader);
+      resultReader = null;
+    }
+  }
+}
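
HiveCommandOperation covers statements handled by a CommandProcessor rather
than the query Driver (SET, DFS, ADD/DELETE resource, and so on); the
processor's console output is buffered to the session temp file and handed
back one line per single-column row. A hedged client-side sketch, again
reusing the assumed conn handle from the earlier example:

    import java.sql.Statement;

    try (Statement stmt = conn.createStatement();
         // "SET -v" is routed to a CommandProcessor, not the Driver.
         ResultSet rs = stmt.executeQuery("SET -v")) {
      while (rs.next()) {
        System.out.println(rs.getString(1)); // one buffered output line per row
      }
    }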

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java
new file mode 100644
index 0000000..b530f21
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.TableType;
+
+/**
+ * HiveTableTypeMapping.
+ * Default table type mapping
+ *
+ */
+public class HiveTableTypeMapping implements TableTypeMapping {
+
+  @Override
+  public String mapToHiveType(String clientTypeName) {
+    return clientTypeName;
+  }
+
+  @Override
+  public String mapToClientType(String hiveTypeName) {
+    return hiveTypeName;
+  }
+
+  @Override
+  public Set<String> getTableTypeNames() {
+    Set<String> typeNameSet = new HashSet<String>();
+    for (TableType type : TableType.values()) {
+      typeNameSet.add(type.toString());
+    }
+    return typeNameSet;
+  }
+}
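
HiveTableTypeMapping is the identity mapping; the TableTypeMapping interface
exists so that HIVE_SERVER2_TABLE_TYPE_MAPPING can select a different scheme.
Purely to illustrate the contract (this is a hypothetical mapping, not one
shipped in this patch), a JDBC-style mapping might look like:

    import java.util.HashSet;
    import java.util.Set;

    import org.apache.hadoop.hive.metastore.TableType;

    /** Hypothetical mapping from metastore names to JDBC-style names. */
    public class JdbcStyleTableTypeMapping implements TableTypeMapping {

      @Override
      public String mapToHiveType(String clientTypeName) {
        if ("TABLE".equalsIgnoreCase(clientTypeName)) {
          return TableType.MANAGED_TABLE.toString();
        }
        if ("VIEW".equalsIgnoreCase(clientTypeName)) {
          return TableType.VIRTUAL_VIEW.toString();
        }
        return clientTypeName;
      }

      @Override
      public String mapToClientType(String hiveTypeName) {
        if (TableType.MANAGED_TABLE.toString().equals(hiveTypeName)) {
          return "TABLE";
        }
        if (TableType.VIRTUAL_VIEW.toString().equals(hiveTypeName)) {
          return "VIEW";
        }
        return hiveTypeName;
      }

      @Override
      public Set<String> getTableTypeNames() {
        Set<String> names = new HashSet<String>();
        names.add("TABLE");
        names.add("VIEW");
        return names;
      }
    }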

http://git-wip-us.apache.org/repos/asf/spark/blob/7feeb82c/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
----------------------------------------------------------------------
diff --git a/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
new file mode 100644
index 0000000..70340bd
--- /dev/null
+++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
@@ -0,0 +1,209 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.service.cli.operation;
+
+import java.io.CharArrayWriter;
+import java.util.Enumeration;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hive.ql.exec.Task;
+import org.apache.hadoop.hive.ql.log.PerfLogger;
+import org.apache.hadoop.hive.ql.session.OperationLog;
+import org.apache.hadoop.hive.ql.session.OperationLog.LoggingLevel;
+import org.apache.hive.service.cli.CLIServiceUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.ConsoleAppender;
+import org.apache.log4j.Layout;
+import org.apache.log4j.Logger;
+import org.apache.log4j.WriterAppender;
+import org.apache.log4j.spi.Filter;
+import org.apache.log4j.spi.LoggingEvent;
+
+import com.google.common.base.Joiner;
+
+/**
+ * An Appender to divert logs from individual threads to the OperationLog they belong to.
+ */
+public class LogDivertAppender extends WriterAppender {
+  private static final Logger LOG = Logger.getLogger(LogDivertAppender.class.getName());
+  private final OperationManager operationManager;
+  private boolean isVerbose;
+  private Layout verboseLayout;
+
+  /**
+   * A log filter that filters messages coming from loggers with the given names.
+   * It can be used as a white-list filter or a black-list filter.
+   * We apply a black-list filter to the loggers used by the log diversion itself,
+   * so that they don't generate more logs for themselves while processing logs.
+   * A white-list filter is used for the less verbose logging levels.
+   */
+  private static class NameFilter extends Filter {
+    private Pattern namePattern;
+    private LoggingLevel loggingMode;
+    private OperationManager operationManager;
+
+    /* Patterns that are excluded in verbose logging level.
+     * Filter out messages coming from log processing classes, or we'll run an infinite loop.
+     */
+    private static final Pattern verboseExcludeNamePattern = Pattern.compile(Joiner.on("|").
+      join(new String[] {LOG.getName(), OperationLog.class.getName(),
+      OperationManager.class.getName()}));
+
+    /* Patterns that are included in execution logging level.
+     * In execution mode, show only select logger messages.
+     */
+    private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
+      join(new String[] {"org.apache.hadoop.mapreduce.JobSubmitter",
+      "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
+      "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
+
+    /* Patterns that are included in performance logging level.
+     * In performance mode, show execution and performance logger messages.
+     */
+    private static final Pattern performanceIncludeNamePattern = Pattern.compile(
+      executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
+
+    private void setCurrentNamePattern(OperationLog.LoggingLevel mode) {
+      if (mode == OperationLog.LoggingLevel.VERBOSE) {
+        this.namePattern = verboseExcludeNamePattern;
+      } else if (mode == OperationLog.LoggingLevel.EXECUTION) {
+        this.namePattern = executionIncludeNamePattern;
+      } else if (mode == OperationLog.LoggingLevel.PERFORMANCE) {
+        this.namePattern = performanceIncludeNamePattern;
+      }
+    }
+
+    public NameFilter(
+      OperationLog.LoggingLevel loggingMode, OperationManager op) {
+      this.operationManager = op;
+      this.loggingMode = loggingMode;
+      setCurrentNamePattern(loggingMode);
+    }
+
+    @Override
+    public int decide(LoggingEvent ev) {
+      OperationLog log = operationManager.getOperationLogByThread();
+      boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
+
+      if (log == null) {
+        return Filter.DENY;
+      }
+
+      OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel();
+      // If logging is disabled, deny everything.
+      if (currentLoggingMode == OperationLog.LoggingLevel.NONE) {
+        return Filter.DENY;
+      }
+      // Look at the current session's setting
+      // and set the pattern and excludeMatches accordingly.
+      if (currentLoggingMode != loggingMode) {
+        loggingMode = currentLoggingMode;
+        setCurrentNamePattern(loggingMode);
+      }
+
+      boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches();
+
+      if (excludeMatches == isMatch) {
+        // Deny if this is a black-list filter (excludeMatches = true) and the
+        // name matched, or if this is a white-list filter and it didn't match.
+        return Filter.DENY;
+      }
+      return Filter.NEUTRAL;
+    }
+  }
+
+  /** This is where the log message will go. */
+  private final CharArrayWriter writer = new CharArrayWriter();
+
+  private void setLayout(boolean isVerbose, Layout lo) {
+    if (isVerbose) {
+      if (lo == null) {
+        lo = CLIServiceUtils.verboseLayout;
+        LOG.info("Cannot find a Layout from a ConsoleAppender. Using default 
Layout pattern.");
+      }
+    } else {
+      lo = CLIServiceUtils.nonVerboseLayout;
+    }
+    setLayout(lo);
+  }
+
+  private void initLayout(boolean isVerbose) {
+    // There should be a ConsoleAppender. Copy its Layout.
+    Logger root = Logger.getRootLogger();
+    Layout layout = null;
+
+    Enumeration<?> appenders = root.getAllAppenders();
+    while (appenders.hasMoreElements()) {
+      Appender ap = (Appender) appenders.nextElement();
+      if (ap.getClass().equals(ConsoleAppender.class)) {
+        layout = ap.getLayout();
+        break;
+      }
+    }
+    setLayout(isVerbose, layout);
+  }
+
+  public LogDivertAppender(OperationManager operationManager,
+    OperationLog.LoggingLevel loggingMode) {
+    isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
+    initLayout(isVerbose);
+    setWriter(writer);
+    setName("LogDivertAppender");
+    this.operationManager = operationManager;
+    this.verboseLayout = isVerbose ? layout : CLIServiceUtils.verboseLayout;
+    addFilter(new NameFilter(loggingMode, operationManager));
+  }
+
+  @Override
+  public void doAppend(LoggingEvent event) {
+    OperationLog log = operationManager.getOperationLogByThread();
+
+    // Set current layout depending on the verbose/non-verbose mode.
+    if (log != null) {
+      boolean isCurrModeVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
+
+      // If there is a logging level change from verbose->non-verbose or vice-versa since
+      // the last subAppend call, change the layout to preserve consistency.
+      if (isCurrModeVerbose != isVerbose) {
+        isVerbose = isCurrModeVerbose;
+        setLayout(isVerbose, verboseLayout);
+      }
+    }
+    super.doAppend(event);
+  }
+
+  /**
+   * Overrides WriterAppender.subAppend(), which does the real logging. No need
+   * to worry about concurrency since log4j calls this synchronously.
+   */
+  @Override
+  protected void subAppend(LoggingEvent event) {
+    super.subAppend(event);
+    // That should've gone into our writer. Notify the LogContext.
+    String logOutput = writer.toString();
+    writer.reset();
+
+    OperationLog log = operationManager.getOperationLogByThread();
+    if (log == null) {
+      LOG.debug(" ---+++=== Dropped log event from thread " + 
event.getThreadName());
+      return;
+    }
+    log.writeOperationLog(logOutput);
+  }
+}
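
In HiveServer2 this appender is attached to the root log4j logger (by
OperationManager when operation logging is enabled); a minimal wiring sketch
under that assumption, using the log4j 1.x API:

    import org.apache.hadoop.hive.ql.session.OperationLog;
    import org.apache.log4j.Logger;

    // Assumes an OperationManager instance named operationManager.
    LogDivertAppender divert = new LogDivertAppender(
        operationManager, OperationLog.LoggingLevel.EXECUTION);
    // From here on, every log event passes through NameFilter.decide() and,
    // if accepted, is written to the current thread's OperationLog.
    Logger.getRootLogger().addAppender(divert);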

