http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
index 8dab33a..077c4cb 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TupleDescriptor.java
@@ -345,4 +345,18 @@ public class TupleDescriptor {
     }
     return true;
   }
+
+  /**
+   * Returns a list of slot ids that correspond to partition columns.
+   */
+  public List<SlotId> getPartitionSlots() {
+    List<SlotId> partitionSlots = Lists.newArrayList();
+    for (SlotDescriptor slotDesc: getSlots()) {
+      if (slotDesc.getColumn() == null) continue;
+      if (slotDesc.getColumn().getPosition() < getTable().getNumClusteringCols()) {
+        partitionSlots.add(slotDesc.getId());
+      }
+    }
+    return partitionSlots;
+  }
 }

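Usage sketch (illustrative, not part of the patch): the new TupleDescriptor.getPartitionSlots() centralizes the partition-slot scan that HdfsPartitionPruner previously performed inline. Assuming an analyzed TupleDescriptor 'tupleDesc' over an HdfsTable is in scope:

    // Slots backed by clustering (partition) columns; these are the only
    // slots that partition filters need to evaluate.
    List<SlotId> partitionSlots = tupleDesc.getPartitionSlots();
    // A scan that references no partition columns has nothing to prune on.
    boolean canPrune = !partitionSlots.isEmpty();
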
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index f56f502..203ef6d 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -932,6 +932,25 @@ public class CatalogServiceCatalog extends Catalog {
   }
 
   /**
+   * Drops the partitions specified in 'partitionSet' from 'tbl'. Throws a
+   * CatalogException if 'tbl' is not an HdfsTable. Returns the target table.
+   */
+  public Table dropPartitions(Table tbl, List<List<TPartitionKeyValue>> partitionSet)
+      throws CatalogException {
+    Preconditions.checkNotNull(tbl);
+    Preconditions.checkNotNull(partitionSet);
+    Preconditions.checkState(Thread.holdsLock(tbl));
+    if (!(tbl instanceof HdfsTable)) {
+      throw new CatalogException("Table " + tbl.getFullName() + " is not an 
Hdfs table");
+    }
+    HdfsTable hdfsTable = (HdfsTable) tbl;
+    List<HdfsPartition> partitions =
+        hdfsTable.getPartitionsFromPartitionSet(partitionSet);
+    hdfsTable.dropPartitions(partitions);
+    return hdfsTable;
+  }
+
+  /**
    * Drops the partition specified in 'partitionSpec' from 'tbl'. Throws a
    * CatalogException if 'tbl' is not an HdfsTable. If the partition having the given
    * partition spec does not exist, null is returned. Otherwise, the modified table is

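Usage sketch (illustrative, not part of the patch): dropPartitions() asserts Thread.holdsLock(tbl), so callers are expected to synchronize on the table, as CatalogOpExecutor does. Assuming 'catalog', 'tbl', and a resolved 'partitionSet' are in scope:

    synchronized (tbl) {
      // Resolves 'partitionSet' against the HdfsTable and removes the matched
      // partitions from its in-memory lookup structures; throws a
      // CatalogException if 'tbl' is not an HdfsTable.
      Table updated = catalog.dropPartitions(tbl, partitionSet);
    }
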
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 69b4b4e..21b5359 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -90,12 +90,42 @@ import org.apache.impala.util.MetaStoreUtil;
 import org.apache.impala.util.TAccessLevelUtil;
 import org.apache.impala.util.TResultRowBuilder;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.ImmutableMap;
+
+import org.apache.avro.Schema;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStorageLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
 
 /**
 * Internal representation of table-related metadata of a file-resident table on a
@@ -621,6 +651,20 @@ public class HdfsTable extends Table {
   }
 
   /**
+   * Returns the HdfsPartitions that correspond to the given partition set.
+   */
+  public List<HdfsPartition> getPartitionsFromPartitionSet(
+      List<List<TPartitionKeyValue>> partitionSet) {
+    List<HdfsPartition> partitions = Lists.newArrayList();
+    for (List<TPartitionKeyValue> kv : partitionSet) {
+      HdfsPartition partition =
+          getPartitionFromThriftPartitionSpec(kv);
+      if (partition != null) partitions.add(partition);
+    }
+    return partitions;
+  }
+
+  /**
    * Create columns corresponding to fieldSchemas. Throws a TableLoadingException if the
    * metadata is incompatible with what we support.
    */
@@ -961,6 +1005,20 @@ public class HdfsTable extends Table {
   }
 
   /**
+   * Drops the given partitions from this table. Cleans up their metadata from all the
+   * mappings used to speed up partition pruning/lookup. Also updates partition column
+   * statistics. Returns the list of partitions that were dropped.
+   */
+  public List<HdfsPartition> dropPartitions(List<HdfsPartition> partitions) {
+    ArrayList<HdfsPartition> droppedPartitions = Lists.newArrayList();
+    for (HdfsPartition partition: partitions) {
+      HdfsPartition hdfsPartition = dropPartition(partition);
+      if (hdfsPartition != null) droppedPartitions.add(hdfsPartition);
+    }
+    return droppedPartitions;
+  }
+
+  /**
    * Adds or replaces the default partition.
    */
   public void addDefaultPartition(StorageDescriptor storageDescriptor)
@@ -1143,7 +1201,7 @@ public class HdfsTable extends Table {
       partitionNames.add(partition.getPartitionName());
     }
     partitionsToRemove.addAll(dirtyPartitions);
-    for (HdfsPartition partition: partitionsToRemove) dropPartition(partition);
+    dropPartitions(partitionsToRemove);
     // Load dirty partitions from Hive Metastore
     loadPartitionsFromMetastore(dirtyPartitions, client);
 
@@ -1889,7 +1947,7 @@ public class HdfsTable extends Table {
    * Returns files info for all partitions, if partition spec is null, ordered
    * by partition.
    */
-  public TResultSet getFiles(List<TPartitionKeyValue> partitionSpec)
+  public TResultSet getFiles(List<List<TPartitionKeyValue>> partitionSet)
       throws CatalogException {
     TResultSet result = new TResultSet();
     TResultSetMetadata resultSchema = new TResultSetMetadata();
@@ -1899,16 +1957,14 @@ public class HdfsTable extends Table {
     resultSchema.addToColumns(new TColumn("Partition", 
Type.STRING.toThrift()));
     result.setRows(Lists.<TResultRow>newArrayList());
 
-    List<HdfsPartition> orderedPartitions = null;
-    if (partitionSpec == null) {
+    List<HdfsPartition> orderedPartitions;
+    if (partitionSet == null) {
       orderedPartitions = Lists.newArrayList(partitionMap_.values());
-      Collections.sort(orderedPartitions);
     } else {
-      // Get the HdfsPartition object for the given partition spec.
-      HdfsPartition partition = getPartitionFromThriftPartitionSpec(partitionSpec);
-      Preconditions.checkState(partition != null);
-      orderedPartitions = Lists.newArrayList(partition);
+      // Get a list of HdfsPartition objects for the given partition set.
+      orderedPartitions = getPartitionsFromPartitionSet(partitionSet);
     }
+    Collections.sort(orderedPartitions);
 
     for (HdfsPartition p: orderedPartitions) {
       List<FileDescriptor> orderedFds = Lists.newArrayList(p.getFileDescriptors());

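Usage sketch (illustrative, not part of the patch): the set-based lookup is the building block for the multi-partition DDL paths below. Specs that match no partition are silently skipped, which is what lets DROP PARTITION ... IF EXISTS tolerate misses. Assuming 'hdfsTable' and a 'partitionSet' of type List<List<TPartitionKeyValue>> are in scope:

    List<HdfsPartition> parts = hdfsTable.getPartitionsFromPartitionSet(partitionSet);
    // dropPartitions() returns only the partitions it actually removed.
    List<HdfsPartition> dropped = hdfsTable.dropPartitions(parts);
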
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index bef381a..be9dc7b 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -245,6 +245,10 @@ public abstract class Table implements CatalogObject {
     return newTable;
   }
 
+  public boolean isClusteringColumn(Column c) {
+    return c.getPosition() < numClusteringCols_;
+  }
+
  protected void loadFromThrift(TTable thriftTable) throws TableLoadingException {
     List<TColumn> columns = new ArrayList<TColumn>();
     columns.addAll(thriftTable.getClustering_columns());

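Usage sketch (illustrative, not part of the patch): the new predicate replaces scattered 'position < numClusteringCols' checks such as the one removed from HdfsPartitionPruner. Assuming a catalog 'table' is in scope:

    for (Column c : table.getColumns()) {
      if (table.isClusteringColumn(c)) {
        // Partition (clustering) columns are handled separately from data
        // columns, e.g. when pruning partitions or dropping column stats.
      }
    }
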
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
index 10b6539..f719d0c 100644
--- a/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
+++ b/fe/src/main/java/org/apache/impala/planner/HdfsPartitionPruner.java
@@ -74,8 +74,7 @@ public class HdfsPartitionPruner {
   private final static int PARTITION_PRUNING_BATCH_SIZE = 1024;
 
   private final HdfsTable tbl_;
-
-  private final List<SlotId> partitionSlots_ = Lists.newArrayList();
+  private final List<SlotId> partitionSlots_;
 
   // For converting BetweenPredicates to CompoundPredicates so they can be
   // executed in the BE.
@@ -85,21 +84,17 @@ public class HdfsPartitionPruner {
   public HdfsPartitionPruner(TupleDescriptor tupleDesc) {
     Preconditions.checkState(tupleDesc.getTable() instanceof HdfsTable);
     tbl_ = (HdfsTable)tupleDesc.getTable();
+    partitionSlots_ = tupleDesc.getPartitionSlots();
 
-    // Collect all the partitioning columns from TupleDescriptor.
-    for (SlotDescriptor slotDesc: tupleDesc.getSlots()) {
-      if (slotDesc.getColumn() == null) continue;
-      if (slotDesc.getColumn().getPosition() < tbl_.getNumClusteringCols()) {
-        partitionSlots_.add(slotDesc.getId());
-      }
-    }
   }
 
   /**
    * Return a list of partitions left after applying the conjuncts. Please note
-   * that conjuncts used for filtering will be removed from the list 'conjuncts'.
+   * that conjuncts used for filtering will be removed from the list 'conjuncts'.
+   * If 'allowEmpty' is false, partitions with no file descriptors are not returned.
    */
-  public List<HdfsPartition> prunePartitions(Analyzer analyzer, List<Expr> conjuncts)
+  public List<HdfsPartition> prunePartitions(
+      Analyzer analyzer, List<Expr> conjuncts, boolean allowEmpty)
       throws InternalException, AnalysisException {
     // Start with creating a collection of partition filters for the applicable conjuncts.
     List<HdfsPartitionFilter> partitionFilters = Lists.newArrayList();
@@ -159,7 +154,7 @@ public class HdfsPartitionPruner {
     for (Long id: matchingPartitionIds) {
       HdfsPartition partition = partitionMap.get(id);
       Preconditions.checkNotNull(partition);
-      if (partition.hasFileDescriptors()) {
+      if (partition.hasFileDescriptors() || allowEmpty) {
         results.add(partition);
         analyzer.getDescTbl().addReferencedPartition(tbl_, partition.getId());
       }

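Usage sketch (illustrative, not part of the patch): the new 'allowEmpty' flag lets DDL paths reuse the pruner. Assuming 'tupleDesc', 'analyzer', and 'conjuncts' are in scope:

    HdfsPartitionPruner pruner = new HdfsPartitionPruner(tupleDesc);
    // Conjuncts used for pruning are removed from 'conjuncts'. Scans pass
    // allowEmpty = false because partitions without files contribute no rows;
    // DDL such as ALTER TABLE DROP PARTITION passes true so that empty
    // partitions can still be matched and dropped.
    List<HdfsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts, false);
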
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
index 9b0e376..6219127 100644
--- a/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/SingleNodePlanner.java
@@ -1196,7 +1196,7 @@ public class SingleNodePlanner {
     // Do partition pruning before deciding which slots to materialize,
     // We might end up removing some predicates.
     HdfsPartitionPruner pruner = new HdfsPartitionPruner(tupleDesc);
-    List<HdfsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts);
+    List<HdfsPartition> partitions = pruner.prunePartitions(analyzer, conjuncts, false);
 
     // Mark all slots referenced by the remaining conjuncts as materialized.
     analyzer.materializeSlots(conjuncts);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 884c43a..1755934 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -49,6 +49,15 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+import org.apache.impala.common.Reference;
+import org.apache.log4j.Logger;
+import org.apache.thrift.TException;
+import com.google.common.base.Joiner;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
 import org.apache.impala.analysis.FunctionName;
 import org.apache.impala.analysis.TableName;
 import org.apache.impala.authorization.User;
@@ -332,6 +341,11 @@ public class CatalogOpExecutor {
     // When true, loads the table schema and the column stats from the Hive Metastore.
     boolean reloadTableSchema = false;
 
+    // When true, sets the result to be reported to the client.
+    boolean setResultSet = false;
+    TColumnValue resultColVal = new TColumnValue();
+    Reference<Long> numUpdatedPartitions = new Reference<>(0L);
+
     TableName tableName = TableName.fromThrift(params.getTable_name());
     Table tbl = getExistingTable(tableName.getDb(), tableName.getTbl());
     catalog_.getLock().writeLock().lock();
@@ -351,6 +365,7 @@ public class CatalogOpExecutor {
       }
       // Get a new catalog version to assign to the table being altered.
       long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
+      boolean reloadMetadata = true;
       catalog_.getLock().writeLock().unlock();
       switch (params.getAlter_type()) {
         case ADD_REPLACE_COLUMNS:
@@ -373,7 +388,8 @@ public class CatalogOpExecutor {
             refreshedTable.setCatalogVersion(newCatalogVersion);
             addTableToCatalogUpdate(refreshedTable, response.result);
           }
-          return;
+          reloadMetadata = false;
+          break;
         case DROP_COLUMN:
           TAlterTableDropColParams dropColParams = params.getDrop_col_params();
           alterTableDropCol(tbl, dropColParams.getCol_name());
@@ -390,57 +406,82 @@ public class CatalogOpExecutor {
          TAlterTableDropPartitionParams dropPartParams =
              params.getDrop_partition_params();
          // Drop the partition from the corresponding table. Get the table object
          // with an updated catalog version. If the partition does not exist and
-          // "IfExists" is true, then return without populating the response object.
-          // If "purge" option is specified partition data is purged by skipping
-          // Trash, if configured.
-          refreshedTable = alterTableDropPartition(tbl,
-              dropPartParams.getPartition_spec(),
-              dropPartParams.isIf_exists(), dropPartParams.isPurge());
+          // "IfExists" is true, null is returned. If "purge" option is specified
+          // partition data is purged by skipping Trash, if configured.
+          refreshedTable = alterTableDropPartition(
+              tbl, dropPartParams.getPartition_set(),
+              dropPartParams.isIf_exists(),
+              dropPartParams.isPurge(), numUpdatedPartitions);
           if (refreshedTable != null) {
             refreshedTable.setCatalogVersion(newCatalogVersion);
             addTableToCatalogUpdate(refreshedTable, response.result);
           }
-          return;
+          resultColVal.setString_val(
+              "Dropped " + numUpdatedPartitions.getRef() + " partition(s).");
+          setResultSet = true;
+          reloadMetadata = false;
+          break;
         case RENAME_TABLE:
         case RENAME_VIEW:
           Preconditions.checkState(false,
               "RENAME TABLE/VIEW operation has been processed");
-          return;
+          break;
         case SET_FILE_FORMAT:
           TAlterTableSetFileFormatParams fileFormatParams =
               params.getSet_file_format_params();
-          List<TPartitionKeyValue> fileFormatPartitionSpec = null;
-          if (fileFormatParams.isSetPartition_spec()) {
-            fileFormatPartitionSpec = fileFormatParams.getPartition_spec();
+          reloadFileMetadata = alterTableSetFileFormat(
+              tbl, fileFormatParams.getPartition_set(), fileFormatParams.getFile_format(),
+              numUpdatedPartitions);
+
+          if (fileFormatParams.isSetPartition_set()) {
+            resultColVal.setString_val(
+                "Updated " + numUpdatedPartitions.getRef() + " partition(s).");
+          } else {
+            resultColVal.setString_val("Updated table.");
           }
-          reloadFileMetadata = alterTableSetFileFormat(tbl, fileFormatPartitionSpec,
-              fileFormatParams.getFile_format());
+          setResultSet = true;
           break;
         case SET_LOCATION:
           TAlterTableSetLocationParams setLocationParams =
               params.getSet_location_params();
-          List<TPartitionKeyValue> partitionSpec = null;
-          if (setLocationParams.isSetPartition_spec()) {
-            partitionSpec = setLocationParams.getPartition_spec();
-          }
-          reloadFileMetadata = alterTableSetLocation(tbl, partitionSpec,
-              setLocationParams.getLocation());
+          reloadFileMetadata = alterTableSetLocation(tbl,
+              setLocationParams.getPartition_spec(), setLocationParams.getLocation());
           break;
         case SET_TBL_PROPERTIES:
-          alterTableSetTblProperties(tbl, params.getSet_tbl_properties_params());
+          alterTableSetTblProperties(tbl, params.getSet_tbl_properties_params(),
+              numUpdatedPartitions);
+          if (params.getSet_tbl_properties_params().isSetPartition_set()) {
+            resultColVal.setString_val(
+                "Updated " + numUpdatedPartitions.getRef() + " partition(s).");
+          } else {
+            resultColVal.setString_val("Updated table.");
+          }
+          setResultSet = true;
           break;
         case UPDATE_STATS:
           Preconditions.checkState(params.isSetUpdate_stats_params());
-          alterTableUpdateStats(tbl, params.getUpdate_stats_params(), response);
+          Reference<Long> numUpdatedColumns = new Reference<>(0L);
+          alterTableUpdateStats(tbl, params.getUpdate_stats_params(), response,
+              numUpdatedPartitions, numUpdatedColumns);
           reloadTableSchema = true;
+          resultColVal.setString_val("Updated " + 
numUpdatedPartitions.getRef() +
+              " partition(s) and " + numUpdatedColumns.getRef() + " 
column(s).");
+          setResultSet = true;
           break;
         case SET_CACHED:
           Preconditions.checkState(params.isSetSet_cached_params());
-          if (params.getSet_cached_params().getPartition_spec() == null) {
+          String op = params.getSet_cached_params().getCache_op().isSet_cached() ?
+              "Cached " : "Uncached ";
+          if (params.getSet_cached_params().getPartition_set() == null) {
            reloadFileMetadata = alterTableSetCached(tbl, params.getSet_cached_params());
+            resultColVal.setString_val(op + "table.");
           } else {
-            alterPartitionSetCached(tbl, params.getSet_cached_params());
+            alterPartitionSetCached(tbl, params.getSet_cached_params(),
+                numUpdatedPartitions);
+            resultColVal.setString_val(
+                op + numUpdatedPartitions.getRef() + " partition(s).");
           }
+          setResultSet = true;
           break;
         case RECOVER_PARTITIONS:
           alterTableRecoverPartitions(tbl);
@@ -450,9 +491,21 @@ public class CatalogOpExecutor {
               "Unknown ALTER TABLE operation type: " + params.getAlter_type());
       }
 
-      loadTableMetadata(tbl, newCatalogVersion, reloadFileMetadata, reloadTableSchema,
-          null);
-      addTableToCatalogUpdate(tbl, response.result);
+      if (reloadMetadata) {
+        loadTableMetadata(tbl, newCatalogVersion, reloadFileMetadata, reloadTableSchema,
+            null);
+        addTableToCatalogUpdate(tbl, response.result);
+      }
+
+      if (setResultSet) {
+        TResultSet resultSet = new TResultSet();
+        resultSet.setSchema(new TResultSetMetadata(Lists.newArrayList(
+            new TColumn("summary", Type.STRING.toThrift()))));
+        TResultRow resultRow = new TResultRow();
+        resultRow.setColVals(Lists.newArrayList(resultColVal));
+        resultSet.setRows(Lists.newArrayList(resultRow));
+        response.setResult_set(resultSet);
+      }
     } // end of synchronized block
   }
 
@@ -550,7 +603,8 @@ public class CatalogOpExecutor {
    * in batches of size 'MAX_PARTITION_UPDATES_PER_RPC'.
    */
  private void alterTableUpdateStats(Table table, TAlterTableUpdateStatsParams params,
-      TDdlExecResponse resp) throws ImpalaException {
+      TDdlExecResponse resp, Reference<Long> numUpdatedPartitions,
+      Reference<Long> numUpdatedColumns) throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(table));
     if (params.isSetTable_stats()) {
       // Updating table and column stats via COMPUTE STATS.
@@ -578,9 +632,9 @@ public class CatalogOpExecutor {
       }
     }
 
-    int numTargetedPartitions = 0;
-    int numUpdatedColumns = 0;
-    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
+    long numTargetedPartitions = 0L;
+    long numTargetedColumns = 0L;
+    try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
       // Update the table and partition row counts based on the query results.
       List<HdfsPartition> modifiedParts = Lists.newArrayList();
       if (params.isSetTable_stats()) {
@@ -592,12 +646,12 @@ public class CatalogOpExecutor {
       if (params.isSetColumn_stats()) {
         // Create Hive column stats from the query results.
         colStats = createHiveColStats(params.getColumn_stats(), table);
-        numUpdatedColumns = colStats.getStatsObjSize();
+        numTargetedColumns = colStats.getStatsObjSize();
       }
 
       // Update all partitions.
       bulkAlterPartitions(table.getDb().getName(), table.getName(), modifiedParts);
-      if (numUpdatedColumns > 0) {
+      if (numTargetedColumns > 0) {
         Preconditions.checkNotNull(colStats);
         // Update column stats.
         try {
@@ -611,18 +665,8 @@ public class CatalogOpExecutor {
       // lastDdlTime is as accurate as possible.
       applyAlterTable(msTbl);
     }
-
-    // Set the results to be reported to the client.
-    TResultSet resultSet = new TResultSet();
-    resultSet.setSchema(new TResultSetMetadata(Lists.newArrayList(
-        new TColumn("summary", Type.STRING.toThrift()))));
-    TColumnValue resultColVal = new TColumnValue();
-    resultColVal.setString_val("Updated " + numTargetedPartitions + " 
partition(s) and " +
-        numUpdatedColumns + " column(s).");
-    TResultRow resultRow = new TResultRow();
-    resultRow.setColVals(Lists.newArrayList(resultColVal));
-    resultSet.setRows(Lists.newArrayList(resultRow));
-    resp.setResult_set(resultSet);
+    numUpdatedPartitions.setRef(numTargetedPartitions);
+    numUpdatedColumns.setRef(numTargetedColumns);
   }
 
   /**
@@ -983,33 +1027,27 @@ public class CatalogOpExecutor {
     synchronized(table) {
       long newCatalogVersion = catalog_.incrementAndGetCatalogVersion();
       catalog_.getLock().writeLock().unlock();
-      if (params.getPartition_spec() == null) {
+      if (params.getPartition_set() == null) {
         // TODO: Report the number of updated partitions/columns to the user?
+        // TODO: bulk alter the partitions.
         dropColumnStats(table);
         dropTableStats(table);
       } else {
-        HdfsPartition partition =
-            ((HdfsTable)table).getPartitionFromThriftPartitionSpec(
-                params.getPartition_spec());
-        if (partition == null) {
-          List<String> partitionDescription = Lists.newArrayList();
-          for (TPartitionKeyValue v: params.getPartition_spec()) {
-            partitionDescription.add(v.name + " = " + v.value);
-          }
-          throw new ImpalaRuntimeException("Could not find partition: " +
-              Joiner.on("/").join(partitionDescription));
-        }
+        List<HdfsPartition> partitions =
+            ((HdfsTable)table).getPartitionsFromPartitionSet(params.getPartition_set());
+        if (partitions.isEmpty()) return;
 
-        if (partition.getPartitionStats() != null)  {
-          PartitionStatsUtil.deletePartStats(partition);
-          try {
-            applyAlterPartition(table, partition);
-          } finally {
-            partition.markDirty();
+        for (HdfsPartition partition : partitions) {
+          if (partition.getPartitionStats() != null) {
+            PartitionStatsUtil.deletePartStats(partition);
+            try {
+              applyAlterPartition(table, partition);
+            } finally {
+              partition.markDirty();
+            }
           }
         }
       }
-
       loadTableMetadata(table, newCatalogVersion, false, true, null);
       addTableToCatalogUpdate(table, resp.result);
     } // end of synchronization
@@ -1825,59 +1863,73 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Drops an existing partition from the given table in Hive. If the partition is cached,
+   * Drops existing partitions from the given table in Hive. If a partition is cached,
    * the associated cache directive will also be removed.
-   * Also drops the partition from its Hdfs table.
-   * Returns the table object with an updated catalog version. If the partition does not
-   * exist and "IfExists" is true, null is returned. If purge is true, partition data is
-   * permanently deleted.
+   * Also drops the corresponding partitions from its Hdfs table.
+   * Returns the table object with an updated catalog version. If none of the partitions
+   * exists and "IfExists" is true, null is returned. If purge is true, partition data is
+   * permanently deleted. numUpdatedPartitions is used to inform the client how many
+   * partitions were dropped in this operation.
    */
   private Table alterTableDropPartition(Table tbl,
-      List<TPartitionKeyValue> partitionSpec, boolean ifExists, boolean purge)
+      List<List<TPartitionKeyValue>> partitionSet,
+      boolean ifExists, boolean purge, Reference<Long> numUpdatedPartitions)
       throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(tbl));
+    Preconditions.checkNotNull(partitionSet);
+
     TableName tableName = tbl.getTableName();
-    if (ifExists && !catalog_.containsHdfsPartition(tableName.getDb(), 
tableName.getTbl(),
-        partitionSpec)) {
-      LOG.debug(String.format("Skipping partition drop because (%s) does not 
exist " +
-          "and ifExists is true.", Joiner.on(", ").join(partitionSpec)));
-      return null;
+    if (!ifExists) {
+      Preconditions.checkState(!partitionSet.isEmpty());
+    } else {
+      if (partitionSet.isEmpty()) {
+        LOG.debug(String.format("Ignoring empty partition list when dropping " 
+
+            "partitions from %s because ifExists is true.", tableName));
+        return tbl;
+      }
+    }
+
+    Preconditions.checkArgument(tbl instanceof HdfsTable);
+    List<HdfsPartition> parts =
+        ((HdfsTable) tbl).getPartitionsFromPartitionSet(partitionSet);
+
+    if (!ifExists && parts.isEmpty()) {
+      throw new PartitionNotFoundException(
+          "The partitions being dropped don't exist any more");
     }
 
-    HdfsPartition part = catalog_.getHdfsPartition(tableName.getDb(),
-        tableName.getTbl(), partitionSpec);
     org.apache.hadoop.hive.metastore.api.Table msTbl =
         tbl.getMetaStoreTable().deepCopy();
-    List<String> values = Lists.newArrayList();
-    // Need to add in the values in the same order they are defined in the table.
-    for (FieldSchema fs: msTbl.getPartitionKeys()) {
-      for (TPartitionKeyValue kv: partitionSpec) {
-        if (fs.getName().toLowerCase().equals(kv.getName().toLowerCase())) {
-          values.add(kv.getValue());
-        }
-      }
-    }
+
     PartitionDropOptions dropOptions = PartitionDropOptions.instance();
     dropOptions.purgeData(purge);
+    long numTargetedPartitions = 0L;
     try (MetaStoreClient msClient = catalog_.getMetaStoreClient()) {
-      msClient.getHiveClient().dropPartition(tableName.getDb(),
-          tableName.getTbl(), values, dropOptions);
-      updateLastDdlTime(msTbl, msClient);
-      if (part.isMarkedCached()) {
-        HdfsCachingUtil.uncachePartition(part);
-      }
-    } catch (NoSuchObjectException e) {
-      if (!ifExists) {
-        throw new ImpalaRuntimeException(
-            String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e);
+      for (HdfsPartition part : parts) {
+        try {
+          msClient.getHiveClient().dropPartition(tableName.getDb(), tableName.getTbl(),
+              part.getPartitionValuesAsStrings(true), dropOptions);
+          ++numTargetedPartitions;
+          if (part.isMarkedCached()) {
+            HdfsCachingUtil.uncachePartition(part);
+          }
+        } catch (NoSuchObjectException e) {
+          if (!ifExists) {
+            throw new ImpalaRuntimeException(
+                String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e);
+          }
+          LOG.debug(
+              String.format("Ignoring '%s' when dropping partitions from %s because" +
+              " ifExists is true.", e, tableName));
+        }
       }
-      LOG.debug(String.format("Ignoring '%s' when dropping partition from %s 
because" +
-          " ifExists is true.", e, tableName));
+      updateLastDdlTime(msTbl, msClient);
     } catch (TException e) {
       throw new ImpalaRuntimeException(
           String.format(HMS_RPC_ERROR_FORMAT_STR, "dropPartition"), e);
     }
-    return catalog_.dropPartition(tbl, partitionSpec);
+    numUpdatedPartitions.setRef(numTargetedPartitions);
+    return catalog_.dropPartitions(tbl, partitionSet);
   }
 
   /**
@@ -1970,18 +2022,19 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Changes the file format for the given table or partition. This is a metadata only
+   * Changes the file format for the given table or partitions. This is a metadata only
    * operation, existing table data will not be converted to the new format. After
    * changing the file format the table metadata is marked as invalid and will be
    * reloaded on the next access.
    */
   private boolean alterTableSetFileFormat(Table tbl,
-      List<TPartitionKeyValue> partitionSpec, THdfsFileFormat fileFormat)
+      List<List<TPartitionKeyValue>> partitionSet, THdfsFileFormat fileFormat,
+      Reference<Long> numUpdatedPartitions)
       throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(tbl));
-    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
+    Preconditions.checkState(partitionSet == null || !partitionSet.isEmpty());
     boolean reloadFileMetadata = false;
-    if (partitionSpec == null) {
+    if (partitionSet == null) {
       org.apache.hadoop.hive.metastore.api.Table msTbl =
           tbl.getMetaStoreTable().deepCopy();
       setStorageDescriptorFileFormat(msTbl.getSd(), fileFormat);
@@ -1991,16 +2044,17 @@ public class CatalogOpExecutor {
       applyAlterTable(msTbl);
       reloadFileMetadata = true;
     } else {
-      TableName tableName = tbl.getTableName();
-      HdfsPartition partition = catalog_.getHdfsPartition(
-          tableName.getDb(), tableName.getTbl(), partitionSpec);
-      Preconditions.checkNotNull(partition);
-      partition.setFileFormat(HdfsFileFormat.fromThrift(fileFormat));
-      try {
-        applyAlterPartition(tbl, partition);
-      } finally {
-        partition.markDirty();
+      Preconditions.checkArgument(tbl instanceof HdfsTable);
+      List<HdfsPartition> partitions =
+          ((HdfsTable) tbl).getPartitionsFromPartitionSet(partitionSet);
+      List<HdfsPartition> modifiedParts = Lists.newArrayList();
+      for (HdfsPartition partition: partitions) {
+        partition.setFileFormat(HdfsFileFormat.fromThrift(fileFormat));
+        modifiedParts.add(partition);
       }
+      TableName tableName = tbl.getTableName();
+      bulkAlterPartitions(tableName.getDb(), tableName.getTbl(), modifiedParts);
+      numUpdatedPartitions.setRef((long) modifiedParts.size());
     }
     return reloadFileMetadata;
   }
@@ -2024,7 +2078,6 @@ public class CatalogOpExecutor {
   private boolean alterTableSetLocation(Table tbl,
      List<TPartitionKeyValue> partitionSpec, String location) throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(tbl));
-    Preconditions.checkState(partitionSpec == null || !partitionSpec.isEmpty());
     boolean reloadFileMetadata = false;
     if (partitionSpec == null) {
       org.apache.hadoop.hive.metastore.api.Table msTbl =
@@ -2047,35 +2100,44 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Appends to the table or partition property metadata for the given table, replacing
+   * Appends to the table or partitions' property metadata for the given table, replacing
    * the values of any keys that already exist.
    */
   private void alterTableSetTblProperties(Table tbl,
-      TAlterTableSetTblPropertiesParams params) throws ImpalaException {
+      TAlterTableSetTblPropertiesParams params, Reference<Long> numUpdatedPartitions)
+      throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(tbl));
     Map<String, String> properties = params.getProperties();
     Preconditions.checkNotNull(properties);
-    if (params.isSetPartition_spec()) {
-      TableName tableName = tbl.getTableName();
-      // Alter partition params.
-      HdfsPartition partition = catalog_.getHdfsPartition(
-          tableName.getDb(), tableName.getTbl(), params.getPartition_spec());
-      switch (params.getTarget()) {
-        case TBL_PROPERTY:
-          partition.getParameters().putAll(properties);
-          break;
-        case SERDE_PROPERTY:
-          partition.getSerdeInfo().getParameters().putAll(properties);
-          break;
-        default:
-          throw new UnsupportedOperationException(
-              "Unknown target TTablePropertyType: " + params.getTarget());
+    if (params.isSetPartition_set()) {
+      Preconditions.checkArgument(tbl instanceof HdfsTable);
+      List<HdfsPartition> partitions =
+          ((HdfsTable) tbl).getPartitionsFromPartitionSet(params.getPartition_set());
+
+      List<HdfsPartition> modifiedParts = Lists.newArrayList();
+      for (HdfsPartition partition: partitions) {
+        switch (params.getTarget()) {
+          case TBL_PROPERTY:
+            partition.getParameters().putAll(properties);
+            break;
+          case SERDE_PROPERTY:
+            partition.getSerdeInfo().getParameters().putAll(properties);
+            break;
+          default:
+            throw new UnsupportedOperationException(
+                "Unknown target TTablePropertyType: " + params.getTarget());
+        }
+        modifiedParts.add(partition);
       }
+      TableName tableName = tbl.getTableName();
       try {
-        applyAlterPartition(tbl, partition);
+        bulkAlterPartitions(tableName.getDb(), tableName.getTbl(), modifiedParts);
       } finally {
-        partition.markDirty();
+        for (HdfsPartition modifiedPart : modifiedParts) {
+          modifiedPart.markDirty();
+        }
       }
+      numUpdatedPartitions.setRef((long) modifiedParts.size());
     } else {
       // Alter table params.
       org.apache.hadoop.hive.metastore.api.Table msTbl =
@@ -2226,56 +2288,68 @@ public class CatalogOpExecutor {
   }
 
   /**
-   * Caches or uncaches the HDFS location of the target partition and updates the
-   * partition's metadata in Hive Metastore Store. If a partition is being cached, the
+   * Caches or uncaches the HDFS location of the target partitions and updates the
+   * partitions' metadata in the Hive Metastore. If a partition is being cached, the
    * partition properties will have the ID of the cache directive added. If the partition
    * is being uncached, any outstanding cache directive will be dropped and the cache
    * directive ID property key will be cleared.
    */
-  private void alterPartitionSetCached(Table tbl, TAlterTableSetCachedParams params)
+  private void alterPartitionSetCached(Table tbl,
+      TAlterTableSetCachedParams params, Reference<Long> numUpdatedPartitions)
       throws ImpalaException {
     Preconditions.checkState(Thread.holdsLock(tbl));
     THdfsCachingOp cacheOp = params.getCache_op();
     Preconditions.checkNotNull(cacheOp);
-    Preconditions.checkNotNull(params.getPartition_spec());
-    // Alter partition params.
+    Preconditions.checkNotNull(params.getPartition_set());
     TableName tableName = tbl.getTableName();
-    HdfsPartition partition = catalog_.getHdfsPartition(
-        tableName.getDb(), tableName.getTbl(), params.getPartition_spec());
+    Preconditions.checkArgument(tbl instanceof HdfsTable);
+    List<HdfsPartition> partitions =
+        ((HdfsTable) tbl).getPartitionsFromPartitionSet(params.getPartition_set());
+    List<HdfsPartition> modifiedParts = Lists.newArrayList();
     if (cacheOp.isSet_cached()) {
-      // The directive is null if the partition is not cached
-      Long directiveId = HdfsCachingUtil.getCacheDirectiveId(
-          partition.getParameters());
-      short replication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
-      List<Long> cacheDirs = Lists.newArrayList();
-
-      if (directiveId == null) {
-        cacheDirs.add(HdfsCachingUtil.submitCachePartitionDirective(partition,
-            cacheOp.getCache_pool_name(), replication));
-      } else {
-        if (HdfsCachingUtil.isUpdateOp(cacheOp, partition.getParameters())) {
-          HdfsCachingUtil.validateCachePool(cacheOp, directiveId, tableName, partition);
-          cacheDirs.add(HdfsCachingUtil.modifyCacheDirective(directiveId, partition,
-              cacheOp.getCache_pool_name(), replication));
+      for (HdfsPartition partition : partitions) {
+        // The directive is null if the partition is not cached
+        Long directiveId =
+            HdfsCachingUtil.getCacheDirectiveId(partition.getParameters());
+        short replication = HdfsCachingUtil.getReplicationOrDefault(cacheOp);
+        List<Long> cacheDirs = Lists.newArrayList();
+        if (directiveId == null) {
+          cacheDirs.add(HdfsCachingUtil.submitCachePartitionDirective(
+              partition, cacheOp.getCache_pool_name(), replication));
+        } else {
+          if (HdfsCachingUtil.isUpdateOp(cacheOp, partition.getParameters())) {
+            HdfsCachingUtil.validateCachePool(cacheOp, directiveId, tableName, partition);
+            cacheDirs.add(HdfsCachingUtil.modifyCacheDirective(
+                directiveId, partition, cacheOp.getCache_pool_name(),
+                replication));
+          }
         }
-      }
 
-      // Once the cache directives are submitted, observe the status of the caching
-      // until no more progress is made -- either fully cached or out of cache memory
-      if (!cacheDirs.isEmpty()) {
-        catalog_.watchCacheDirs(cacheDirs, tableName.toThrift());
+        // Once the cache directives are submitted, observe the status of the caching
+        // until no more progress is made -- either fully cached or out of cache memory
+        if (!cacheDirs.isEmpty()) {
+          catalog_.watchCacheDirs(cacheDirs, tableName.toThrift());
+        }
+        if (!partition.isMarkedCached()) {
+          modifiedParts.add(partition);
+        }
       }
-
     } else {
-      // Partition is not cached, just return.
-      if (!partition.isMarkedCached()) return;
-      HdfsCachingUtil.uncachePartition(partition);
+      for (HdfsPartition partition : partitions) {
+        if (partition.isMarkedCached()) {
+          HdfsCachingUtil.uncachePartition(partition);
+          modifiedParts.add(partition);
+        }
+      }
     }
     try {
-      applyAlterPartition(tbl, partition);
+      bulkAlterPartitions(tableName.getDb(), tableName.getTbl(), modifiedParts);
     } finally {
-      partition.markDirty();
+      for (HdfsPartition modifiedPart : modifiedParts) {
+        modifiedPart.markDirty();
+      }
     }
+    numUpdatedPartitions.setRef((long) modifiedParts.size());
   }
 
   /**

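Usage sketch (illustrative, not part of the patch): the per-case result-set construction that alterTableUpdateStats() used to do is now shared by all ALTER TABLE branches. Each branch fills 'resultColVal' and flips 'setResultSet'; the common tail then builds the one-column summary reported to the client:

    TResultSet resultSet = new TResultSet();
    resultSet.setSchema(new TResultSetMetadata(Lists.newArrayList(
        new TColumn("summary", Type.STRING.toThrift()))));
    TResultRow resultRow = new TResultRow();
    resultRow.setColVals(Lists.newArrayList(resultColVal));
    resultSet.setRows(Lists.newArrayList(resultRow));
    response.setResult_set(resultSet);
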
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index a974278..8657182 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -1203,7 +1203,7 @@ public class Frontend {
     Table table = impaladCatalog_.getTable(request.getTable_name().getDb_name(),
         request.getTable_name().getTable_name());
     if (table instanceof HdfsTable) {
-      return ((HdfsTable) table).getFiles(request.getPartition_spec());
+      return ((HdfsTable) table).getFiles(request.getPartition_set());
     } else {
       throw new InternalException("SHOW FILES only supports Hdfs table. " +
           "Unsupported table class: " + table.getClass());

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index b005924..0a78dd5 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -77,21 +77,6 @@ public class AnalyzeDDLTest extends FrontendTestBase {
       AnalysisError("alter table functional_hbase.alltypesagg " + kw +
           " partition (i=1)", "Table is not partitioned: 
functional_hbase.alltypesagg");
 
-      // Duplicate partition key name
-      AnalysisError("alter table functional.alltypes " + kw +
-          " partition(year=2050, year=2051)", "Duplicate partition key name: 
year");
-      // Not a partition column
-      AnalysisError("alter table functional.alltypes " + kw +
-          " partition(year=2050, int_col=1)",
-          "Column 'int_col' is not a partition column in table: 
functional.alltypes");
-
-      // NULL partition keys
-      AnalyzesOk("alter table functional.alltypes " + kw +
-          " partition(year=NULL, month=1)");
-      AnalyzesOk("alter table functional.alltypes " + kw +
-          " partition(year=NULL, month=NULL)");
-      AnalyzesOk("alter table functional.alltypes " + kw +
-          " partition(year=ascii(null), month=ascii(NULL))");
       // Empty string partition keys
       AnalyzesOk("alter table functional.insert_string_partitioned " + kw +
           " partition(s2='')");
@@ -99,41 +84,13 @@ public class AnalyzeDDLTest extends FrontendTestBase {
       AnalyzesOk("alter table functional.alltypes " + kw +
           " partition(year=-1, month=cast((10+5*4) as INT))");
 
-      // Arbitrary exprs as partition key values. Non-constant exprs should fail.
-      AnalysisError("alter table functional.alltypes " + kw +
-          " partition(year=2050, month=int_col)",
-          "Non-constant expressions are not supported as static partition-key 
values " +
-          "in 'month=int_col'.");
-      AnalysisError("alter table functional.alltypes " + kw +
-          " partition(year=cast(int_col as int), month=12)",
-          "Non-constant expressions are not supported as static partition-key 
values " +
-          "in 'year=CAST(int_col AS INT)'.");
-
-      // Not a valid column
-      AnalysisError("alter table functional.alltypes " + kw +
-          " partition(year=2050, blah=1)",
-          "Partition column 'blah' not found in table: functional.alltypes");
-
-      // Data types don't match
-      AnalysisError(
-          "alter table functional.insert_string_partitioned " + kw +
-          " partition(s2=1234)",
-          "Value of partition spec (column=s2) has incompatible type: 
'SMALLINT'. " +
-          "Expected type: 'STRING'.");
-
-      // Loss of precision
-      AnalysisError(
-          "alter table functional.alltypes " + kw +
-          " partition(year=100000000000, month=10)",
-          "Partition key value may result in loss of precision.\nWould need to 
cast" +
-          " '100000000000' to 'INT' for partition column: year");
-
-
       // Table/Db does not exist
       AnalysisError("alter table db_does_not_exist.alltypes " + kw +
-          " partition (i=1)", "Database does not exist: db_does_not_exist");
+          " partition (i=1)", "Could not resolve table reference: " +
+          "'db_does_not_exist.alltypes'");
       AnalysisError("alter table functional.table_does_not_exist " + kw +
-          " partition (i=1)", "Table does not exist: 
functional.table_does_not_exist");
+          " partition (i=1)", "Could not resolve table reference: " +
+          "'functional.table_does_not_exist'");
 
       // Cannot ALTER TABLE a view.
       AnalysisError("alter table functional.alltypes_view " + kw +
@@ -143,8 +100,60 @@ public class AnalyzeDDLTest extends FrontendTestBase {
           " partition(year=2050, month=10)",
           "ALTER TABLE not allowed on a table produced by a data source: " +
           "functional.alltypes_datasource");
+
+      // NULL partition keys
+      AnalyzesOk("alter table functional.alltypes " + kw +
+          " partition(year=NULL, month=1)");
+      AnalyzesOk("alter table functional.alltypes " + kw +
+          " partition(year=NULL, month=NULL)");
+      AnalyzesOk("alter table functional.alltypes " + kw +
+          " partition(year=ascii(null), month=ascii(NULL))");
     }
 
+    // Data types don't match
+    AnalysisError("alter table functional.insert_string_partitioned add" +
+                  " partition(s2=1234)",
+                  "Value of partition spec (column=s2) has incompatible type: 
" +
+                  "'SMALLINT'. Expected type: 'STRING'.");
+    AnalysisError("alter table functional.insert_string_partitioned drop" +
+                  " partition(s2=1234)",
+                  "operands of type STRING and SMALLINT are not comparable: s2 
= 1234");
+
+    // Loss of precision
+    AnalysisError(
+        "alter table functional.alltypes add " +
+        "partition(year=100000000000, month=10) ",
+        "Partition key value may result in loss of precision.\nWould need to 
cast " +
+        "'100000000000' to 'INT' for partition column: year");
+
+    // Duplicate partition key name
+    AnalysisError("alter table functional.alltypes add " +
+        "partition(year=2050, year=2051)", "Duplicate partition key name: 
year");
+
+    // Arbitrary exprs as partition key values. Non-constant exprs should fail.
+    AnalysisError("alter table functional.alltypes add " +
+        "partition(year=2050, month=int_col) ",
+        "Non-constant expressions are not supported as static partition-key " +
+        "values in 'month=int_col'.");
+    AnalysisError("alter table functional.alltypes add " +
+        "partition(year=cast(int_col as int), month=12) ",
+        "Non-constant expressions are not supported as static partition-key " +
+        "values in 'year=CAST(int_col AS INT)'.");
+
+    // Not a partition column
+    AnalysisError("alter table functional.alltypes drop " +
+        "partition(year=2050, int_col=1)",
+        "Partition exprs cannot contain non-partition column(s): int_col = 
1.");
+
+    // Arbitrary exprs as partition key values. Non-partition columns should fail.
+    AnalysisError("alter table functional.alltypes drop " +
+        "partition(year=2050, month=int_col) ",
+        "Partition exprs cannot contain non-partition column(s): month = 
int_col.");
+    AnalysisError("alter table functional.alltypes drop " +
+        "partition(year=cast(int_col as int), month=12) ",
+        "Partition exprs cannot contain non-partition column(s): " +
+        "year = CAST(int_col AS INT).");
+
     // IF NOT EXISTS properly checks for partition existence
     AnalyzesOk("alter table functional.alltypes add " +
         "partition(year=2050, month=10)");
@@ -152,20 +161,63 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "partition(year=2010, month=10)",
         "Partition spec already exists: (year=2010, month=10).");
     AnalyzesOk("alter table functional.alltypes add if not exists " +
-        " partition(year=2010, month=10)");
+        "partition(year=2010, month=10)");
     AnalyzesOk("alter table functional.alltypes add if not exists " +
-        " partition(year=2010, month=10) location " +
+        "partition(year=2010, month=10) location " +
         "'/test-warehouse/alltypes/year=2010/month=10'");
 
     // IF EXISTS properly checks for partition existence
+    // with a fully specified partition.
     AnalyzesOk("alter table functional.alltypes drop " +
         "partition(year=2010, month=10)");
     AnalysisError("alter table functional.alltypes drop " +
         "partition(year=2050, month=10)",
-        "Partition spec does not exist: (year=2050, month=10).");
+        "No matching partition(s) found.");
     AnalyzesOk("alter table functional.alltypes drop if exists " +
         "partition(year=2050, month=10)");
 
+    // NULL partition keys
+    AnalysisError("alter table functional.alltypes drop " +
+      "partition(year=NULL, month=1)",
+      "No matching partition(s) found.");
+    AnalysisError("alter table functional.alltypes drop " +
+      "partition(year=NULL, month is NULL)",
+      "No matching partition(s) found.");
+
+    // Drop partition using predicates
+    // IF EXISTS is added here
+    AnalyzesOk("alter table functional.alltypes drop " +
+        "partition(year<2011, month!=10)");
+    AnalysisError("alter table functional.alltypes drop " +
+        "partition(1=1, month=10)",
+        "Invalid partition expr 1 = 1. " +
+        "A partition spec may not contain constant predicates.");
+    AnalyzesOk("alter table functional.alltypes drop " +
+        "partition(year>1050, month=10)");
+    AnalyzesOk("alter table functional.alltypes drop " +
+        "partition(year>1050 and month=10)");
+    AnalyzesOk("alter table functional.alltypes drop " +
+        "partition(month=10)");
+    AnalyzesOk("alter table functional.alltypes drop " +
+        "partition(month+2000=year)");
+    AnalyzesOk("alter table functional.alltypes drop " +
+      "partition(year>9050, month=10)");
+    AnalyzesOk("alter table functional.alltypes drop if exists " +
+        "partition(year>9050, month=10)");
+
+    // Not a valid column
+    AnalysisError("alter table functional.alltypes add " +
+        "partition(year=2050, blah=1)",
+        "Partition column 'blah' not found in table: functional.alltypes");
+    AnalysisError("alter table functional.alltypes drop " +
+        "partition(year=2050, blah=1)",
+        "Could not resolve column/field reference: 'blah'");
+
+    // Not a partition column
+    AnalysisError("alter table functional.alltypes add " +
+      "partition(year=2050, int_col=1) ",
+      "Column 'int_col' is not a partition column in table: 
functional.alltypes");
+
     // Caching ops
     AnalyzesOk("alter table functional.alltypes add " +
         "partition(year=2050, month=10) cached in 'testPool'");
@@ -185,25 +237,25 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Valid URIs.
     AnalyzesOk("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location " +
+        "partition(year=2050, month=10) location " +
         "'/test-warehouse/alltypes/year=2010/month=10'");
     AnalyzesOk("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location " +
+        "partition(year=2050, month=10) location " +
         "'hdfs://localhost:20500/test-warehouse/alltypes/year=2010/month=10'");
     AnalyzesOk("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location " +
+        "partition(year=2050, month=10) location " +
         "'s3n://bucket/test-warehouse/alltypes/year=2010/month=10'");
     AnalyzesOk("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location " +
+        "partition(year=2050, month=10) location " +
         "'file:///test-warehouse/alltypes/year=2010/month=10'");
 
     // Invalid URIs.
     AnalysisError("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location " +
+        "partition(year=2050, month=10) location " +
         "'foofs://bar/test-warehouse/alltypes/year=2010/month=10'",
         "No FileSystem for scheme: foofs");
     AnalysisError("alter table functional.alltypes add " +
-        " partition(year=2050, month=10) location '  '",
+        "partition(year=2050, month=10) location '  '",
         "URI path cannot be empty.");
   }
 
@@ -242,14 +294,19 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Table/Db does not exist
     AnalysisError("alter table db_does_not_exist.alltypes add columns (i int)",
-        "Database does not exist: db_does_not_exist");
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist add columns (i 
int)",
-        "Table does not exist: functional.table_does_not_exist");
+        "Could not resolve table reference: 
'functional.table_does_not_exist'");
 
     // Cannot ALTER TABLE a view.
     AnalysisError("alter table functional.alltypes_view " +
         "add columns (c1 string comment 'hi')",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col " +
+        "add columns (c1 string comment 'hi')",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     // Cannot ALTER TABLE produced by a data source.
     AnalysisError("alter table functional.alltypes_datasource " +
         "add columns (c1 string comment 'hi')",
@@ -278,13 +335,17 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Table/Db does not exist
     AnalysisError("alter table db_does_not_exist.alltypes drop column col1",
-        "Database does not exist: db_does_not_exist");
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist drop column 
col1",
-        "Table does not exist: functional.table_does_not_exist");
+        "Could not resolve table reference: 
'functional.table_does_not_exist'");
 
     // Cannot ALTER TABLE a view.
     AnalysisError("alter table functional.alltypes_view drop column int_col",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col drop column 
int_col",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     // Cannot ALTER TABLE produced by a data source.
     AnalysisError("alter table functional.alltypes_datasource drop column 
int_col",
         "ALTER TABLE not allowed on a table produced by a data source: " +
@@ -324,14 +385,19 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Table/Db does not exist
     AnalysisError("alter table db_does_not_exist.alltypes change c1 c2 int",
-        "Database does not exist: db_does_not_exist");
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist change c1 c2 
double",
-        "Table does not exist: functional.table_does_not_exist");
+        "Could not resolve table reference: 
'functional.table_does_not_exist'");
 
     // Cannot ALTER TABLE a view.
     AnalysisError("alter table functional.alltypes_view " +
         "change column int_col int_col2 int",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col " +
+        "change column int_col int_col2 int",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     // Cannot ALTER TABLE produced by a data source.
     AnalysisError("alter table functional.alltypes_datasource " +
         "change column int_col int_col2 int",
@@ -353,14 +419,20 @@ public class AnalyzeDDLTest extends FrontendTestBase {
                "set location '/a/b'");
     AnalyzesOk("alter table functional.alltypes PARTITION (month=11, 
year=2010) " +
                "set fileformat parquetfile");
+    AnalyzesOk("alter table functional.alltypes PARTITION (month<=11, 
year=2010) " +
+               "set fileformat parquetfile");
     AnalyzesOk("alter table functional.stringpartitionkey PARTITION " +
                "(string_col='partition1') set fileformat parquet");
     AnalyzesOk("alter table functional.stringpartitionkey PARTITION " +
                "(string_col='PaRtiTion1') set location '/a/b/c'");
     AnalyzesOk("alter table functional.alltypes PARTITION (year=2010, 
month=11) " +
                "set tblproperties('a'='1')");
+    AnalyzesOk("alter table functional.alltypes PARTITION (year<=2010, 
month=11) " +
+               "set tblproperties('a'='1')");
     AnalyzesOk("alter table functional.alltypes PARTITION (year=2010, 
month=11) " +
                "set serdeproperties ('a'='2')");
+    AnalyzesOk("alter table functional.alltypes PARTITION (year<=2010, 
month=11) " +
+               "set serdeproperties ('a'='2')");
 
     {
       // Check that long_properties fail at the analysis layer
@@ -422,33 +494,33 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalyzesOk("alter table functional.alltypes PARTITION " +
                "(year=cast(100*20+10 as INT), month=cast(2+9 as INT)) " +
                "set location '/a/b'");
-    // Arbitrary exprs as partition key values. Non-constant exprs should fail.
-    AnalysisError("alter table functional.alltypes PARTITION " +
-                  "(Year=2050, month=int_col) set fileformat sequencefile",
-                  "Non-constant expressions are not supported as static 
partition-key " +
-                  "values in 'month=int_col'.");
+
+    // Arbitrary exprs as partition key values. One-partition-column-bound exprs are ok.
+    AnalyzesOk("alter table functional.alltypes PARTITION " +
+               "(Year*2=Year+2010, month=11) set fileformat sequencefile");
+
+    // Arbitrary exprs as partition key values. Non-partition-column exprs.
     AnalysisError("alter table functional.alltypes PARTITION " +
-                  "(Year=2050, month=int_col) set location '/a/b'",
-                  "Non-constant expressions are not supported as static 
partition-key " +
-                  "values in 'month=int_col'.");
+                  "(int_col=3) set fileformat sequencefile",
+                  "Partition exprs cannot contain non-partition column(s): 
int_col = 3.");
+
+    // Partition expr matches more than one partition in set location statement.
+    AnalysisError("alter table functional.alltypes PARTITION (year!=20) " +
+                  "set location '/a/b'",
+                  "Partition expr in set location statements can only match " +
+                  "one partition. Too many matched partitions 
year=2009/month=1," +
+                  "year=2009/month=2,year=2009/month=3");
 
     // Partition spec does not exist
     AnalysisError("alter table functional.alltypes PARTITION (year=2014, 
month=11) " +
                   "set location '/a/b'",
-                  "Partition spec does not exist: (year=2014, month=11)");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.alltypes PARTITION (year=2014, 
month=11) " +
                   "set tblproperties('a'='1')",
-                  "Partition spec does not exist: (year=2014, month=11)");
-    AnalysisError("alter table functional.alltypes PARTITION (year=2010) " +
-                  "set tblproperties('a'='1')",
-                  "Items in partition spec must exactly match the partition 
columns " +
-                  "in the table definition: functional.alltypes (1 vs 2)");
-    AnalysisError("alter table functional.alltypes PARTITION (year=2010, 
year=2010) " +
-                  "set location '/a/b'",
-                  "Duplicate partition key name: year");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.alltypes PARTITION (month=11, 
year=2014) " +
                   "set fileformat sequencefile",
-                  "Partition spec does not exist: (month=11, year=2014)");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.alltypesnopart PARTITION (month=1) " 
+
                   "set fileformat sequencefile",
                   "Table is not partitioned: functional.alltypesnopart");
@@ -457,34 +529,32 @@ public class AnalyzeDDLTest extends FrontendTestBase {
                   "Table is not partitioned: functional.alltypesnopart");
     AnalysisError("alter table functional.stringpartitionkey PARTITION " +
                   "(string_col='partition2') set location '/a/b'",
-                  "Partition spec does not exist: (string_col='partition2')");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.stringpartitionkey PARTITION " +
                   "(string_col='partition2') set fileformat sequencefile",
-                  "Partition spec does not exist: (string_col='partition2')");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.alltypes PARTITION " +
                  "(year=cast(10*20+10 as INT), month=cast(5*3 as INT)) " +
                   "set location '/a/b'",
-                  "Partition spec does not exist: " +
-                  "(year=CAST(10 * 20 + 10 AS INT), month=CAST(5 * 3 AS 
INT))");
+                  "No matching partition(s) found.");
     AnalysisError("alter table functional.alltypes PARTITION " +
                   "(year=cast(10*20+10 as INT), month=cast(5*3 as INT)) " +
                   "set fileformat sequencefile",
-                  "Partition spec does not exist: " +
-                  "(year=CAST(10 * 20 + 10 AS INT), month=CAST(5 * 3 AS 
INT))");
+                  "No matching partition(s) found.");
 
     // Table/Db does not exist
     AnalysisError("alter table db_does_not_exist.alltypes set fileformat 
sequencefile",
-        "Database does not exist: db_does_not_exist");
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist set fileformat 
rcfile",
-        "Table does not exist: functional.table_does_not_exist");
+        "Could not resolve table reference: 
'functional.table_does_not_exist'");
     AnalysisError("alter table db_does_not_exist.alltypes set location '/a/b'",
-        "Database does not exist: db_does_not_exist");
+        "Could not resolve table reference: 'db_does_not_exist.alltypes'");
     AnalysisError("alter table functional.table_does_not_exist set location 
'/a/b'",
-        "Table does not exist: functional.table_does_not_exist");
+        "Could not resolve table reference: 
'functional.table_does_not_exist'");
     AnalysisError("alter table functional.no_tbl partition(i=1) set location 
'/a/b'",
-        "Table does not exist: functional.no_tbl");
+        "Could not resolve table reference: 'functional.no_tbl'");
     AnalysisError("alter table no_db.alltypes partition(i=1) set fileformat 
textfile",
-        "Database does not exist: no_db");
+        "Could not resolve table reference: 'no_db.alltypes'");
 
     // Valid location
     AnalyzesOk("alter table functional.alltypes set location " +
@@ -507,6 +577,10 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Cannot ALTER TABLE a view.
     AnalysisError("alter table functional.alltypes_view set fileformat 
sequencefile",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col set fileformat 
sequencefile",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     // Cannot ALTER TABLE produced by a data source.
     AnalysisError("alter table functional.alltypes_datasource set fileformat 
parquet",
         "ALTER TABLE not allowed on a table produced by a data source: " +
@@ -524,6 +598,8 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalyzesOk("alter table functional.alltypes set cached in 'testPool'");
     AnalyzesOk("alter table functional.alltypes partition(year=2010, month=12) 
" +
         "set cached in 'testPool'");
+    AnalyzesOk("alter table functional.alltypes partition(year<=2010, 
month<=12) " +
+        "set cached in 'testPool'");
 
     // Replication factor
     AnalyzesOk("alter table functional.alltypes set cached in 'testPool' " +
@@ -542,6 +618,9 @@ public class AnalyzeDDLTest extends FrontendTestBase {
         "ALTER TABLE SET not currently supported on HBase tables.");
     AnalysisError("alter table functional.view_view set cached in 'testPool'",
         "ALTER TABLE not allowed on a view: functional.view_view");
+    AnalysisError("alter table allcomplextypes.int_array_col set cached in 
'testPool'",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
 
     AnalysisError("alter table functional.alltypes set cached in 'badPool'",
         "The specified cache pool does not exist: badPool");
@@ -565,17 +644,17 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalysisError("alter table functional.alltypestiny partition 
(year=2009,month=1) " +
         "set location '/test-warehouse/new_location'",
         "Target partition is cached, please uncache before changing the 
location " +
-        "using: ALTER TABLE functional.alltypestiny PARTITION (year=2009, 
month=1) " +
+        "using: ALTER TABLE functional.alltypestiny PARTITION (year = 2009, 
month = 1) " +
         "SET UNCACHED");
 
     // Table/db/partition do not exist
     AnalysisError("alter table baddb.alltypestiny set cached in 'testPool'",
-        "Database does not exist: baddb");
+        "Could not resolve table reference: 'baddb.alltypestiny'");
     AnalysisError("alter table functional.badtbl set cached in 'testPool'",
-        "Table does not exist: functional.badtbl");
+        "Could not resolve table reference: 'functional.badtbl'");
     AnalysisError("alter table functional.alltypestiny partition(year=9999, 
month=1) " +
         "set cached in 'testPool'",
-        "Partition spec does not exist: (year=9999, month=1).");
+        "No matching partition(s) found.");
   }
 
   @Test
@@ -646,7 +725,7 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Table does not exist.
     AnalysisError("alter table bad_tbl set column stats int_col 
('numNulls'='2')",
-        "Table does not exist: default.bad_tbl");
+        "Could not resolve table reference: 'bad_tbl'");
     // Column does not exist.
     AnalysisError(
         "alter table functional.alltypes set column stats bad_col 
('numNulls'='2')",
@@ -656,6 +735,12 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalysisError(
         "alter table functional.alltypes_view set column stats int_col 
('numNulls'='2')",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot set column stats of a nested collection.
+    AnalysisError(
+        "alter table allcomplextypes.int_array_col " +
+        "set column stats int_col ('numNulls'='2')",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     // Cannot set column stats of partition columns.
     AnalysisError(
         "alter table functional.alltypes set column stats month 
('numDVs'='10')",
@@ -835,6 +920,10 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Cannot ALTER TABLE a view.
     AnalysisError("alter table functional.alltypes_view rename to 
new_alltypes",
         "ALTER TABLE not allowed on a view: functional.alltypes_view");
+    // Cannot ALTER TABLE a nested collection.
+    AnalysisError("alter table allcomplextypes.int_array_col rename to 
new_alltypes",
+        createAnalyzer("functional"),
+        "Database does not exist: allcomplextypes");
 
     // It should be okay to rename an HBase table.
     AnalyzesOk("alter table functional_hbase.alltypes rename to new_alltypes");
@@ -847,13 +936,16 @@ public class AnalyzeDDLTest extends FrontendTestBase {
   public void TestAlterTableRecoverPartitions() throws CatalogException {
     AnalyzesOk("alter table functional.alltypes recover partitions");
     AnalysisError("alter table baddb.alltypes recover partitions",
-        "Database does not exist: baddb");
+        "Could not resolve table reference: 'baddb.alltypes'");
     AnalysisError("alter table functional.badtbl recover partitions",
-        "Table does not exist: functional.badtbl");
+        "Could not resolve table reference: 'functional.badtbl'");
     AnalysisError("alter table functional.alltypesnopart recover partitions",
         "Table is not partitioned: functional.alltypesnopart");
     AnalysisError("alter table functional.view_view recover partitions",
         "ALTER TABLE not allowed on a view: functional.view_view");
+    AnalysisError("alter table allcomplextypes.int_array_col recover 
partitions",
+        createAnalyzer("functional"),
+        "ALTER TABLE not allowed on a nested collection: 
allcomplextypes.int_array_col");
     AnalysisError("alter table functional_hbase.alltypes recover partitions",
         "ALTER TABLE RECOVER PARTITIONS must target an HDFS table: " +
         "functional_hbase.alltypes");
@@ -958,10 +1050,10 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Cannot compute stats on a database.
     AnalysisError("compute stats tbl_does_not_exist",
-        "Table does not exist: default.tbl_does_not_exist");
+        "Could not resolve table reference: 'tbl_does_not_exist'");
     // Cannot compute stats on a view.
     AnalysisError("compute stats functional.alltypes_view",
-        "COMPUTE STATS not supported for view functional.alltypes_view");
+        "COMPUTE STATS not supported for view: functional.alltypes_view");
 
     AnalyzesOk("compute stats functional_avro_snap.alltypes");
     // Test mismatched column definitions and Avro schema (HIVE-6308, IMPALA-867).
@@ -991,28 +1083,26 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     checkComputeStatsStmt("compute incremental stats functional.alltypes");
     checkComputeStatsStmt(
         "compute incremental stats functional.alltypes partition(year=2010, 
month=10)");
+    checkComputeStatsStmt(
+        "compute incremental stats functional.alltypes partition(year<=2010)");
 
     AnalysisError(
         "compute incremental stats functional.alltypes partition(year=9999, 
month=10)",
-        "Partition spec does not exist: (year=9999, month=10)");
-    AnalysisError(
-        "compute incremental stats functional.alltypes partition(year=2010)",
-        "Items in partition spec must exactly match the partition columns in 
the table " +
-        "definition: functional.alltypes (1 vs 2)");
+        "No matching partition(s) found.");
     AnalysisError(
         "compute incremental stats functional.alltypes partition(year=2010, 
month)",
-        "Syntax error");
+        "Partition expr requires return type 'BOOLEAN'. Actual type is 
'INT'.");
 
     // Test that NULL partitions generates a valid query
     checkComputeStatsStmt("compute incremental stats functional.alltypesagg " +
-        "partition(year=2010, month=1, day=NULL)");
+        "partition(year=2010, month=1, day is NULL)");
 
     AnalysisError("compute incremental stats functional_hbase.alltypes " +
         "partition(year=2010, month=1)", "COMPUTE INCREMENTAL ... PARTITION 
not " +
         "supported for non-HDFS table functional_hbase.alltypes");
 
     AnalysisError("compute incremental stats functional.view_view",
-        "COMPUTE STATS not supported for view functional.view_view");
+        "COMPUTE STATS not supported for view: functional.view_view");
   }
 
 
@@ -1020,9 +1110,11 @@ public class AnalyzeDDLTest extends FrontendTestBase {
   public void TestDropIncrementalStats() throws AnalysisException {
     AnalyzesOk(
         "drop incremental stats functional.alltypes partition(year=2010, 
month=10)");
+    AnalyzesOk(
+        "drop incremental stats functional.alltypes partition(year<=2010, 
month=10)");
     AnalysisError(
         "drop incremental stats functional.alltypes partition(year=9999, 
month=10)",
-        "Partition spec does not exist: (year=9999, month=10)");
+        "No matching partition(s) found.");
   }
 
 
@@ -1032,10 +1124,10 @@ public class AnalyzeDDLTest extends FrontendTestBase {
 
     // Table does not exist
     AnalysisError("drop stats tbl_does_not_exist",
-        "Table does not exist: default.tbl_does_not_exist");
+        "Could not resolve table reference: 'tbl_does_not_exist'");
     // Database does not exist
     AnalysisError("drop stats no_db.no_tbl",
-        "Database does not exist: no_db");
+        "Could not resolve table reference: 'no_db.no_tbl'");
 
     AnalysisError("drop stats functional.alltypes partition(year=2010, 
month=10)",
         "Syntax error");
@@ -2959,32 +3051,38 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     // Test empty table
     AnalyzesOk(String.format("show files in functional.emptytable"));
 
-    String[] partitions = new String[] { "", "partition(month=10, year=2010)" 
};
+    String[] partitions = new String[] {
+        "",
+        "partition(month=10, year=2010)",
+        "partition(month>10, year<2011, year>2008)"};
     for (String partition: partitions) {
       AnalyzesOk(String.format("show files in functional.alltypes %s", 
partition));
       // Database/table doesn't exist.
       AnalysisError(String.format("show files in baddb.alltypes %s", 
partition),
-          "Database does not exist: baddb");
+          "Could not resolve table reference: 'baddb.alltypes'");
       AnalysisError(String.format("show files in functional.badtbl %s", 
partition),
-          "Table does not exist: functional.badtbl");
+          "Could not resolve table reference: 'functional.badtbl'");
       // Cannot show files on a non hdfs table.
       AnalysisError(String.format("show files in functional.alltypes_view %s",
           partition),
           "SHOW FILES not applicable to a non hdfs table: 
functional.alltypes_view");
+      AnalysisError(String.format("show files in allcomplextypes.int_array_col 
%s",
+          partition), createAnalyzer("functional"),
+          "SHOW FILES not applicable to a non hdfs table: 
allcomplextypes.int_array_col");
     }
 
     // Not a partition column.
     AnalysisError("show files in functional.alltypes 
partition(year=2010,int_col=1)",
-        "Column 'int_col' is not a partition column in table: 
functional.alltypes");
+        "Partition exprs cannot contain non-partition column(s): int_col = 
1.");
     // Not a valid column.
     AnalysisError("show files in functional.alltypes 
partition(year=2010,day=1)",
-        "Partition column 'day' not found in table: functional.alltypes");
+        "Could not resolve column/field reference: 'day'");
     // Table is not partitioned.
     AnalysisError("show files in functional.tinyinttable partition(int_col=1)",
         "Table is not partitioned: functional.tinyinttable");
     // Partition spec does not exist
     AnalysisError("show files in functional.alltypes 
partition(year=2010,month=NULL)",
-        "Partition spec does not exist: (year=2010, month=NULL)");
+        "No matching partition(s) found.");
   }
 
   @Test

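A note on the AnalyzeDDLTest changes above: the user-visible shift is that a PARTITION clause in ALTER TABLE ... SET, COMPUTE/DROP INCREMENTAL STATS, and SHOW FILES is now a boolean predicate over the partition columns (year<=2010, month>10, day is NULL) rather than an exact key=value spec, and mismatches are reported as "No matching partition(s) found." instead of a missing partition spec. A minimal standalone sketch of that matching semantics, in Python for brevity; the helper and data below are illustrative only, not Impala's code:

# Illustrative only, not Impala's code: the new PARTITION clause selects
# every partition whose key values satisfy a boolean predicate.
partitions = [
    {'year': 2009, 'month': 1},
    {'year': 2010, 'month': 11},
    {'year': 2010, 'month': 12},
]

def matching_partitions(parts, pred):
    """Return the partitions whose key values satisfy the predicate."""
    return [p for p in parts if pred(p)]

# PARTITION (year<=2010, month=11): a conjunction of comparisons; one match here.
print(matching_partitions(partitions,
                          lambda p: p['year'] <= 2010 and p['month'] == 11))
# PARTITION (year!=20): matches all three partitions, which SET LOCATION
# would reject with the "Too many matched partitions" error tested above.
print(len(matching_partitions(partitions, lambda p: p['year'] != 20)))

Statements that act on a single partition, such as SET LOCATION, additionally require the predicate to match exactly one partition; the others accept any non-empty match set.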
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
index a6760de..cacdd83 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeStmtsTest.java
@@ -3466,7 +3466,7 @@ public class AnalyzeStmtsTest extends AnalyzerTest {
     testNumberOfMembers(ValuesStmt.class, 0);
 
     // Also check TableRefs.
-    testNumberOfMembers(TableRef.class, 18);
+    testNumberOfMembers(TableRef.class, 19);
     testNumberOfMembers(BaseTableRef.class, 0);
     testNumberOfMembers(InlineViewRef.class, 8);
   }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 0affd4e..69c90da 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -1768,8 +1768,8 @@ public class ParserTest extends FrontendTestBase {
     ParserError("SHOW TABLE STATS 'strlit'");
     // Missing table.
     ParserError("SHOW FILES IN");
-    // Invalid partition.
-    ParserError("SHOW FILES IN db.tbl PARTITION(p)");
+
+    ParsesOk("SHOW FILES IN db.tbl PARTITION(p)");
   }
 
   @Test
@@ -2002,9 +2002,6 @@ public class ParserTest extends FrontendTestBase {
     ParsesOk("ALTER TABLE Foo ADD PARTITION (i=NULL, j=2, k=NULL)");
     ParsesOk("ALTER TABLE Foo ADD PARTITION (i=abc, j=(5*8+10), k=!true and 
false)");
 
-    // Cannot use dynamic partition syntax
-    ParserError("ALTER TABLE TestDb.Foo ADD PARTITION (partcol)");
-    ParserError("ALTER TABLE TestDb.Foo ADD PARTITION (i=1, partcol)");
     // Location needs to be a string literal
     ParserError("ALTER TABLE TestDb.Foo ADD PARTITION (i=1, s='Hello') 
LOCATION a/b");
 
@@ -2070,10 +2067,6 @@ public class ParserTest extends FrontendTestBase {
       ParsesOk(String.format("ALTER TABLE Foo DROP PARTITION (i=abc, "
         + "j=(5*8+10), k=!true and false) %s", kw));
 
-      // Cannot use dynamic partition syntax
-      ParserError(String.format("ALTER TABLE Foo DROP PARTITION (partcol) %s", 
kw));
-      ParserError(String.format("ALTER TABLE Foo DROP PARTITION (i=1, j) %s", 
kw));
-
       ParserError(String.format("ALTER TABLE Foo DROP IF NOT EXISTS "
         + "PARTITION (i=1, s='Hello') %s", kw));
       ParserError(String.format("ALTER TABLE TestDb.Foo DROP (i=1, s='Hello') 
%s", kw));
@@ -2126,10 +2119,7 @@ public class ParserTest extends FrontendTestBase {
     ParsesOk("ALTER TABLE Foo PARTITION (i=1,s='str') SET LOCATION 
'/a/i=1/s=str'");
     ParsesOk("ALTER TABLE Foo PARTITION (s='str') SET LOCATION 
'/a/i=1/s=str'");
 
-    ParserError("ALTER TABLE Foo PARTITION (s) SET LOCATION '/a'");
     ParserError("ALTER TABLE Foo PARTITION () SET LOCATION '/a'");
-    ParserError("ALTER TABLE Foo PARTITION ('str') SET FILEFORMAT TEXTFILE");
-    ParserError("ALTER TABLE Foo PARTITION (a=1, 5) SET FILEFORMAT TEXTFILE");
     ParserError("ALTER TABLE Foo PARTITION () SET FILEFORMAT PARQUETFILE");
     ParserError("ALTER TABLE Foo PARTITION (,) SET FILEFORMAT PARQUET");
     ParserError("ALTER TABLE Foo PARTITION (a=1) SET FILEFORMAT");
@@ -3331,13 +3321,10 @@ public class ParserTest extends FrontendTestBase {
 
     ParsesOk(
         "COMPUTE INCREMENTAL STATS functional.alltypes PARTITION(month=10, 
year=2010)");
-    // No dynamic partition specs
-    ParserError("COMPUTE INCREMENTAL STATS functional.alltypes 
PARTITION(month, year)");
 
     ParserError("COMPUTE INCREMENTAL STATS");
 
     ParsesOk("DROP INCREMENTAL STATS functional.alltypes PARTITION(month=10, 
year=2010)");
-    ParserError("DROP INCREMENTAL STATS functional.alltypes PARTITION(month, 
year)");
     ParserError("DROP INCREMENTAL STATS functional.alltypes");
   }
 

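The deleted ParserError cases above follow from the same change: PARTITION(...) now accepts arbitrary expressions at parse time, so inputs like PARTITION(p) or PARTITION(month, year) parse, and ill-typed exprs are instead rejected during analysis with "Partition expr requires return type 'BOOLEAN'. Actual type is 'INT'." (see the AnalyzeDDLTest hunk earlier in this patch). A hedged sketch of that parse-then-analyze split; the function below is illustrative, not Impala's analyzer:

# Illustrative only, not Impala's analyzer: expressions inside PARTITION(...)
# are accepted by the parser and type-checked later, during analysis.
def analyze_partition_expr(expr_type):
    if expr_type != 'BOOLEAN':
        raise ValueError("Partition expr requires return type 'BOOLEAN'. "
                         "Actual type is '%s'." % expr_type)

analyze_partition_expr('BOOLEAN')    # e.g. PARTITION(year<=2010): analyzes ok
try:
    analyze_partition_expr('INT')    # e.g. PARTITION(month): parses, fails here
except ValueError as e:
    print(e)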
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/628685ae/shell/impala_client.py
----------------------------------------------------------------------
diff --git a/shell/impala_client.py b/shell/impala_client.py
index f57a015..f84c14e 100755
--- a/shell/impala_client.py
+++ b/shell/impala_client.py
@@ -450,7 +450,7 @@ class ImpalaClient(object):
 
   def expect_result_metadata(self, query_str):
     """ Given a query string, return True if impalad expects result metadata"""
-    excluded_query_types = ['use', 'alter', 'drop']
+    excluded_query_types = ['use', 'drop']
     if True in set(map(query_str.startswith, excluded_query_types)):
       return False
     return True

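The shell change above removes 'alter' from the query prefixes for which no result metadata is expected, presumably because ALTER statements can now produce a result set (for example, a summary of the partitions affected). A standalone restatement of the predicate for clarity; the real version is a method on ImpalaClient:

# Standalone restatement of expect_result_metadata; with 'alter' no longer
# excluded, ALTER statements are expected to return result metadata,
# while USE and DROP still are not.
def expect_result_metadata(query_str):
    excluded_query_types = ['use', 'drop']
    return not any(query_str.startswith(q) for q in excluded_query_types)

print(expect_result_metadata("alter table t drop partition (year < 2010)"))  # True
print(expect_result_metadata("use functional"))                              # False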