This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new be1d957  HIVE-22242: Move TempTable and PartitionTree out of SessionHiveMetastoreClient (Laszlo Pinter via Peter Vary)
be1d957 is described below

commit be1d9579b6178bf53bc7b5338ae112839c9d8ca4
Author: Peter Vary <[email protected]>
AuthorDate: Thu Sep 26 18:37:46 2019 +0200

    HIVE-22242: Move TempTable and PartitionTree out of SessionHiveMetastoreClient (Laszlo Pinter via Peter Vary)
---
 .../hadoop/hive/ql/metadata/PartitionTree.java     | 151 ++++++++++++
 .../ql/metadata/SessionHiveMetaStoreClient.java    | 272 +--------------------
 .../apache/hadoop/hive/ql/metadata/TempTable.java  | 180 ++++++++++++++
 .../hadoop/hive/ql/session/SessionState.java       |   6 +-
 4 files changed, 336 insertions(+), 273 deletions(-)
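
For orientation: the patch hoists the two nested helper classes out of SessionHiveMetaStoreClient and makes them top-level classes in org.apache.hadoop.hive.ql.metadata, so code such as SessionState can refer to them without naming the metastore client. A minimal sketch of the resulting call-site shape, assuming only that a holder keeps TempTable instances keyed by table (the field mirrors SessionState#tempPartitions further below; the class name is illustrative):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Before the patch this type had to be written as SessionHiveMetaStoreClient.TempTable;
    // after it, TempTable is a top-level class in org.apache.hadoop.hive.ql.metadata.
    import org.apache.hadoop.hive.ql.metadata.TempTable;

    final class TempPartitionsHolderSketch {
      // Same shape as SessionState#tempPartitions after the patch.
      private final Map<String, TempTable> tempPartitions = new ConcurrentHashMap<>();
    }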

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java
new file mode 100644
index 0000000..c84c3ef
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionTree.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.makePartNameMatcher;
+
+/**
+ * Always clone objects before adding or returning them so that callers don't modify them
+ * via references.
+ */
+final class PartitionTree {
+  private final Map<String, org.apache.hadoop.hive.metastore.api.Partition> parts = new LinkedHashMap<>();
+  private final org.apache.hadoop.hive.metastore.api.Table tTable;
+
+  PartitionTree(org.apache.hadoop.hive.metastore.api.Table t) {
+    this.tTable = t;
+  }
+
+  Partition addPartition(Partition partition, String partName, boolean ifNotExists) throws AlreadyExistsException {
+    partition.setDbName(partition.getDbName().toLowerCase());
+    partition.setTableName(partition.getTableName().toLowerCase());
+    if (!ifNotExists && parts.containsKey(partName)) {
+      throw new AlreadyExistsException("Partition " + partName + " already exists");
+    }
+    return parts.putIfAbsent(partName, partition);
+  }
+
+  /**
+   * @param partName - "p=1/q=2" full partition name {@link Warehouse#makePartName(List, List)}
+   * @return null if doesn't exist
+   */
+  Partition getPartition(String partName) {
+    return parts.get(partName);
+  }
+
+  /**
+   * Get a partition matching the partition values.
+   *
+   * @param partVals partition values for this partition, must be in the same order as the
+   *                 partition keys of the table.
+   * @return the partition object, or if not found null.
+   * @throws MetaException
+   */
+  Partition getPartition(List<String> partVals) throws MetaException {
+    String partName = makePartName(tTable.getPartitionKeys(), partVals);
+    return getPartition(partName);
+  }
+
+  /**
+   * Add partitions to the partition tree.
+   *
+   * @param partitions  The partitions to add
+   * @param ifNotExists only add partitions if they don't exist
+   * @return the partitions that were added
+   * @throws MetaException
+   */
+  List<Partition> addPartitions(List<Partition> partitions, boolean ifNotExists)
+      throws MetaException, AlreadyExistsException {
+    List<Partition> partitionsAdded = new ArrayList<>();
+    Map<String, Partition> partNameToPartition = new HashMap<>();
+    // validate that the new partition values is not already added to the table
+    for (Partition partition : partitions) {
+      String partName = makePartName(tTable.getPartitionKeys(), partition.getValues());
+      if (!ifNotExists && parts.containsKey(partName)) {
+        throw new AlreadyExistsException("Partition " + partName + " already exists");
+      }
+      partNameToPartition.put(partName, partition);
+    }
+
+    for (Map.Entry<String, Partition> entry : partNameToPartition.entrySet()) {
+      if (addPartition(entry.getValue(), entry.getKey(), ifNotExists) == null) {
+        partitionsAdded.add(entry.getValue());
+      }
+    }
+
+    return partitionsAdded;
+  }
+
+  /**
+   * Provided values for the 1st N partition columns, will return all matching PartitionS
+   * The list is a partial list of partition values in the same order as partition columns.
+   * Missing values should be represented as "" (empty strings).  May provide fewer values.
+   * So if part cols are a,b,c, {"",2} is a valid list
+   * {@link MetaStoreUtils#getPvals(List, Map)}
+   */
+  List<Partition> getPartitionsByPartitionVals(List<String> partialPartVals) throws MetaException {
+    if (partialPartVals == null || partialPartVals.isEmpty()) {
+      throw new MetaException("Partition partial vals cannot be null or empty");
+    }
+    String partNameMatcher = makePartNameMatcher(tTable, partialPartVals, ".*");
+    List<Partition> matchedPartitions = new ArrayList<>();
+    for (Map.Entry<String, Partition> entry : parts.entrySet()) {
+      if (entry.getKey().matches(partNameMatcher)) {
+        matchedPartitions.add(entry.getValue());
+      }
+    }
+    return matchedPartitions;
+  }
+
+  /**
+   * Get all the partitions.
+   *
+   * @return partitions list
+   */
+  List<Partition> listPartitions() {
+    return new ArrayList<>(parts.values());
+  }
+
+  /**
+   * Remove a partition from the table.
+   * @param partVals partition values, must be not null
+   * @return the instance of the dropped partition, if the remove was successful, otherwise false
+   * @throws MetaException
+   */
+  Partition dropPartition(List<String> partVals) throws MetaException, NoSuchObjectException {
+    String partName = makePartName(tTable.getPartitionKeys(), partVals);
+    if (!parts.containsKey(partName)) {
+      throw new NoSuchObjectException(
+          "Partition with partition values " + 
Arrays.toString(partVals.toArray()) + " is not found.");
+    }
+    return parts.remove(partName);
+  }
+}
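
For readers skimming the new class above: PartitionTree keeps an in-memory, insertion-ordered map from partition name to Partition for a single table. A minimal usage sketch, assuming a caller inside org.apache.hadoop.hive.ql.metadata (the class and its constructor are package-private) and a metastore Table partitioned by p and q; the helper class and its arguments are illustrative:

    package org.apache.hadoop.hive.ql.metadata;

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Partition;

    import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;

    final class PartitionTreeUsageSketch {
      static void example(org.apache.hadoop.hive.metastore.api.Table table, Partition part)
          throws MetaException, AlreadyExistsException {
        PartitionTree tree = new PartitionTree(table);

        // Partition names follow the "p=1/q=2" form produced by Warehouse.makePartName.
        String partName = makePartName(table.getPartitionKeys(), part.getValues());
        tree.addPartition(part, partName, false);

        // Lookup by full value list, or by a prefix where "" matches any value for that key.
        Partition byVals = tree.getPartition(part.getValues());
        List<Partition> firstKeyOnly = tree.getPartitionsByPartitionVals(Arrays.asList("1", ""));
      }
    }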
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index acd6f72..a5b16d1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -25,7 +25,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -82,15 +81,12 @@ import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
 import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;
-import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.compareFieldColumns;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getColumnNamesForTable;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getPvals;
 import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.isExternalTable;
-import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.makePartNameMatcher;
-
 
 /**
  * todo: This need review re: thread safety.  Various places (see callsers of
@@ -927,270 +923,6 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   }
 
   /**
-   * This stores partition information for a temp table.
-   */
-  public static final class TempTable {
-    private final org.apache.hadoop.hive.metastore.api.Table tTable;
-    private final PartitionTree pTree;
-
-    private static final String EXTERNAL_PARAM = "EXTERNAL";
-
-    TempTable(org.apache.hadoop.hive.metastore.api.Table t) {
-      assert t != null;
-      this.tTable = t;
-      pTree = t.getPartitionKeysSize() > 0 ? new PartitionTree(tTable) : null;
-    }
-
-    private Partition addPartition(Partition p) throws AlreadyExistsException, MetaException {
-      String partName = makePartName(tTable.getPartitionKeys(), p.getValues());
-      Partition partition = pTree.addPartition(p, partName, false);
-      return partition == null ? pTree.getPartition(partName) : partition;
-    }
-
-    private boolean isExternal() {
-      return tTable.getParameters() != null && "true".equals(tTable.getParameters().get(EXTERNAL_PARAM));
-    }
-
-    private Partition getPartition(String partName) throws MetaException {
-      if (partName == null || partName.isEmpty()) {
-        throw new MetaException("Partition name cannot be null or empty");
-      }
-      return pTree.getPartition(partName);
-    }
-
-    private Partition getPartition(List<String> partVals) throws MetaException {
-      if (partVals == null) {
-        throw new MetaException("Partition values cannot be null");
-      }
-      return pTree.getPartition(partVals);
-    }
-
-    private List<Partition> addPartitions(List<Partition> partitions, boolean ifNotExists)
-        throws MetaException, AlreadyExistsException {
-      return pTree.addPartitions(partitions, ifNotExists);
-    }
-
-    private List<Partition> getPartitionsByNames(List<String> partNames) throws MetaException {
-      if (partNames == null) {
-        throw new MetaException("Partition names cannot be null");
-      }
-      List<Partition> partitions = new ArrayList<>();
-      for (String partName : partNames) {
-        Partition partition = getPartition(partName);
-        if (partition != null) {
-          partitions.add(partition);
-        }
-      }
-      return partitions;
-    }
-
-    private List<Partition> getPartitionsByPartitionVals(List<String> partialPartVals) throws MetaException {
-      return pTree.getPartitionsByPartitionVals(partialPartVals);
-    }
-
-    private Partition getPartitionWithAuthInfo(List<String> partionVals, String userName, List<String> groupNames)
-        throws MetaException {
-      Partition partition = getPartition(partionVals);
-      if (partition == null) {
-        return null;
-      }
-      return checkPrivilegesForPartition(partition, userName, groupNames) ? partition : null;
-    }
-
-    private List<Partition> listPartitions() throws MetaException {
-      return pTree.listPartitions();
-    }
-
-    private List<Partition> listPartitionsWithAuthInfo(String userName, List<String> groupNames) throws MetaException {
-      List<Partition> partitions = listPartitions();
-      List<Partition> result = new ArrayList<>();
-      partitions.forEach(p -> {
-        if (checkPrivilegesForPartition(p, userName, groupNames)) {
-          result.add(p);
-        }
-      });
-      return result;
-    }
-
-    private List<Partition> listPartitionsByPartitionValsWithAuthInfo(List<String> partialVals, String userName,
-        List<String> groupNames) throws MetaException {
-      List<Partition> partitions = pTree.getPartitionsByPartitionVals(partialVals);
-      List<Partition> result = new ArrayList<>();
-      partitions.forEach(p -> {
-        if (checkPrivilegesForPartition(p, userName, groupNames)) {
-          result.add(p);
-        }
-      });
-      return result;
-    }
-
-
-    private boolean checkPrivilegesForPartition(Partition partition, String userName, List<String> groupNames) {
-      if ((userName == null || userName.isEmpty()) && (groupNames == null || groupNames.isEmpty())) {
-        return true;
-      }
-      PrincipalPrivilegeSet privileges = partition.getPrivileges();
-      if (privileges == null) {
-        return true;
-      }
-      if (privileges.isSetUserPrivileges()) {
-        if (!privileges.getUserPrivileges().containsKey(userName)) {
-          return false;
-        }
-      }
-      if (privileges.isSetGroupPrivileges()) {
-        if (groupNames == null) {
-          return false;
-        }
-        for (String group : groupNames) {
-          if (!privileges.getGroupPrivileges().containsKey(group)) {
-            return false;
-          }
-        }
-      }
-      return true;
-    }
-
-    private Partition dropPartition(List<String> partVals) throws MetaException, NoSuchObjectException {
-      return pTree.dropPartition(partVals);
-    }
-
-    private Partition dropPartition(String partitionName) throws MetaException, NoSuchObjectException {
-      Map<String, String> specFromName = makeSpecFromName(partitionName);
-      if (specFromName == null || specFromName.isEmpty()) {
-        throw new NoSuchObjectException("Invalid partition name " + partitionName);
-      }
-      List<String> pVals = new ArrayList<>();
-      for (FieldSchema field : tTable.getPartitionKeys()) {
-        String val = specFromName.get(field.getName());
-        if (val == null) {
-          throw new NoSuchObjectException("Partition name " + partitionName + " and table partition keys " + Arrays
-              .toString(tTable.getPartitionKeys().toArray()) + " does not match");
-        }
-        pVals.add(val);
-      }
-      return pTree.dropPartition(pVals);
-    }
-
-
-    /**
-     * Always clone objects before adding or returning them so that callers don't modify them
-     * via references.
-     */
-    private static final class PartitionTree {
-      private final Map<String, Partition> parts = new LinkedHashMap<>();
-      private final org.apache.hadoop.hive.metastore.api.Table tTable;
-
-      private PartitionTree(org.apache.hadoop.hive.metastore.api.Table t) {
-        this.tTable = t;
-      }
-      private Partition addPartition(Partition partition, String partName,  boolean ifNotExists)
-          throws AlreadyExistsException {
-        partition.setDbName(partition.getDbName().toLowerCase());
-        partition.setTableName(partition.getTableName().toLowerCase());
-        if(!ifNotExists && parts.containsKey(partName)) {
-          throw new AlreadyExistsException("Partition " + partName + " already exists");
-        }
-        return parts.putIfAbsent(partName, partition);
-      }
-      /**
-       * @param partName - "p=1/q=2" full partition name {@link Warehouse#makePartName(List, List)}
-       * @return null if doesn't exist
-       */
-      private Partition getPartition(String partName) {
-        return parts.get(partName);
-      }
-
-      /**
-       * Get a partition matching the partition values.
-       *
-       * @param partVals partition values for this partition, must be in the same order as the
-       *                 partition keys of the table.
-       * @return the partition object, or if not found null.
-       * @throws MetaException
-       */
-      private Partition getPartition(List<String> partVals) throws MetaException {
-        String partName = makePartName(tTable.getPartitionKeys(), partVals);
-        return getPartition(partName);
-      }
-
-      /**
-       * Add partitions to the partition tree.
-       *
-       * @param partitions  The partitions to add
-       * @param ifNotExists only add partitions if they don't exist
-       * @return the partitions that were added
-       * @throws MetaException
-       */
-      private List<Partition> addPartitions(List<Partition> partitions, boolean ifNotExists)
-          throws MetaException, AlreadyExistsException {
-        List<Partition> partitionsAdded = new ArrayList<>();
-        Map<String, Partition> partNameToPartition = new HashMap<>();
-        // validate that the new partition values is not already added to the table
-        for (Partition partition : partitions) {
-          String partName = makePartName(tTable.getPartitionKeys(), partition.getValues());
-          if (!ifNotExists && parts.containsKey(partName)) {
-            throw new AlreadyExistsException("Partition " + partName + " already exists");
-          }
-          partNameToPartition.put(partName, partition);
-        }
-
-        for (Entry<String, Partition> entry : partNameToPartition.entrySet()) {
-          if (addPartition(entry.getValue(), entry.getKey(), ifNotExists) == null) {
-            partitionsAdded.add(entry.getValue());
-          }
-        }
-
-        return partitionsAdded;
-      }
-      /**
-       * Provided values for the 1st N partition columns, will return all matching PartitionS
-       * The list is a partial list of partition values in the same order as partition columns.
-       * Missing values should be represented as "" (empty strings).  May provide fewer values.
-       * So if part cols are a,b,c, {"",2} is a valid list
-       * {@link MetaStoreUtils#getPvals(List, Map)}
-       */
-      private List<Partition> getPartitionsByPartitionVals(List<String> partialPartVals) throws MetaException {
-        if (partialPartVals == null || partialPartVals.isEmpty()) {
-          throw new MetaException("Partition partial vals cannot be null or empty");
-        }
-        String partNameMatcher = makePartNameMatcher(tTable, partialPartVals, ".*");
-        List<Partition> matchedPartitions = new ArrayList<>();
-        for (String key : parts.keySet()) {
-          if (key.matches(partNameMatcher)) {
-            matchedPartitions.add(parts.get(key));
-          }
-        }
-        return matchedPartitions;
-      }
-
-      /**
-       * Get all the partitions.
-       *
-       * @return partitions list
-       */
-      private List<Partition> listPartitions() {
-        return new ArrayList<>(parts.values());
-      }
-
-      /**
-       * Remove a partition from the table.
-       * @param partVals partition values, must be not null
-       * @return the instance of the dropped partition, if the remove was successful, otherwise false
-       * @throws MetaException
-       */
-      private Partition dropPartition(List<String> partVals) throws MetaException, NoSuchObjectException {
-        String partName = makePartName(tTable.getPartitionKeys(), partVals);
-        if (!parts.containsKey(partName)) {
-          throw new NoSuchObjectException(
-              "Partition with partition values " + 
Arrays.toString(partVals.toArray()) + " is not found.");
-        }
-        return parts.remove(partName);
-      }
-    }
-  }
-
-  /**
    * Hive.loadPartition() calls this.
    * @param partition
    *          The partition to add
@@ -1387,7 +1119,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     List<Partition> partitions = tt.listPartitions();
     List<String> result = new ArrayList<>();
     for (int i = 0; i < ((maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts); i++) {
-      result.add(makePartName(tt.tTable.getPartitionKeys(), partitions.get(i).getValues()));
+      result.add(makePartName(table.getPartitionKeys(), partitions.get(i).getValues()));
     }
     return result;
   }
@@ -1403,7 +1135,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
     List<Partition> partitions = tt.getPartitionsByPartitionVals(partVals);
     List<String> result = new ArrayList<>();
     for (int i = 0; i < ((maxParts < 0 || maxParts > partitions.size()) ? partitions.size() : maxParts); i++) {
-      result.add(makePartName(tt.tTable.getPartitionKeys(), partitions.get(i).getValues()));
+      result.add(makePartName(table.getPartitionKeys(), partitions.get(i).getValues()));
     }
     return result;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java
new file mode 100644
index 0000000..fa6dddc
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/TempTable.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hive.metastore.Warehouse.makePartName;
+import static org.apache.hadoop.hive.metastore.Warehouse.makeSpecFromName;
+
+/**
+ * This stores partition information for a temp table.
+ */
+public final class TempTable {
+  private final org.apache.hadoop.hive.metastore.api.Table tTable;
+  private final PartitionTree pTree;
+
+  private static final String EXTERNAL_PARAM = "EXTERNAL";
+
+  TempTable(org.apache.hadoop.hive.metastore.api.Table t) {
+    assert t != null;
+    this.tTable = t;
+    pTree = t.getPartitionKeysSize() > 0 ? new PartitionTree(tTable) : null;
+  }
+
+  Partition addPartition(Partition p) throws AlreadyExistsException, MetaException {
+    String partName = makePartName(tTable.getPartitionKeys(), p.getValues());
+    Partition partition = pTree.addPartition(p, partName, false);
+    return partition == null ? pTree.getPartition(partName) : partition;
+  }
+
+  boolean isExternal() {
+    return tTable.getParameters() != null && "true".equals(tTable.getParameters().get(EXTERNAL_PARAM));
+  }
+
+  Partition getPartition(String partName) throws MetaException {
+    if (partName == null || partName.isEmpty()) {
+      throw new MetaException("Partition name cannot be null or empty");
+    }
+    return pTree.getPartition(partName);
+  }
+
+  Partition getPartition(List<String> partVals) throws MetaException {
+    if (partVals == null) {
+      throw new MetaException("Partition values cannot be null");
+    }
+    return pTree.getPartition(partVals);
+  }
+
+  List<Partition> addPartitions(List<Partition> partitions, boolean ifNotExists)
+      throws MetaException, AlreadyExistsException {
+    return pTree.addPartitions(partitions, ifNotExists);
+  }
+
+  List<Partition> getPartitionsByNames(List<String> partNames) throws MetaException {
+    if (partNames == null) {
+      throw new MetaException("Partition names cannot be null");
+    }
+    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = new ArrayList<>();
+    for (String partName : partNames) {
+      org.apache.hadoop.hive.metastore.api.Partition partition = getPartition(partName);
+      if (partition != null) {
+        partitions.add(partition);
+      }
+    }
+    return partitions;
+  }
+
+  List<Partition> getPartitionsByPartitionVals(List<String> partialPartVals) throws MetaException {
+    return pTree.getPartitionsByPartitionVals(partialPartVals);
+  }
+
+  Partition getPartitionWithAuthInfo(List<String> partionVals, String userName, List<String> groupNames)
+      throws MetaException {
+    Partition partition = getPartition(partionVals);
+    if (partition == null) {
+      return null;
+    }
+    return checkPrivilegesForPartition(partition, userName, groupNames) ? partition : null;
+  }
+
+  List<Partition> listPartitions() throws MetaException {
+    return pTree.listPartitions();
+  }
+
+  List<Partition> listPartitionsWithAuthInfo(String userName, List<String> groupNames) throws MetaException {
+    List<Partition> partitions = listPartitions();
+    List<Partition> result = new ArrayList<>();
+    partitions.forEach(p -> {
+      if (checkPrivilegesForPartition(p, userName, groupNames)) {
+        result.add(p);
+      }
+    });
+    return result;
+  }
+
+  List<Partition> listPartitionsByPartitionValsWithAuthInfo(List<String> partialVals, String userName,
+      List<String> groupNames) throws MetaException {
+    List<Partition> partitions = pTree.getPartitionsByPartitionVals(partialVals);
+    List<Partition> result = new ArrayList<>();
+    partitions.forEach(p -> {
+      if (checkPrivilegesForPartition(p, userName, groupNames)) {
+        result.add(p);
+      }
+    });
+    return result;
+  }
+
+  boolean checkPrivilegesForPartition(Partition partition, String userName, List<String> groupNames) {
+    if ((userName == null || userName.isEmpty()) && (groupNames == null || groupNames.isEmpty())) {
+      return true;
+    }
+    PrincipalPrivilegeSet privileges = partition.getPrivileges();
+    if (privileges == null) {
+      return true;
+    }
+    if (privileges.isSetUserPrivileges()) {
+      if (!privileges.getUserPrivileges().containsKey(userName)) {
+        return false;
+      }
+    }
+    if (privileges.isSetGroupPrivileges()) {
+      if (groupNames == null) {
+        return false;
+      }
+      for (String group : groupNames) {
+        if (!privileges.getGroupPrivileges().containsKey(group)) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  Partition dropPartition(List<String> partVals) throws MetaException, NoSuchObjectException {
+    return pTree.dropPartition(partVals);
+  }
+
+  Partition dropPartition(String partitionName) throws MetaException, NoSuchObjectException {
+    Map<String, String> specFromName = makeSpecFromName(partitionName);
+    if (specFromName == null || specFromName.isEmpty()) {
+      throw new NoSuchObjectException("Invalid partition name " + partitionName);
+    }
+    List<String> pVals = new ArrayList<>();
+    for (FieldSchema field : tTable.getPartitionKeys()) {
+      String val = specFromName.get(field.getName());
+      if (val == null) {
+        throw new NoSuchObjectException("Partition name " + partitionName + " and table partition keys " + Arrays
+            .toString(tTable.getPartitionKeys().toArray()) + " does not match");
+      }
+      pVals.add(val);
+    }
+    return pTree.dropPartition(pVals);
+  }
+
+}
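
A corresponding sketch for TempTable, which wraps a metastore Table plus a PartitionTree for its partitions and adds the privilege checks. It assumes a partitioned temp table (getPartitionKeysSize() > 0, otherwise no PartitionTree is created) and a caller in the same package, since the constructor is package-private; the user and group names are placeholders:

    package org.apache.hadoop.hive.ql.metadata;

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
    import org.apache.hadoop.hive.metastore.api.Partition;

    final class TempTableUsageSketch {
      static void example(org.apache.hadoop.hive.metastore.api.Table table, Partition part)
          throws MetaException, AlreadyExistsException, NoSuchObjectException {
        TempTable tempTable = new TempTable(table);

        // Register the partition; its name is derived from the table's partition keys.
        tempTable.addPartition(part);

        // Privilege-aware lookup: returns null if the partition's privileges
        // do not cover the given user or groups.
        Partition visible =
            tempTable.getPartitionWithAuthInfo(part.getValues(), "hive", Arrays.asList("users"));

        // Remove it again by its value list.
        Partition dropped = tempTable.dropPartition(part.getValues());
      }
    }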
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
index 0fa6e41..e224f2c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
@@ -91,8 +91,8 @@ import org.apache.hadoop.hive.ql.log.PerfLogger;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
-import org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient;
 import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.TempTable;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook;
@@ -136,7 +136,7 @@ public class SessionState {
   private final Map<String, Map<String, Table>> tempTables = new ConcurrentHashMap<>();
   private final Map<String, Map<String, ColumnStatisticsObj>> tempTableColStats =
       new ConcurrentHashMap<>();
-  private final Map<String, SessionHiveMetaStoreClient.TempTable> tempPartitions =
+  private final Map<String, TempTable> tempPartitions =
       new ConcurrentHashMap<>();
 
   protected ClassLoader parentLoader;
@@ -1924,7 +1924,7 @@ public class SessionState {
   public Map<String, Map<String, Table>> getTempTables() {
     return tempTables;
   }
-  public Map<String, SessionHiveMetaStoreClient.TempTable> getTempPartitions() {
+  public Map<String, TempTable> getTempPartitions() {
     return tempPartitions;
   }
 
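The only surface change in SessionState is the value type of the tempPartitions map. A hedged sketch of a call site after the patch; SessionState.get() is the usual thread-local accessor, and the key format of the map is not part of this diff, so the lookup key below is purely illustrative:

    import java.util.Map;

    import org.apache.hadoop.hive.ql.metadata.TempTable;
    import org.apache.hadoop.hive.ql.session.SessionState;

    final class TempPartitionsLookupSketch {
      // Returns the TempTable registered under the given key, or null if none is present.
      static TempTable lookup(String key) {
        Map<String, TempTable> tempPartitions = SessionState.get().getTempPartitions();
        return tempPartitions.get(key);
      }
    }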
