Modified: hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py (original) +++ hive/trunk/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py Wed Mar 30 00:59:59 2011 @@ -297,6 +297,15 @@ class Iface(fb303.FacebookService.Iface) """ pass + def get_partitions_by_names(self, db_name, tbl_name, names): + """ + Parameters: + - db_name + - tbl_name + - names + """ + pass + def alter_partition(self, db_name, tbl_name, new_part): """ Parameters: @@ -1707,6 +1716,44 @@ class Client(fb303.FacebookService.Clien raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); + def get_partitions_by_names(self, db_name, tbl_name, names): + """ + Parameters: + - db_name + - tbl_name + - names + """ + self.send_get_partitions_by_names(db_name, tbl_name, names) + return self.recv_get_partitions_by_names() + + def send_get_partitions_by_names(self, db_name, tbl_name, names): + self._oprot.writeMessageBegin('get_partitions_by_names', TMessageType.CALL, self._seqid) + args = get_partitions_by_names_args() + args.db_name = db_name + args.tbl_name = tbl_name + args.names = names + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_partitions_by_names(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = get_partitions_by_names_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success != None: + return result.success + if result.o1 != None: + raise result.o1 + if result.o2 != None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_by_names failed: unknown result"); + def alter_partition(self, db_name, tbl_name, new_part): """ Parameters: @@ -2573,6 +2620,7 @@ class Processor(fb303.FacebookService.Pr self._processMap["get_partitions_ps_with_auth"] = Processor.process_get_partitions_ps_with_auth self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps self._processMap["get_partitions_by_filter"] = Processor.process_get_partitions_by_filter + self._processMap["get_partitions_by_names"] = Processor.process_get_partitions_by_names self._processMap["alter_partition"] = Processor.process_alter_partition self._processMap["get_config_value"] = Processor.process_get_config_value self._processMap["partition_name_to_vals"] = Processor.process_partition_name_to_vals @@ -3145,6 +3193,22 @@ class Processor(fb303.FacebookService.Pr oprot.writeMessageEnd() oprot.trans.flush() + def process_get_partitions_by_names(self, seqid, iprot, oprot): + args = get_partitions_by_names_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_partitions_by_names_result() + try: + result.success = self._handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + except MetaException, o1: + result.o1 = o1 + except NoSuchObjectException, o2: + result.o2 = o2 + oprot.writeMessageBegin("get_partitions_by_names", TMessageType.REPLY, seqid) + result.write(oprot) + 
oprot.writeMessageEnd() + oprot.trans.flush() + def process_alter_partition(self, seqid, iprot, oprot): args = alter_partition_args() args.read(iprot) @@ -8976,6 +9040,190 @@ class get_partitions_by_filter_result: def __ne__(self, other): return not (self == other) +class get_partitions_by_names_args: + """ + Attributes: + - db_name + - tbl_name + - names + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.LIST, 'names', (TType.STRING,None), None, ), # 3 + ) + + def __init__(self, db_name=None, tbl_name=None, names=None,): + self.db_name = db_name + self.tbl_name = tbl_name + self.names = names + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString(); + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.LIST: + self.names = [] + (_etype347, _size344) = iprot.readListBegin() + for _i348 in xrange(_size344): + _elem349 = iprot.readString(); + self.names.append(_elem349) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partitions_by_names_args') + if self.db_name != None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name != None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + if self.names != None: + oprot.writeFieldBegin('names', TType.LIST, 3) + oprot.writeListBegin(TType.STRING, len(self.names)) + for iter350 in self.names: + oprot.writeString(iter350) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_partitions_by_names_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype354, _size351) = iprot.readListBegin() + for _i355 in xrange(_size351): + _elem356 = Partition() + _elem356.read(iprot) + self.success.append(_elem356) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_partitions_by_names_result') + if self.success != None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter357 in self.success: + iter357.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 != None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 != None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class alter_partition_args: """ Attributes: @@ -9362,10 +9610,10 @@ class partition_name_to_vals_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype347, _size344) = iprot.readListBegin() - for _i348 in xrange(_size344): - _elem349 = iprot.readString(); - self.success.append(_elem349) + (_etype361, _size358) = iprot.readListBegin() + for _i362 in xrange(_size358): + _elem363 = iprot.readString(); + self.success.append(_elem363) iprot.readListEnd() else: iprot.skip(ftype) @@ -9388,8 +9636,8 @@ class partition_name_to_vals_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter350 in self.success: - oprot.writeString(iter350) + for iter364 in self.success: + oprot.writeString(iter364) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -9500,11 +9748,11 @@ class partition_name_to_spec_result: if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype352, _vtype353, _size351 ) = iprot.readMapBegin() - for _i355 in xrange(_size351): - _key356 = iprot.readString(); - _val357 = iprot.readString(); - self.success[_key356] = _val357 + (_ktype366, _vtype367, _size365 ) = iprot.readMapBegin() + for _i369 in xrange(_size365): + _key370 = iprot.readString(); + _val371 = iprot.readString(); + self.success[_key370] = _val371 iprot.readMapEnd() else: iprot.skip(ftype) @@ -9527,9 +9775,9 @@ class 
partition_name_to_spec_result: if self.success != None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter358,viter359 in self.success.items(): - oprot.writeString(kiter358) - oprot.writeString(viter359) + for kiter372,viter373 in self.success.items(): + oprot.writeString(kiter372) + oprot.writeString(viter373) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -10354,11 +10602,11 @@ class get_indexes_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype363, _size360) = iprot.readListBegin() - for _i364 in xrange(_size360): - _elem365 = Index() - _elem365.read(iprot) - self.success.append(_elem365) + (_etype377, _size374) = iprot.readListBegin() + for _i378 in xrange(_size374): + _elem379 = Index() + _elem379.read(iprot) + self.success.append(_elem379) iprot.readListEnd() else: iprot.skip(ftype) @@ -10387,8 +10635,8 @@ class get_indexes_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter366 in self.success: - iter366.write(oprot) + for iter380 in self.success: + iter380.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -10527,10 +10775,10 @@ class get_index_names_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype370, _size367) = iprot.readListBegin() - for _i371 in xrange(_size367): - _elem372 = iprot.readString(); - self.success.append(_elem372) + (_etype384, _size381) = iprot.readListBegin() + for _i385 in xrange(_size381): + _elem386 = iprot.readString(); + self.success.append(_elem386) iprot.readListEnd() else: iprot.skip(ftype) @@ -10553,8 +10801,8 @@ class get_index_names_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter373 in self.success: - oprot.writeString(iter373) + for iter387 in self.success: + oprot.writeString(iter387) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 != None: @@ -10908,10 +11156,10 @@ class get_role_names_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype377, _size374) = iprot.readListBegin() - for _i378 in xrange(_size374): - _elem379 = iprot.readString(); - self.success.append(_elem379) + (_etype391, _size388) = iprot.readListBegin() + for _i392 in xrange(_size388): + _elem393 = iprot.readString(); + self.success.append(_elem393) iprot.readListEnd() else: iprot.skip(ftype) @@ -10934,8 +11182,8 @@ class get_role_names_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter380 in self.success: - oprot.writeString(iter380) + for iter394 in self.success: + oprot.writeString(iter394) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -11402,11 +11650,11 @@ class list_roles_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype384, _size381) = iprot.readListBegin() - for _i385 in xrange(_size381): - _elem386 = Role() - _elem386.read(iprot) - self.success.append(_elem386) + (_etype398, _size395) = iprot.readListBegin() + for _i399 in xrange(_size395): + _elem400 = Role() + _elem400.read(iprot) + self.success.append(_elem400) iprot.readListEnd() else: iprot.skip(ftype) @@ -11429,8 +11677,8 @@ class list_roles_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter387 in self.success: - 
iter387.write(oprot) + for iter401 in self.success: + iter401.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None: @@ -11497,10 +11745,10 @@ class get_privilege_set_args: elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype391, _size388) = iprot.readListBegin() - for _i392 in xrange(_size388): - _elem393 = iprot.readString(); - self.group_names.append(_elem393) + (_etype405, _size402) = iprot.readListBegin() + for _i406 in xrange(_size402): + _elem407 = iprot.readString(); + self.group_names.append(_elem407) iprot.readListEnd() else: iprot.skip(ftype) @@ -11525,8 +11773,8 @@ class get_privilege_set_args: if self.group_names != None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter394 in self.group_names: - oprot.writeString(iter394) + for iter408 in self.group_names: + oprot.writeString(iter408) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11730,11 +11978,11 @@ class list_privileges_result: if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype398, _size395) = iprot.readListBegin() - for _i399 in xrange(_size395): - _elem400 = HiveObjectPrivilege() - _elem400.read(iprot) - self.success.append(_elem400) + (_etype412, _size409) = iprot.readListBegin() + for _i413 in xrange(_size409): + _elem414 = HiveObjectPrivilege() + _elem414.read(iprot) + self.success.append(_elem414) iprot.readListEnd() else: iprot.skip(ftype) @@ -11757,8 +12005,8 @@ class list_privileges_result: if self.success != None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter401 in self.success: - iter401.write(oprot) + for iter415 in self.success: + iter415.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 != None:
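
The Python changes above (and the Ruby changes below) are Thrift-generated client/server stubs for the new get_partitions_by_names call; the hand-written entry point is the getPartitionsByNames wrapper added to HiveMetaStoreClient.java later in this commit. For orientation only — the database, table, and partition names here are hypothetical and not part of the commit — a Java caller would exercise the new call roughly like this:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class GetPartitionsByNamesSketch {
      public static void main(String[] args) throws Exception {
        // Assumes a metastore reachable through the HiveConf defaults.
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
          // Partition names use Hive's "key=value/key2=value2" encoding;
          // these values are made up for illustration.
          List<String> names = Arrays.asList("ds=2011-03-29", "ds=2011-03-30");
          List<Partition> parts =
              client.getPartitionsByNames("default", "page_view", names);
          for (Partition p : parts) {
            System.out.println(p.getValues() + " -> " + p.getSd().getLocation());
          }
        } finally {
          client.close();
        }
      }
    }

The name encoding is the same key=value path form that Warehouse.makeSpecFromName parses in the PartitionPruner changes further down, which is what lets the pruner collect names first and fetch the matching Partition objects in bulk afterwards.
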
Modified: hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb (original) +++ hive/trunk/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb Wed Mar 30 00:59:59 2011 @@ -569,6 +569,23 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result') end + def get_partitions_by_names(db_name, tbl_name, names) + send_get_partitions_by_names(db_name, tbl_name, names) + return recv_get_partitions_by_names() + end + + def send_get_partitions_by_names(db_name, tbl_name, names) + send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names) + end + + def recv_get_partitions_by_names() + result = receive_message(Get_partitions_by_names_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result') + end + def alter_partition(db_name, tbl_name, new_part) send_alter_partition(db_name, tbl_name, new_part) recv_alter_partition() @@ -1395,6 +1412,19 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_partitions_by_filter', seqid) end + def process_get_partitions_by_names(seqid, iprot, oprot) + args = read_args(iprot, Get_partitions_by_names_args) + result = Get_partitions_by_names_result.new() + begin + result.success = @handler.get_partitions_by_names(args.db_name, args.tbl_name, args.names) + rescue MetaException => o1 + result.o1 = o1 + rescue NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_partitions_by_names', seqid) + end + def process_alter_partition(seqid, iprot, oprot) args = read_args(iprot, Alter_partition_args) result = Alter_partition_result.new() @@ -2956,6 +2986,46 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_partitions_by_names_args + include ::Thrift::Struct, ::Thrift::Struct_Union + DB_NAME = 1 + TBL_NAME = 2 + NAMES = 3 + + FIELDS = { + DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_partitions_by_names_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => Partition}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Alter_partition_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 Modified: 
hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original) +++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Wed Mar 30 00:59:59 2011 @@ -2397,6 +2397,34 @@ public class HiveMetaStore extends Thrif } @Override + public List<Partition> get_partitions_by_names(final String dbName, + final String tblName, final List<String> partNames) + throws MetaException, NoSuchObjectException, TException { + + startTableFunction("get_partitions_by_names", dbName, tblName); + + List<Partition> ret = null; + try { + ret = executeWithRetry(new Command<List<Partition>>() { + @Override + List<Partition> run(RawStore ms) throws Exception { + return ms.getPartitionsByNames(dbName, tblName, partNames); + } + }); + } catch (MetaException e) { + throw e; + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + assert(e instanceof RuntimeException); + throw (RuntimeException)e; + } finally { + endFunction("get_partitions_by_names"); + } + return ret; + } + + @Override public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List<String> groupNames) throws MetaException, TException { Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original) +++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Wed Mar 30 00:59:59 2011 @@ -51,7 +51,6 @@ import org.apache.hadoop.hive.metastore. 
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; @@ -582,7 +581,7 @@ public class HiveMetaStoreClient impleme return deepCopyPartitions( client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts)); } - + @Override public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names) @@ -648,7 +647,12 @@ public class HiveMetaStoreClient impleme List<String> part_vals) throws NoSuchObjectException, MetaException, TException { return deepCopy(client.get_partition(db_name, tbl_name, part_vals)); } - + + public List<Partition> getPartitionsByNames(String db_name, String tbl_name, + List<String> part_names) throws NoSuchObjectException, MetaException, TException { + return deepCopyPartitions(client.get_partitions_by_names(db_name, tbl_name, part_names)); + } + @Override public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) @@ -1005,13 +1009,13 @@ public class HiveMetaStoreClient impleme public boolean drop_role(String roleName) throws MetaException, TException { return client.drop_role(roleName); } - + @Override public List<Role> list_roles(String principalName, PrincipalType principalType) throws MetaException, TException { return client.list_roles(principalName, principalType); } - + @Override public List<String> listRoleNames() throws MetaException, TException { return client.get_role_names(); Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original) +++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Wed Mar 30 00:59:59 2011 @@ -339,6 +339,19 @@ public interface IMetaStoreClient { throws MetaException, TException, NoSuchObjectException; /** + * Get partitions by a list of partition names. 
+ * @param db_name database name + * @param tbl_name table name + * @param part_names list of partition names + * @return list of Partition objects + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + public List<Partition> getPartitionsByNames(String db_name, String tbl_name, + List<String> part_names) throws NoSuchObjectException, MetaException, TException; + + /** * @param dbName * @param tableName * @param partialPvals Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original) +++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Wed Mar 30 00:59:59 2011 @@ -881,7 +881,7 @@ public class ObjectStore implements RawS private List<Order> convertToOrders(List<MOrder> mkeys) { List<Order> keys = null; if (mkeys != null) { - keys = new ArrayList<Order>(); + keys = new ArrayList<Order>(mkeys.size()); for (MOrder part : mkeys) { keys.add(new Order(part.getCol(), part.getOrder())); } @@ -907,18 +907,24 @@ public class ObjectStore implements RawS // MSD and SD should be same objects. Not sure how to make then same right now // MSerdeInfo *& SerdeInfo should be same as well - private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) + private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd, + boolean noFS) throws MetaException { if (msd == null) { return null; } - return new StorageDescriptor(convertToFieldSchemas(msd.getCols()), msd - .getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd + return new StorageDescriptor(noFS ? 
null: convertToFieldSchemas(msd.getCols()), + msd.getLocation(), msd.getInputFormat(), msd.getOutputFormat(), msd .isCompressed(), msd.getNumBuckets(), converToSerDeInfo(msd .getSerDeInfo()), msd.getBucketCols(), convertToOrders(msd .getSortCols()), msd.getParameters()); } + private StorageDescriptor convertToStorageDescriptor(MStorageDescriptor msd) + throws MetaException { + return convertToStorageDescriptor(msd, false); + } + private MStorageDescriptor convertToMStorageDescriptor(StorageDescriptor sd) throws MetaException { if (sd == null) { @@ -1055,6 +1061,16 @@ public class ObjectStore implements RawS mpart.getParameters()); } + private Partition convertToPart(String dbName, String tblName, MPartition mpart) + throws MetaException { + if (mpart == null) { + return null; + } + return new Partition(mpart.getValues(), dbName, tblName, mpart.getCreateTime(), + mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), true), + mpart.getParameters()); + } + public boolean dropPartition(String dbName, String tableName, List<String> part_vals) throws MetaException { boolean success = false; @@ -1178,6 +1194,15 @@ public class ObjectStore implements RawS return parts; } + private List<Partition> convertToParts(String dbName, String tblName, List<MPartition> mparts) + throws MetaException { + List<Partition> parts = new ArrayList<Partition>(mparts.size()); + for (MPartition mp : mparts) { + parts.add(convertToPart(dbName, tblName, mp)); + } + return parts; + } + // TODO:pc implement max public List<String> listPartitionNames(String dbName, String tableName, short max) throws MetaException { @@ -1236,6 +1261,54 @@ public class ObjectStore implements RawS } @Override + public List<Partition> getPartitionsByNames(String dbName, String tblName, + List<String> partNames) throws MetaException, NoSuchObjectException { + + boolean success = false; + try { + openTransaction(); + + StringBuilder sb = new StringBuilder( + "table.tableName == t1 && table.database.name == t2 && ("); + int n = 0; + Map<String, String> params = new HashMap<String, String>(); + for (Iterator<String> itr = partNames.iterator(); itr.hasNext();) { + String pn = "p" + n; + n++; + String part = itr.next(); + params.put(pn, part); + sb.append("partitionName == ").append(pn); + sb.append(" || "); + } + sb.setLength(sb.length() - 4); // remove the last " || " + sb.append(')'); + + Query query = pm.newQuery(MPartition.class, sb.toString()); + + LOG.debug(" JDOQL filter is " + sb.toString()); + + params.put("t1", tblName.trim()); + params.put("t2", dbName.trim()); + + String parameterDeclaration = makeParameterDeclarationString(params); + query.declareParameters(parameterDeclaration); + query.setOrdering("partitionName ascending"); + + List<MPartition> mparts = (List<MPartition>) query.executeWithMap(params); + // pm.retrieveAll(mparts); // retrieveAll is pessimistic. 
some fields may not be needed + List<Partition> results = convertToParts(dbName, tblName, mparts); + // pm.makeTransientAll(mparts); // makeTransient will prohibit future access of unfetched fields + query.closeAll(); + success = commitTransaction(); + return results; + } finally { + if (!success) { + rollbackTransaction(); + } + } + } + + @Override public List<Partition> getPartitionsByFilter(String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { openTransaction(); Modified: hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java URL: http://svn.apache.org/viewvc/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java (original) +++ hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java Wed Mar 30 00:59:59 2011 @@ -144,48 +144,52 @@ public interface RawStore extends Config public abstract List<Partition> getPartitionsByFilter( String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; - + + public abstract List<Partition> getPartitionsByNames( + String dbName, String tblName, List<String> partNames) + throws MetaException, NoSuchObjectException; + public abstract boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; - + public abstract boolean removeRole(String roleName) throws MetaException, NoSuchObjectException; - + public abstract boolean grantRole(Role role, String userName, PrincipalType principalType, - String grantor, PrincipalType grantorType, boolean grantOption) + String grantor, PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException, InvalidObjectException; - - public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType) + + public abstract boolean revokeRole(Role role, String userName, PrincipalType principalType) throws MetaException, NoSuchObjectException; public abstract PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames) throws InvalidObjectException, MetaException; - - public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + + public abstract PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException; - - public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + + public abstract PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException; - - public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + + public abstract PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException; - - public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + + public abstract PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException; - + public 
abstract List<MGlobalPrivilege> listPrincipalGlobalGrants(String principalName, PrincipalType principalType); - + public abstract List<MDBPrivilege> listPrincipalDBGrants(String principalName, PrincipalType principalType, String dbName); public abstract List<MTablePrivilege> listAllTableGrants( String principalName, PrincipalType principalType, String dbName, String tableName); - + public abstract List<MPartitionPrivilege> listPrincipalPartitionGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String partName); - + public abstract List<MTableColumnPrivilege> listPrincipalTableColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String columnName); @@ -193,21 +197,21 @@ public interface RawStore extends Config public abstract List<MPartitionColumnPrivilege> listPrincipalPartitionColumnGrants( String principalName, PrincipalType principalType, String dbName, String tableName, String partName, String columnName); - - public abstract boolean grantPrivileges (PrivilegeBag privileges) + + public abstract boolean grantPrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; - - public abstract boolean revokePrivileges (PrivilegeBag privileges) + + public abstract boolean revokePrivileges (PrivilegeBag privileges) throws InvalidObjectException, MetaException, NoSuchObjectException; public abstract org.apache.hadoop.hive.metastore.api.Role getRole( String roleName) throws NoSuchObjectException; public List<String> listRoleNames(); - + public List<MRoleMap> listRoles(String principalName, PrincipalType principalType); - + public abstract Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original) +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Wed Mar 30 00:59:59 2011 @@ -1512,6 +1512,8 @@ public class Hive { * * @param tbl * object for which partition is needed. Must be partitioned. + * @param partialPartSpec + * partial partition specification (some subpartitions can be empty). * @return list of partition objects * @throws HiveException */ @@ -1527,21 +1529,58 @@ public class Hive { List<String> names = getPartitionNames(tbl.getDbName(), tbl.getTableName(), partialPartSpec, (short)-1); - List<Partition> partitions = new ArrayList<Partition>(); + List<Partition> partitions = getPartitionsByNames(tbl, names); + return partitions; + } - for (String pval: names) { - try { - org.apache.hadoop.hive.metastore.api.Partition tpart = - getMSC().getPartition(tbl.getDbName(), tbl.getTableName(), pval); - if (tpart != null) { - Partition p = new Partition(tbl, tpart); - partitions.add(p); + /** + * Get all partitions of the table that matches the list of given partition names. + * + * @param tbl + * object for which partition is needed. Must be partitioned. 
+ * @param partNames + * list of partition names + * @return list of partition objects + * @throws HiveException + */ + public List<Partition> getPartitionsByNames(Table tbl, List<String> partNames) + throws HiveException { + + if (!tbl.isPartitioned()) { + throw new HiveException("Partition spec should only be supplied for a " + + "partitioned table"); + } + List<Partition> partitions = new ArrayList<Partition>(partNames.size()); + + int batchSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + int nParts = partNames.size(); + int nBatches = nParts / batchSize; + + try { + for (int i = 0; i < nBatches; ++i) { + List<org.apache.hadoop.hive.metastore.api.Partition> tParts = + getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), + partNames.subList(i*batchSize, (i+1)*batchSize)); + if (tParts != null) { + for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { + partitions.add(new Partition(tbl, tpart)); + } } - } catch (Exception e) { - throw new HiveException(e); } - } + if (nParts > nBatches * batchSize) { + List<org.apache.hadoop.hive.metastore.api.Partition> tParts = + getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(), + partNames.subList(nBatches*batchSize, nParts)); + if (tParts != null) { + for (org.apache.hadoop.hive.metastore.api.Partition tpart: tParts) { + partitions.add(new Partition(tbl, tpart)); + } + } + } + } catch (Exception e) { + throw new HiveException(e); + } return partitions; } Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java (original) +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java Wed Mar 30 00:59:59 2011 @@ -183,7 +183,7 @@ public class Partition implements Serial if (table.isView()) { return; } - + String partName = ""; if (table.isPartitioned()) { try { @@ -197,6 +197,12 @@ public class Partition implements Serial tPartition.getSd().setLocation(partPath.toString()); } } + // set default if columns are not set + if (tPartition.getSd().getCols() == null) { + if (table.getCols() != null) { + tPartition.getSd().setCols(table.getCols()); + } + } } catch (MetaException e) { throw new HiveException("Invalid partition for table " + table.getTableName(), e); Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java (original) +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java Wed Mar 30 00:59:59 2011 @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.ql.optimizer.ppr; import java.util.ArrayList; +import java.util.HashMap; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; @@ -42,7 +44,7 @@ public class PartExprEvalUtils { * @return value returned by the expression * @throws HiveException */ - static public Object evalExprWithPart(ExprNodeDesc expr, 
LinkedHashMap<String, String> partSpec, + static synchronized public Object evalExprWithPart(ExprNodeDesc expr, LinkedHashMap<String, String> partSpec, StructObjectInspector rowObjectInspector) throws HiveException { Object[] rowWithPart = new Object[2]; // Create the row object @@ -75,4 +77,45 @@ public class PartExprEvalUtils { return ((PrimitiveObjectInspector) evaluateResultOI) .getPrimitiveJavaObject(evaluateResultO); } + + static synchronized public Map<PrimitiveObjectInspector, ExprNodeEvaluator> prepareExpr( + ExprNodeDesc expr, List<String> partNames, + StructObjectInspector rowObjectInspector) throws HiveException { + + // Create the row object + List<ObjectInspector> partObjectInspectors = new ArrayList<ObjectInspector>(); + for (int i = 0; i < partNames.size(); i++) { + partObjectInspectors.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector); + } + StructObjectInspector partObjectInspector = ObjectInspectorFactory + .getStandardStructObjectInspector(partNames, partObjectInspectors); + + List<StructObjectInspector> ois = new ArrayList<StructObjectInspector>(2); + ois.add(rowObjectInspector); + ois.add(partObjectInspector); + StructObjectInspector rowWithPartObjectInspector = + ObjectInspectorFactory.getUnionStructObjectInspector(ois); + + ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(expr); + ObjectInspector evaluateResultOI = evaluator.initialize(rowWithPartObjectInspector); + + Map<PrimitiveObjectInspector, ExprNodeEvaluator> result = + new HashMap<PrimitiveObjectInspector, ExprNodeEvaluator>(); + result.put((PrimitiveObjectInspector)evaluateResultOI, evaluator); + return result; + } + + static synchronized public Object evaluateExprOnPart( + Map<PrimitiveObjectInspector, ExprNodeEvaluator> pair, Object[] rowWithPart) + throws HiveException { + assert(pair.size() > 0); + // only get the 1st entry from the map + Map.Entry<PrimitiveObjectInspector, ExprNodeEvaluator> entry = pair.entrySet().iterator().next(); + PrimitiveObjectInspector evaluateResultOI = entry.getKey(); + ExprNodeEvaluator evaluator = entry.getValue(); + + Object evaluateResultO = evaluator.evaluate(rowWithPart); + + return evaluateResultOI.getPrimitiveJavaObject(evaluateResultO); + } } Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java?rev=1086815&r1=1086814&r2=1086815&view=diff ============================================================================== --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (original) +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java Wed Mar 30 00:59:59 2011 @@ -21,6 +21,7 @@ package org.apache.hadoop.hive.ql.optimi import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.LinkedHashSet; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -29,8 +30,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import 
org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; @@ -57,6 +60,7 @@ import org.apache.hadoop.hive.ql.plan.Ex import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.thrift.TException; @@ -310,51 +314,69 @@ public class PartitionPruner implements Set<Partition> denied_parts, ExprNodeDesc prunerExpr, StructObjectInspector rowObjectInspector) throws Exception { - for (String partName : Hive.get().getPartitionNames(tab.getDbName(), - tab.getTableName(), (short) -1)) { + List<String> trueNames = null; + List<String> unknNames = null; + + Utilities.PerfLogBegin(LOG, "prune-listing"); + + List<String> partNames = Hive.get().getPartitionNames(tab.getDbName(), + tab.getTableName(), (short) -1); + + List<FieldSchema> pCols = tab.getPartCols(); + List<String> partCols = new ArrayList<String>(pCols.size()); + List<String> values = new ArrayList<String>(pCols.size()); + Object[] objectWithPart = new Object[2]; + + for (FieldSchema pCol : pCols) { + partCols.add(pCol.getName()); + } + + Map<PrimitiveObjectInspector, ExprNodeEvaluator> handle = PartExprEvalUtils.prepareExpr( + prunerExpr, partCols, rowObjectInspector); + + for (String partName : partNames) { // Set all the variables here LinkedHashMap<String, String> partSpec = Warehouse .makeSpecFromName(partName); - LOG.trace("about to process partition " + partSpec + " for pruning "); - // evaluate the expression tree - if (prunerExpr != null) { + values.clear(); + for (Map.Entry<String, String> kv: partSpec.entrySet()) { + values.add(kv.getValue()); + } + objectWithPart[1] = values; - Boolean r = (Boolean) PartExprEvalUtils.evalExprWithPart(prunerExpr, partSpec, - rowObjectInspector); + // evaluate the expression tree + Boolean r = (Boolean) PartExprEvalUtils.evaluateExprOnPart(handle, objectWithPart); - if (Boolean.FALSE.equals(r)) { - if (denied_parts.isEmpty()) { - Partition part = Hive.get().getPartition(tab, partSpec, - Boolean.FALSE); - denied_parts.add(part); - } - LOG.trace("pruned partition: " + partSpec); - } else { - Partition part = Hive.get().getPartition(tab, partSpec, - Boolean.FALSE); - String state = "retained"; - if (Boolean.TRUE.equals(r)) { - true_parts.add(part); - } else { - // r == null means prunerExpr contains null subexpression, - // which was converted from non-partition columns - assert (r == null); - unkn_parts.add(part); - state = "unknown"; - } - if (LOG.isDebugEnabled()) { - LOG.debug(state + " partition: " + partSpec); - } + if (r == null) { + if (unknNames == null) { + unknNames = new LinkedList<String>(); } - } else { - // is there is no parition pruning, all of them are needed - true_parts.add(Hive.get() - .getPartition(tab, partSpec, Boolean.FALSE)); + unknNames.add(partName); + LOG.debug("retained unknown partition: " + partName); + } else if (Boolean.TRUE.equals(r)) { + if (trueNames == null) { + trueNames = new LinkedList<String>(); + } + trueNames.add(partName); + LOG.debug("retained partition: " + partName); } } + Utilities.PerfLogEnd(LOG, "prune-listing"); + + Utilities.PerfLogBegin(LOG, "partition-retrieving"); + if (trueNames != null) { + List<Partition> parts = Hive.get().getPartitionsByNames(tab, trueNames); + true_parts.addAll(parts); + } + if (unknNames != null) 
{ + List<Partition> parts = Hive.get().getPartitionsByNames(tab, unknNames); + unkn_parts.addAll(parts); + } + Utilities.PerfLogEnd(LOG, "partition-retrieving"); } + /** * Whether the expression contains a column node or not. */
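
Two implementation details in the Java changes above are worth calling out. First, Hive.getPartitionsByNames never ships the whole name list in a single Thrift call: it issues nParts/batchSize full batches of METASTORE_BATCH_RETRIEVE_MAX names each, plus one remainder call for whatever is left over. A self-contained sketch of that chunking arithmetic — the Fetch callback is a hypothetical stand-in for the metastore client, not committed code:

    import java.util.ArrayList;
    import java.util.List;

    public final class BatchedFetchSketch {
      // Hypothetical stand-in for the metastore round trip.
      interface Fetch<T> {
        List<T> fetch(List<String> chunk) throws Exception;
      }

      // Same subList arithmetic as Hive.getPartitionsByNames: nBatches full
      // batches of batchSize names, then one remainder call if anything is
      // left. Assumes batchSize > 0, as the committed code does (the config
      // value comes from METASTORE_BATCH_RETRIEVE_MAX).
      static <T> List<T> fetchInBatches(List<String> names, int batchSize,
          Fetch<T> fetch) throws Exception {
        List<T> result = new ArrayList<T>(names.size());
        int nParts = names.size();
        int nBatches = nParts / batchSize;
        for (int i = 0; i < nBatches; ++i) {
          result.addAll(fetch.fetch(names.subList(i * batchSize, (i + 1) * batchSize)));
        }
        if (nParts > nBatches * batchSize) {
          result.addAll(fetch.fetch(names.subList(nBatches * batchSize, nParts)));
        }
        return result;
      }
    }
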

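Second, ObjectStore.getPartitionsByNames builds its JDOQL filter as a disjunction of named parameters, one per requested partition name, so the name values are bound as query parameters rather than concatenated into the query string. A minimal sketch of just that construction — again illustrative rather than committed code, and assuming a non-empty name list exactly as the committed method does:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class JdoqlFilterSketch {
      // Mirrors the StringBuilder logic in ObjectStore.getPartitionsByNames:
      // one "partitionName == pN" disjunct per requested name, with the
      // actual values collected into a parameter map for executeWithMap.
      static String buildFilter(List<String> partNames, Map<String, String> params) {
        StringBuilder sb = new StringBuilder(
            "table.tableName == t1 && table.database.name == t2 && (");
        int n = 0;
        for (String part : partNames) {
          String pn = "p" + n++;
          params.put(pn, part);
          sb.append("partitionName == ").append(pn).append(" || ");
        }
        sb.setLength(sb.length() - 4); // remove the trailing " || "
        sb.append(')');
        return sb.toString();
      }

      public static void main(String[] args) {
        Map<String, String> params = new HashMap<String, String>();
        String filter = buildFilter(
            Arrays.asList("ds=2011-03-30/hr=00", "ds=2011-03-30/hr=01"), params);
        // The committed method then binds the table and database before
        // executing: params.put("t1", tblName.trim()); params.put("t2", dbName.trim());
        System.out.println(filter);
        System.out.println(params);
      }
    }
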